arxiv_id
stringlengths 0
16
| text
stringlengths 10
1.65M
|
|---|---|
import dataio
from TensorFlowRecommender import TensorFlowRecommender
import numpy as np
np.random.seed(13575)
def get_data():
    """Load the MovieLens-1M ratings, shuffle them, and split 90/10.

    Returns:
        (df_train, df_test): shuffled train/test DataFrames, each with a
        fresh 0-based index.
    """
    ratings = dataio.read_process("data/ml-1m/ratings.dat", sep="::")
    total = len(ratings)
    shuffled = ratings.iloc[np.random.permutation(total)].reset_index(drop=True)
    cut = int(total * 0.9)
    train = shuffled[0:cut]
    test = shuffled[cut:].reset_index(drop=True)
    return train, test
if __name__ == '__main__':
    # Train on the 90% split, evaluate on the 10% split.
    df_train, df_test = get_data()
    tfr = TensorFlowRecommender()
    tfr.fit(df_train, df_test, epoch_max=6)
    # Fix: the original mixed Python 2 `print` statements with the
    # py3-style call on the line above; use the print() function throughout
    # so the script runs on Python 3.
    print("Done fitting")
    print()
    print("Top 10 items for user 1:")
    print("{:6s} {:11s}".format("Item #", "Pred Rating"))
    print("-" * 22)
    for item, score in tfr.predictTopK(1, 10):
        print("{:>6d} {:>11.5f}".format(item, score))
|
|
import random
import numpy as np
class DefaultRandomGenerator:
    """Random-number helpers backed by the stdlib `random` module.

    NumPy is used only as an array container so results depend solely on
    `random`'s PRNG state (seedable via :meth:`seed`).
    """

    def rand(self, size=None):
        """Uniform [0, 1) sample.

        :param size: None for a single float, or an (n, m) pair for an
            n-by-m ndarray of independent samples.
        :return: float, or ndarray of shape (n, m).
        """
        if size is None:
            return random.random()
        n, m = size[0], size[1]
        val = np.zeros((n, m))
        for i in range(n):
            for j in range(m):
                val[i, j] = random.random()
        return val

    def perm(self, length, n=1):
        """Return a list of `n` independent random permutations of range(length)."""
        perms = []
        for _ in range(n):
            order = list(range(length))
            random.shuffle(order)
            perms.append(order)
        return perms

    def seed(self, x):
        """Seed the underlying stdlib PRNG."""
        random.seed(x)

    def randint(self, low, high, size=None):
        """Random integer(s) in [low, high].

        Degenerate ranges (low >= high) return `low`, matching the original
        behavior.

        :param size: None for a single int, or an (n, m) pair for an
            ndarray of ints.
        """
        if low >= high:
            return low
        res = low + (self.rand(size) * (high - low))
        if size is None:
            # Scalar path, unchanged: clamp then truncate.
            if res > high:
                res = high
            return int(res)
        # Fix: the original compared an ndarray to a scalar with `if res > high`,
        # which raises ValueError. Clamp elementwise and truncate instead.
        return np.minimum(res, high).astype(int)
|
|
from styx_msgs.msg import TrafficLight
import rospy
import rospkg
import numpy as np
import os
import sys
import tensorflow as tf
from collections import defaultdict
from io import StringIO
from object_detection_classifier import ObjectDetectionClassifier
import time
UNKNOWN = 'UNKNOWN'
YELLOW = 'Yellow'
GREEN = 'Green'
RED = 'Red'
NUM_CLASSES = 14
class MLClassifier(object):
    """Traffic-light color classifier backed by an object-detection model."""

    def __init__(self, model_path, labels_path):
        # Default to UNKNOWN until a classification has been made.
        self.current_light = TrafficLight.UNKNOWN
        self.classifier = ObjectDetectionClassifier(model_path, labels_path, NUM_CLASSES)
        rospy.logdebug("Loaded the model")

    def get_classification(self, image):
        """Determines the color of the traffic light in the image
        Args:
            image (cv::Mat): image containing the traffic light
        Returns:
            int: ID of traffic light color (specified in styx_msgs/TrafficLight)
        """
        # Reset, then map the detector's class name onto the message enum;
        # unrecognized names keep the UNKNOWN default.
        self.current_light = TrafficLight.UNKNOWN
        class_name = self.classifier.classify_image(image)
        color_map = {
            RED: TrafficLight.RED,
            GREEN: TrafficLight.GREEN,
            YELLOW: TrafficLight.YELLOW,
        }
        if class_name in color_map:
            self.current_light = color_map[class_name]
        rospy.logdebug('Image classified as {}'.format(class_name))
        return self.current_light
|
|
import numpy as np
from math import pi, sqrt
from sys import platform
if platform == "darwin": # MACOS
from openseespymac.opensees import *
else:
from openseespy.opensees import *
import os
''' FUNCTION: build_model ------------------------------------------------------
Generates OpenSeesPy model of an elastic-perfectly plastic SDOF system and runs
gravity analysis.
Inputs: in model_params
W - weight of structure
f_yield - yield strength (passed as Fy to the Steel01 material)
T1 - fundamental period
Outputs:
-----------------------------------------------------------------------------'''
def build_model(model_params):
    """Build an elastic-perfectly-plastic SDOF OpenSees model and run a
    static gravity analysis.

    :param model_params: dict with keys "W" (weight), "f_yield" (yield
        strength of the bilinear spring) and "T1" (fundamental period, s).
    """
    G = 386.1  # gravitational acceleration, in/s^2
    W = model_params["W"]
    f_yield = model_params["f_yield"]
    T1 = model_params["T1"]
    m = W / G
    print("m: " + str(m))
    # set model dimensions and deg of freedom
    model('basic', '-ndm', 3, '-ndf', 6)
    # define nodes
    base_node_tag = 10000
    top_node_tag = 10001
    height = 240. # in
    node(base_node_tag, 0., 0., 0.)
    node(top_node_tag, 0., 0., height)
    # define fixities: base fully fixed; top free to translate but fixed
    # against all rotations
    fix(base_node_tag, 1, 1, 1, 1, 1, 1)
    fix(top_node_tag, 0, 0, 0, 1, 1, 1)
    # define bilinear (elastic-perfectly plastic) material
    material_tag = 100
    stiffmat = 110
    # lateral stiffness from T1 = 2*pi*sqrt(m/K)
    K = m / (T1/(2*pi))**2
    print("K: " + str(K))
    # Steel01 args: Fy, E0, b (near-zero hardening -> effectively EPP)
    uniaxialMaterial('Steel01', material_tag, f_yield, K, 0.0001)
    # very stiff elastic material for the axial direction of the link
    uniaxialMaterial('Elastic', stiffmat, 1.e9)
    # define element
    element_tag = 1000
    element('twoNodeLink', element_tag, base_node_tag, top_node_tag, '-mat', stiffmat, material_tag, material_tag, '-dir', 1, 2, 3, '-orient', 0., 0., 1., 0., 1., 0., '-doRayleigh')
    # define mass (translational only)
    mass(top_node_tag, m, m, m, 0., 0., 0.)
    # define gravity loads
    # W = m * 386.01 # g
    timeSeries('Linear', 1)
    pattern('Plain', 101, 1)
    load(top_node_tag, 0., 0., -W, 0., 0., 0.)
    # define damping based on first eigenmode (stiffness-proportional only)
    damp_ratio = 0.05
    angular_freq = eigen(1)[0]**0.5
    beta_k = 2 * damp_ratio / angular_freq
    rayleigh(0., beta_k, 0., 0.)
    # run gravity analysis
    tol = 1e-8 # convergence tolerance for test
    iter = 100 # max number of iterations
    nstep = 100 # apply gravity loads in 100 steps
    incr = 1./nstep # first load increment
    # analysis settings
    constraints('Transformation') # enforce boundary conditions using transformation constraint handler
    numberer('RCM') # renumbers dof's to minimize band-width (optimization)
    system('BandGeneral') # stores system of equations as 1D array of size bandwidth x number of unknowns
    test('EnergyIncr', tol, iter, 0) # tests for convergence using dot product of solution vector and norm of right-hand side of matrix equation
    algorithm('Newton') # use Newton's solution algorithm: updates tangent stiffness at every iteration
    integrator('LoadControl', incr) # determine the next time step for an analysis
    analysis('Static') # define type of analysis, static or transient
    analyze(nstep) # perform gravity analysis
    # after gravity analysis, reset time so the dynamic analysis starts at 0
    loadConst('-time', 0.0)
# FUNCTION: PeakDriftRecorder --------------------------------------------------
# saves envelope of interstory drift ratio for each story at one analysis step
# ------------------------------------------------------------------------------
def PeakDriftRecorder(EDP_specs, envDict):
    """Update the running envelope (maximum) of peak interstory drift ratio.

    :param EDP_specs: dict; EDP_specs['PID'][loc][dof] is a pair of
        [bottom, top] node tags for each monitored story/direction.
    :param envDict: dict of envelope values; envDict['PID'][loc] is a list,
        one entry per dof, in the dof iteration order.
    :return: envDict with each entry replaced by max(previous, current drift).
    """
    for loc in EDP_specs['PID']:
        # enumerate replaces the original hand-rolled `pos` counter; the
        # envelope list position still follows the dof iteration order.
        for pos, dof in enumerate(EDP_specs['PID'][loc]):
            storynodes = [int(x) for x in EDP_specs['PID'][loc][dof]]
            story_height = nodeCoord(storynodes[1], 3) - nodeCoord(storynodes[0], 3)
            # drift ratio = relative displacement / story height
            topDisp = nodeDisp(storynodes[1], dof)
            botDisp = nodeDisp(storynodes[0], dof)
            new_drift = abs((topDisp - botDisp) / story_height)
            envDict['PID'][loc][pos] = max(new_drift, envDict['PID'][loc][pos])
    return envDict
# FUNCTION: AccelHistoryRecorder -----------------------------------------------
# saves time history of relative floor acceleration for each story at one analysis step
# ------------------------------------------------------------------------------
def AccelHistoryRecorder(EDP_specs, histDict, count):
    """Record relative floor acceleration at one analysis step.

    :param EDP_specs: dict; EDP_specs['PFA'][loc][dof][0] is the node tag.
    :param histDict: dict of time histories; histDict['accel'][loc][dof] is
        a pre-allocated array indexed by analysis step.
    :param count: current step index in the time history.
    :return: histDict with the new sample written in place.
    """
    for loc in EDP_specs['PFA']:
        for dof in EDP_specs['PFA'][loc]:
            node_tag = int(EDP_specs['PFA'][loc][dof][0])
            histDict['accel'][loc][dof][count] = nodeAccel(node_tag, dof)
    return histDict
# FUNCTION: RunDynamicAnalysis -------------------------------------------------
# performs dynamic analysis and records EDPs in dictionary
# ------------------------------------------------------------------------------
def RunDynamicAnalysis(tol,iter,dt,driftLimit,EDP_specs,subSteps,GMX,GMZ):
    """Run the bidirectional transient analysis and record EDPs.

    :param tol: convergence tolerance for the NormDispIncr test.
    :param iter: max iterations per convergence check.
    :param dt: time increment of the ground-motion records.
    :param driftLimit: interstory drift ratio treated as collapse.
    :param EDP_specs: dict of EDP type -> location -> dof -> node tags.
    :param subSteps: initial number of subdivisions used when a step
        fails to converge (doubled up to maxDiv on repeated failure).
    :param GMX: list of GM acceleration ordinates in X direction.
    :param GMZ: list of GM acceleration ordinates in Z direction.
    :return: dict of envelope EDPs ('PID' drifts, 'PFA' absolute accels).
    """
    # pad shorter record with zeros (free vibration) such that two horizontal records are the same length
    nsteps = max(len(GMX),len(GMZ))
    if len(GMX) < nsteps:
        diff = nsteps - len(GMX)
        GMX.extend(np.zeros(diff))
    if len(GMZ) < nsteps:
        diff = nsteps - len(GMZ)
        GMZ.extend(np.zeros(diff))
    # generate time array from recording
    time_record = np.linspace(0,nsteps*dt,num=nsteps,endpoint=False)
    # initialize dictionary of envelope EDPs (one slot per dof, per location)
    envelopeDict = {}
    for edp in EDP_specs:
        envelopeDict[edp] = {}
        for loc in EDP_specs[edp]:
            numdof = len(EDP_specs[edp][loc])
            envelopeDict[edp][loc] = np.zeros(numdof).tolist()
    print(envelopeDict)
    # initialize dictionary of time history EDPs
    # (5x oversized buffers: sub-stepping can produce more samples than nsteps)
    historyDict = {'accel':{}}
    time_analysis = np.zeros(nsteps*5)
    for loc in EDP_specs['PFA']:
        historyDict['accel'][loc] = {}
        for dof in EDP_specs['PFA'][loc]:
            historyDict['accel'][loc][dof] = np.zeros(nsteps*5)
    # number of diaphragm levels
    levels = len(EDP_specs['PFA'])
    # nodes used for the collapse-drift check (dof 1 entry per level)
    CODnodes = []
    for loc in EDP_specs['PFA']:
        CODnodes.append(int(EDP_specs['PFA'][loc][1][0]))
    print(CODnodes)
    constraints('Transformation') # handles boundary conditions based on transformation equation method
    numberer('RCM') # renumber dof's to minimize band-width (optimization)
    system('UmfPack') # constructs sparse system of equations using UmfPack solver
    test('NormDispIncr',tol,iter) # tests for convergence using norm of left-hand side of matrix equation
    algorithm('NewtonLineSearch') # use Newton's solution algorithm: updates tangent stiffness at every iteration
    integrator('Newmark', 0.5, 0.25) # Newmark average acceleration method for numerical integration
    analysis('Transient') # define type of analysis: time-dependent
    # initialize variables
    maxDiv = 1024 # finest allowed subdivision of dt
    minDiv = subSteps
    step = 0
    ok = 0 # 0 = converged
    breaker = 0 # 1 = collapse drift reached, stop the analysis
    maxDrift = 0
    count = 0 # number of (sub)steps actually recorded
    while step<nsteps and ok==0 and breaker==0:
        step = step + 1 # take 1 step
        ok = 2
        div = minDiv
        length = maxDiv # remaining fraction of dt, in units of dt/maxDiv
        while div<=maxDiv and length>0 and breaker==0:
            stepSize = dt/div
            ok = analyze(1,stepSize) # perform analysis for one increment; will return 0 if no convergence issues
            if ok==0:
                count = count + 1
                length = length - maxDiv/div
                # check if drift limits are satisfied
                level = 1
                while level < levels:
                    story_height = nodeCoord(CODnodes[level],3)-nodeCoord(CODnodes[level-1],3)
                    # check X direction drifts (direction 1)
                    topDisp = nodeDisp(CODnodes[level],1)
                    botDisp = nodeDisp(CODnodes[level-1],1)
                    deltaDisp = abs(topDisp-botDisp)
                    drift = deltaDisp/story_height
                    if drift >= driftLimit:
                        breaker = 1
                    # check Y direction drifts (direction 2)
                    topDisp = nodeDisp(CODnodes[level],2)
                    botDisp = nodeDisp(CODnodes[level-1],2)
                    deltaDisp = abs(topDisp-botDisp)
                    drift = deltaDisp/story_height
                    if drift >= driftLimit:
                        breaker = 1
                    # move on to check next level
                    level = level + 1
                # save parameter values in recording dictionaries at every step
                time_analysis[count] = time_analysis[count-1]+stepSize
                envelopeDict = PeakDriftRecorder(EDP_specs, envelopeDict)
                historyDict = AccelHistoryRecorder(EDP_specs, historyDict, count)
            else: # if ok != 0, halve the step size and retry
                div = div*2
                print("Number of increments increased to ",str(div))
        # end analysis once drift limit has been reached
        if breaker == 1:
            ok = 1
            print("Collapse drift has been reached")
    print("Number of analysis steps completed: {}".format(count))
    # remove extra zeros from time history
    time_analysis = time_analysis[1:count+1]
    historyDict['time'] = time_analysis.tolist()
    # remove extra zeros from accel time history, add GM to obtain absolute acceleration, and record envelope value
    GMX_interp = np.interp(time_analysis, time_record, GMX)
    GMZ_interp = np.interp(time_analysis, time_record, GMZ)
    for level in range(0,levels):
        # X direction (dof 1 -> envelope slot 0)
        historyDict['accel'][level][1] = historyDict['accel'][level][1][1:count+1]
        historyDict['accel'][level][1] = np.asarray(historyDict['accel'][level][1]) + GMX_interp
        envelopeDict['PFA'][level][0] = max(abs(historyDict['accel'][level][1]))
        # Z direction (dof 2 -> envelope slot 1)
        historyDict['accel'][level][2] = historyDict['accel'][level][2][1:count+1]
        historyDict['accel'][level][2] = np.asarray(historyDict['accel'][level][2]) + GMZ_interp
        envelopeDict['PFA'][level][1] = max(abs(historyDict['accel'][level][2]))
    return envelopeDict
# MAIN: run_analysis -----------------------------------------------------------
# runs dynamic analysis for single event and returns dictionary of envelope EDPs
# ------------------------------------------------------------------------------
def run_analysis(GM_dt, GM_npts, TS_List, EDP_specs):
    """Run the dynamic analysis for a single event.

    :param GM_dt: time step of the GM record.
    :param GM_npts: number of steps in the GM record.
    :param TS_List: 1x2 list; first component is the GMX acceleration
        points, second is the GMZ acceleration points (scaled, times G).
    :param EDP_specs: dict of EDP type, location, direction.
    :return: dictionary of envelope EDPs.
    """
    gmx_points, gmz_points = TS_List[0], TS_List[1]
    wipeAnalysis()
    # parameters for the dynamic analysis
    drift_limit = 0.20  # interstory drift ratio treated as collapse
    tolerance = 1.e-08
    max_iterations = 30
    sub_steps = 2
    envdata = RunDynamicAnalysis(tolerance, max_iterations, GM_dt, drift_limit,
                                 EDP_specs, sub_steps, gmx_points, gmz_points)
    print(envdata)
    return envdata
|
|
# Copyright (c) 2017, Intel Research and Development Ireland Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Neo4j Implementation of the graph database.
"""
import json
import time
import logging
import copy
from networkx.readwrite import json_graph
from networkx import DiGraph
from py2neo import Graph, Relationship, Node, NodeSelector, watch
from landscaper.common import EOT_VALUE
from landscaper.common import IDEN_PROPS
from landscaper.common import LOG
from landscaper.graph_db import base
CONFIGURATION_SECTION = 'neo4j'
watch("neo4j.bolt", level=logging.ERROR)
watch("neo4j.http", level=logging.ERROR)
class Neo4jGDB(base.GraphDB):
"""
Class to persist the landscape to a neo4j database. Nothing is deleted in
this database. Instead edges between nodes are expired, in this way, we can
maintain a history of how the landscape has changed over time.
"""
    def __init__(self, conf_manager):
        """
        Store configuration and open the initial Neo4j connection.
        :param conf_manager: Configuration manager providing neo4j settings.
        """
        super(Neo4jGDB, self).__init__(conf_manager)
        # Grab configuration data.
        self.conf_manager = conf_manager
        self.conf_manager.add_section(CONFIGURATION_SECTION)
        # Establish connection to the neo4j DB
        self.connection_timeout = 3600 * 6  # refresh the connection after 6 hours
        self.graph_db_refreshed = None  # epoch time of the last (re)connect
        self.graph_db = self._get_db_connection()
def find(self, label, node_id):
"""
Returns true or false of whether the node with label exists.
:param label: node label name.
:param node_id: Node id.
:return: Returns true or false of whether the node with label exists.
"""
node = self.graph_db.find_one(label, property_key="name",
property_value=node_id)
if node:
return True
return False
def add_node(self, node_id, identity, state, timestmp):
"""
Add a node to the Neo4j database, which involves adding the identity
node and also the state node and then creating a relationship between
them.
:param node_id: The id of the identity node.
:param identity: The identity node.
:param state: State node.
:param timestmp: Epoch timestamp of when the node was created.
:return: An instance of the py2neo neo4j node.
"""
identity = _format_node(identity)
identity['name'] = node_id
iden_node = Node(identity.get('category', 'UNDEFINED'), **identity)
existing_node = self.get_node_by_uuid(node_id)
if existing_node:
LOG.warn("Node with UUID: %s already stored in DB", node_id)
return existing_node
# Store nodes to the database.
transaction = self.graph_db.begin()
state = _format_node(state)
state_label = identity.get('category', 'UNDEFINED') + '_state'
state_node = Node(state_label, **state)
state_rel = self._create_edge(iden_node, state_node, timestmp, "STATE")
transaction.create(iden_node)
transaction.create(state_node)
transaction.create(state_rel)
transaction.commit()
return iden_node
    def update_node(self, node_id, timestamp, state=None, extra_attrs=None):
        """
        Updating a node in the database involves expiring the old state node
        and then creating a new state node and linking it to the identity node
        which is being updated.
        :param node_id: The identity node id.
        :param timestamp: Epoch timestamp of when the update occurred.
        :param state: The new state (full replacement, optional).
        :param extra_attrs: Extra attributes merged onto the state (optional).
        :return: (identity node or None, status message) tuple.
        """
        state_attrs = None
        identity = self.get_node_by_uuid(node_id)
        if not identity:
            umsg = "Node: %s. Node not in the landscape." % node_id
            LOG.warn(umsg)
            return (None, umsg)
        if not state and not extra_attrs:
            umsg = "Node: %s. No attributes supplied for update." % node_id
            LOG.warn(umsg)
            return (identity, umsg)
        if state:
            # NOTE: when extra_attrs is also given, the caller's `state` dict
            # is mutated in place by the update() below.
            state_attrs = state
        old_state = self._get_state_node(identity, time.time())
        if not old_state:
            umsg = "Can't update node: %s, as it is already expired." % node_id
            LOG.warn(umsg)
            return (identity, umsg)
        old_node, old_edge = old_state
        if extra_attrs:
            if state_attrs:
                state_attrs.update(extra_attrs)
            else:
                # No full state supplied: start from the old state's values.
                state_attrs = dict(old_node)
                state_attrs.update(extra_attrs)
        if state_attrs == dict(old_node):
            umsg = "Node: %s. No update. Current state is identical" % node_id
            LOG.warn(umsg)
            return (identity, umsg)
        # Create new state and edge to identity.
        state_label = identity.get('category', 'UNDEFINED') + '_state'
        state_node = Node(state_label, **state_attrs)
        new_edge = self._create_edge(identity, state_node, timestamp, 'STATE')
        # Expire old edge between identity and state.
        self._expire_edge(old_edge, timestamp)
        # Commit it all
        self.graph_db.push(old_edge)
        transaction = self.graph_db.begin()
        transaction.create(new_edge)
        transaction.commit()
        umsg = "Node %s updated successfully" % node_id
        return (identity, umsg)
def add_edge(self, src_node, dest_node, timestamp, label=None):
"""
Add an edge between two nodes and attach timestamp details as an
attribute, which details when the pointed to node was created, updated
or deleted.
:param src_node: The source node.
:param dest_node: The destination node.
:param timestamp: The epoch timestamp of when this edge was created.
:param label: Description of the edge.
:return: Instance of an edge.
"""
# Add link between src and dst nodes.
edge = self._create_edge(src_node, dest_node, timestamp, label)
if edge is not None and self.graph_db.exists(edge):
LOG.warn("Trying to add a relation already stored in the DB")
return edge
transaction = self.graph_db.begin()
transaction.create(edge)
transaction.commit()
return edge
def update_edge(self, src_node, dest_node, timestamp, label=None):
"""
Updates and edges timestamp attributes by expiring the old edge and
adding a new edge. The new edge will then highlight the time of an
update.
:param src_node: Source Node.
:param dest_node: Destination Node.
:param timestamp: Epoch timestamp for when the update occurred.
:param label: Edge Description.
:return: Edge instance.
"""
# Remove old edge
self.delete_edge(src_node, dest_node, timestamp, label)
# Create new edge
edge = self._create_edge(src_node, dest_node, timestamp, label)
transaction = self.graph_db.begin()
transaction.create(edge)
transaction.commit()
return edge
def delete_edge(self, src_node, dest_node, timestamp, label=None):
"""
Deletes an edge by expiring its 'to' attribute.
:param src_node: Source Node
:param dest_node: Destination Node
:param timestamp: epoch timestamp of when the edge was deleted.
:param label: Description of the edge.
:return: Instance of deleted edge.
"""
# Add link between src and dst nodes.
edge = self._get_edge(src_node, dest_node, timestamp, label)
if edge is not None and self.graph_db.exists(edge):
self._expire_edge(edge, timestamp)
self.graph_db.push(edge)
return edge
return None
def delete_node(self, identity, timestamp):
"""
A node is effectively deleted by expiring all inward and outward edges.
:param identity: Node to delete.
:param timestamp: epoch timestamp of when it was deleted.
"""
successors = self._get_successors(identity, timestamp=timestamp)
for _, successor_edge in successors:
self._expire_edge(successor_edge, timestamp)
self.graph_db.push(successor_edge)
predecessors = self._get_predecessors(identity, timestamp=timestamp)
for _, predecessor_edge in predecessors:
self._expire_edge(predecessor_edge, timestamp)
self.graph_db.push(predecessor_edge)
    def predecessors(self, node):
        """
        List of nodes which precede the given node.
        :param node: Reference node
        :return: List of (predecessor node, edge) tuples.
        """
        return self._get_predecessors(node)
    def successors(self, node):
        """
        List of nodes which succeed the given node.
        :param node: Reference node
        :return: List of (successor node, edge) tuples.
        """
        return self._get_successors(node)
    def get_node_by_properties_web(self, properties, start=None, timeframe=0):
        """
        Return a node-link JSON graph of nodes matching the given property.
        :param properties: A tuple with the key, value. Example: (k, v)
        :param start: Epoch start of the query window (defaults to now).
        :param timeframe: Window length in seconds.
        :return: JSON string of matching nodes, or None if no property given.
        """
        start = start or time.time()
        if not properties:
            return None
        start = int(float(start))
        timeframe = int(float(timeframe))
        end = start + timeframe
        # Build conditional query matching the property on either the
        # identity node (n) or its state node (s).
        conditional_query = ""
        property_key = properties[0]
        property_value = properties[1]
        propery_operator = "="
        condition = '(n.{0}{1}"{2}" OR s.{0}{1}"{2}")'.format(property_key,
                                                              propery_operator,
                                                              property_value)
        conditional_query += condition
        query = 'match (n)-[r:STATE]->(s) where {0} ' \
                'AND (r.from <= {1} AND r.to > {2}) return n, s'\
                .format(conditional_query, start, end)
        graph = DiGraph()
        for id_node, state_node in self.graph_db.run(query):
            node = dict(id_node)
            # Rename state keys that clash with identity properties.
            state_attributes = self._unique_attribute_names(IDEN_PROPS,
                                                            dict(state_node),
                                                            node["type"])
            node.update(state_attributes)
            graph.add_node(node["name"], node)
        graph_json = json.dumps(json_graph.node_link_data(graph))
        return graph_json
def get_nodes_by_properties(self, properties):
"""
:param properties: Dictionary of properties, keys and values.
:return: List of node instances.
"""
conditions = list()
for key in properties.keys():
conditions.append('_.{} = "{}"'.format(key, properties[key]))
selector = NodeSelector(self.graph_db)
selected = selector.select().where(*conditions)
return list(selected)
    def get_node_by_uuid_web(self, uuid, json_out=True):
        """
        Returns a networkx graph containing the matching node with its
        current (unexpired) state attributes merged in.
        :param uuid: The node name.
        :param json_out: If True return node-link JSON, else the DiGraph.
        :return: Graph containing node, or None if not found.
        """
        graph = DiGraph()
        # r.to > now selects only the living STATE edge.
        node_query = "match (i)-[r:STATE]->(s) where i.name='{}' and r.to>{}" \
                     " return i, s".format(uuid, str(time.time()))
        query_result = self.graph_db.run(node_query)
        result = list(query_result)
        if result:
            records = result[0]
            node = dict(records[0])
            state_attrs = dict(records[1])
            # 'geo' is stored as a JSON string; decode it for output.
            if 'geo' in state_attrs:
                state_attrs['geo'] = json.loads(state_attrs['geo'])
            state_attributes = self._unique_attribute_names(IDEN_PROPS,
                                                            state_attrs,
                                                            node["type"])
            node.update(state_attributes)
            graph.add_node(uuid, node)
            if json_out:
                graph = json.dumps(json_graph.node_link_data(graph))
            return graph
        return None
def get_node_by_uuid(self, node_id):
"""
Retrieve a node from the neo4j database.
:param node_id: THe node to retrieve.
:return: The node
"""
selector = NodeSelector(self.graph_db)
selected = list(selector.select().where('_.name="{}"'.format(node_id)))
if selected:
return selected[0]
return None
    def delete_all(self):
        """
        Delete all nodes and edges from the database.

        Unlike the expiry-based deletes above, this is a hard, irreversible
        wipe of the whole landscape history.
        """
        self.graph_db.delete_all()
def _get_edge(self, src_node, dest_node, timestamp, label=None):
"""
Returns first edge which has not expired.
:param src_node: Source Node.
:param dest_node: Destination Node.
:param timestamp: Edge must hae been alive at this time.
:param label: Edge Description.
:return: Edge instance.
"""
timestamp = round(float(timestamp), 2)
edges = self.graph_db.match(src_node, label, dest_node)
for edge in edges:
edge_from = round(float(edge['from']), 2)
edge_to = float(edge['to'])
if edge_from <= timestamp and edge_to == EOT_VALUE:
return edge
return None
@staticmethod
def _create_edge(source_node, destination_node, timestamp, label):
"""
Creates a directed edge from the source node to the destination node.
:param source_node: Source Node.
:param destination_node: Destination Node.
:param timestamp: Time of source node creation.
:param label: Edge description.
:return: Returns newly created edge.
"""
edge = Relationship(source_node, label, destination_node)
edge["from"] = int(timestamp)
edge["to"] = int(EOT_VALUE)
return edge
    @staticmethod
    def _expire_edge(edge, timestamp):
        """
        Expires the relationship. This effectively deletes the node which this
        relationship was pointing to.
        :param edge: Relationship to expire.
        :param timestamp: Time at which the edge was expired (truncated to int).
        :return: The expired edge (modified in place; not yet pushed to DB).
        """
        edge['to'] = int(timestamp)
        return edge
def _get_state_node(self, identity_node, timestamp):
"""
Return the latest living state.
:param identity_node: The identity node which the state is attached to.
:param timestamp: Time at which the state should have been alive.
:return: The latest, living state node.
"""
states = self._get_successors(identity_node, 'STATE', timestamp)
if states:
return states[0]
return None
    def _existing_relation(self, src_node, dst_node, timestamp, label=None):
        """
        Return the living relation from src_node to dst_node, if one exists.
        :param src_node: Source node.
        :param dst_node: Destination node.
        :param timestamp: Epoch time at which the relation must be alive.
        :param label: Only consider relations of this type.
        :return: The matching relation, or None.
        """
        end_nodes = self._get_successors(src_node, label=label,
                                         timestamp=timestamp)
        for end_node, relation in end_nodes:
            # Distinct fallback defaults so two nodes missing 'name' never
            # compare equal by accident.
            dst_uuid = dst_node.dict().get('name', 'dst_uuid')
            end_uuid = end_node.dict().get('name', 'end_uuid')
            if dst_uuid == end_uuid:
                return relation
        return None
def _get_successors(self, identity_node, label=None, timestamp=None):
"""
Get a list of successors to a node. If no timestamp is provided then
all successors are returned.
:param identity_node: Start node.
:param label: Only return successors with this type of relationship.
:param timestamp: epoch timestamp. If used will only find living nodes.
:return List: List of successors.
"""
timestamp = round(float(timestamp), 2) if timestamp else timestamp
results = []
for edge in self.graph_db.match(identity_node, label):
if timestamp:
edge_from = round(float(edge['from']), 2)
edge_to = float(edge['to'])
# edge existed at timestamp and edge still alive.
if edge_from <= timestamp and edge_to == EOT_VALUE:
edge_end_node = edge.end_node()
results.append((edge_end_node, edge))
else:
edge_end_node = edge.end_node()
results.append((edge_end_node, edge))
return results
def _get_predecessors(self, identity_node, label=None, timestamp=None):
"""
Get a list of predecessors to a node. If no timestamp is provided then
all predecessors are returned.
:param identity_node: End node.
:param label: Only return predecessors with this type of relationship.
:param timestamp: epoch timestamp. If used will only find living nodes.
:return List: List of predecessors and their edges.
"""
results = []
timestamp = round(float(timestamp), 2) if timestamp else timestamp
edges_in = self.graph_db.match(end_node=identity_node, rel_type=label)
for edge in edges_in:
if timestamp:
edge_from = round(float(edge['from']), 2)
edge_to = float(edge['to'])
# edge existed at timestamp and edge still alive.
if edge_from <= timestamp and edge_to == EOT_VALUE:
edge_start_node = edge.start_node()
results.append((edge_start_node, edge))
else:
edge_start_node = edge.start_node()
results.append((edge_start_node, edge))
return results
    def get_subgraph(self, node_id, timestmp=None, timeframe=0, json_out=True):
        """
        Return the subgraph reachable from a node, restricted to edges alive
        throughout the given time window.
        :param node_id: Name of the root node.
        :param timestmp: Epoch start of the window (defaults to now).
        :param timeframe: Window length in seconds.
        :param json_out: If True return node-link JSON, else the DiGraph.
        :return: Subgraph (JSON string or DiGraph), or None if empty.
        """
        timestmp = timestmp or time.time()
        result = DiGraph()
        endtime = int(float(timestmp)) + int(float(timeframe))
        tmp = 'MATCH (n)-[rels*]->(m) ' \
              'WHERE n.name="{0}" AND ALL ' \
              '(rel in rels WHERE rel.from <= {1} AND rel.to >= {2} ) ' \
              'RETURN DISTINCT n, rels, m' \
              ''.format(node_id, str(timestmp), str(endtime))
        LOG.info(tmp)
        query_result = self.graph_db.run(tmp)
        first = True
        relations = list()
        for record in query_result:
            # The root node (record index 0) repeats in every record; only add
            # it from the first one.
            if first:
                nodes_index = [0, 2]
                first = False
            else:
                nodes_index = [2]
            for i in nodes_index:
                labels = list(record[i].labels())
                if labels:
                    label = labels[0]
                else:
                    label = 'state'
                # State nodes are merged into their identity node below,
                # not added as graph nodes themselves.
                if 'state' not in label:
                    tmp = dict(record[i])
                    node_id = tmp.get('name', None)
                    result.add_node(node_id, tmp)
            for relation in record[1]:
                if relation.type() == 'STATE':
                    node_id = dict(relation.start_node()).get('name', None)
                    if node_id is not None:
                        if node_id in result.node:
                            prefix = result.node[node_id]["type"]
                        else:
                            prefix = "component"
                        state = dict(relation.end_node())
                        # 'geo' is stored as a JSON string; decode for output.
                        if 'geo' in state:
                            state['geo'] = json.loads(state['geo'])
                        state_attrs = self._unique_attribute_names(IDEN_PROPS,
                                                                   state,
                                                                   prefix)
                        result.add_node(node_id, state_attrs)
                else:
                    relations.append(relation)
        # Add non-STATE edges only when both endpoints made it into the graph.
        # NOTE(review): `result.node` and positional attribute dicts are
        # networkx 1.x APIs — confirm the pinned networkx version.
        for rel in relations:
            src_uuid = dict(rel.start_node()).get('name', 'None')
            dst_uuid = dict(rel.end_node()).get('name', 'None')
            if result.has_node(src_uuid) and result.has_node(dst_uuid):
                label = rel.type()
                rel_attr = dict(rel)
                result.add_edge(src_uuid, dst_uuid, rel_attr, label=label)
        if result.node:
            if json_out:
                js_gr = json_graph.node_link_data(result)
                result = json.dumps(js_gr)
            return result
        return None
    def get_graph(self, timestamp=None, timeframe=0, json_out=True):
        """
        Return the full landscape graph alive in the given time window.
        :param timestamp: Epoch start of the window (defaults to now).
        :param timeframe: Window length in seconds.
        :param json_out: If True return node-link JSON, else the DiGraph.
        :return: Graph as JSON string or DiGraph.
        """
        if timestamp is None:
            timestamp = time.time()
        endtime = timestamp + timeframe
        result = DiGraph()
        # First pass: living identity/state pairs become graph nodes.
        tmp = 'MATCH (idnode)-[rs:STATE]->(statenode) WHERE (rs.from <= {0} ' \
              'AND rs.to > {1}) ' \
              'RETURN idnode, statenode;'.format(str(timestamp), str(endtime))
        for idnode, statenode in self.graph_db.run(tmp):
            uuid = dict(idnode).get('name', None)
            if uuid is not None:
                attr = dict(idnode)
                state_attrs = dict(statenode)
                # 'geo' is stored as a JSON string; decode it for output.
                if 'geo' in state_attrs:
                    state_attrs['geo'] = json.loads(state_attrs['geo'])
                state_attributes = self._unique_attribute_names(IDEN_PROPS,
                                                                state_attrs,
                                                                attr["type"])
                attr.update(state_attributes)
                result.add_node(uuid, attr)
        # Second pass: all living non-STATE relationships become graph edges.
        all_edges = "match ()-[r]-() where type(r) <> 'STATE' and r.from <= " \
                    "{0} and r.to > {1} return r".format(str(timestamp),
                                                         str(endtime))
        for edge in self.graph_db.run(all_edges):
            rel_attr = dict(edge[0])
            src_uuid = edge[0].start_node()["name"]
            dst_uuid = edge[0].end_node()["name"]
            result.add_edge(src_uuid, dst_uuid, rel_attr, label=edge[0].type())
        if json_out:
            js_gr = json_graph.node_link_data(result)
            result = json.dumps(js_gr)
        return result
def __getattribute__(self, item):
if item == "graph_db" and self._connection_elapsed():
LOG.info("Refreshing connection.")
return self._get_db_connection()
return object.__getattribute__(self, item)
def _connection_elapsed(self):
"""
Returns true if the connection to the database has reached the timeout
set in the constructor.
:return: True if the connection timed out.
"""
elapsed_time = time.time() - self.graph_db_refreshed
if elapsed_time > self.connection_timeout:
return True
return False
    def _get_db_connection(self):
        """
        Returns a connection to the NEO4J Database.

        Side effect: resets `graph_db_refreshed` to now, which restarts the
        connection-timeout clock used by `_connection_elapsed`.
        :return: A connection to the NEO4J Database.
        """
        url = self.conf_manager.get_neo4j_url()
        user, password, use_bolt = self.conf_manager.get_neo4j_credentials()
        self.graph_db_refreshed = time.time()
        return Graph(url, user=user, password=password, bolt=use_bolt)
def _unique_attribute_names(self, immutable_keys, attributes, prefix):
"""
Ensures that the attributes do not contain the same keys as an key in
the immutable key list.
:param immutable_keys: List of keys that cannot be in attributes.
:param attributes: Dictionary of attributes.
:param prefix: Prefix for any key in attributes that clashes with the
immutable_keys.
:return: Attributes which are modified if they are clashing with
immutable_keys.
"""
attrs = copy.deepcopy(attributes)
matching_keys = set(immutable_keys).intersection(set(attrs))
for key in matching_keys:
unique_key = self._unique_key(key, attrs.keys(), prefix)
attrs[unique_key] = attrs.pop(key)
return attrs
@staticmethod
def _unique_key(clashing_key, keys, prefix):
"""
Returns a unique key from those already in keys.
:param clashing_key: THe key to rename.
:param keys: list of keys that the clashing key must be unique against.
:param prefix: prefix for the clashing key.
:return: Unique Key
:raise: AttributeError if a unique key cannot be generated.
"""
base_key = "{}-{}".format(prefix, clashing_key)
if base_key not in keys:
return base_key
for i in range(1, 100):
unique_key = "{}_{}".format(base_key, i)
if unique_key not in keys:
return unique_key
raise AttributeError("Unable to create unique attribute key")
def load_test_landscape(self, graph_file):
    """
    Loads the test graph into the database, for integration tests.
    :param graph_file: Path to a JSON node-link file holding the test graph.
    """
    # Close the file handle deterministically instead of leaking it.
    with open(graph_file) as graph_fd:
        graph_data = json.load(graph_fd)
    graph = json_graph.node_link_graph(graph_data, directed=True)
    node_lookup = {}
    for node_id, node_data in graph.nodes(data=True):
        node_attrs = _format_node(node_data)
        # State nodes encode their category in the node id itself.
        if "_state_" in node_id:
            category = node_id.split("_")[0] + "_state"
        else:
            category = node_attrs["category"]
        node = Node(category, **node_attrs)
        node_lookup[node_id] = node
    rels = []
    for src, dest, edge_attrs in graph.edges(data=True):
        label = edge_attrs["label"]
        del edge_attrs["label"]
        src_node = node_lookup[src]
        dest_node = node_lookup[dest]
        rel = Relationship(src_node, label, dest_node, **edge_attrs)
        rels.append(rel)
    transaction = self.graph_db.begin()
    # BUG FIX: dict.iteritems() is Python 2 only and raised AttributeError
    # under Python 3; the key is unused anyway, so iterate the values.
    for node_object in node_lookup.values():
        transaction.create(node_object)
    for rel_object in rels:
        transaction.create(rel_object)
    transaction.commit()
def _format_node(node):
"""
Prepares a node for insertion into the graph database. Dictionaries cannot
be inserted.
:param node: Node to format.
:return: Formatted node.
"""
formatted_node = dict()
for prop in node:
if isinstance(node[prop], dict) or isinstance(node[prop], list):
formatted_node[prop] = json.dumps(node[prop])
else:
formatted_node[prop] = node[prop]
return formatted_node
def _attributes_equal(new_attributes, old_attributes):
"""
Compare attributes (dict) by value to determine if a state is changed
:param new_attributes: dict containing attributes
:param old_attributes: dict containing attributes
:return bool: result of the comparison between new_attributes and
old attributes
"""
for key in new_attributes:
if key not in old_attributes:
return False
elif new_attributes[key] != old_attributes[key]:
return False
return True
|
|
import torch
import numpy as np
def atomic_orbital_norm(basis):
    """Computes the norm of the atomic orbitals
    Args:
        basis (Namespace): basis object of the Molecule instance
    Returns:
        torch.tensor: Norm of the atomic orbitals
    Raises:
        ValueError: if the radial type is neither sto* nor gto*.
    Examples::
        >>> mol = Molecule('h2.xyz', basis='dzp', calculator='adf')
        >>> norm = atomic_orbital_norm(mol.basis)
    """
    # spherical
    if basis.harmonics_type == 'sph':
        if basis.radial_type.startswith('sto'):
            return norm_slater_spherical(basis.bas_n, basis.bas_exp)
        elif basis.radial_type.startswith('gto'):
            return norm_gaussian_spherical(basis.bas_n, basis.bas_exp)
        else:
            # BUG FIX: the offending radial type was never interpolated
            # into the message (missing % argument).
            raise ValueError(
                '%s is not a valid radial_type' % basis.radial_type)
    # cartesian
    elif basis.harmonics_type == 'cart':
        if basis.radial_type.startswith('sto'):
            return norm_slater_cartesian(
                basis.bas_kx,
                basis.bas_ky,
                basis.bas_kz,
                basis.bas_kr,
                basis.bas_exp)
        elif basis.radial_type.startswith('gto'):
            return norm_gaussian_cartesian(
                basis.bas_kx, basis.bas_ky, basis.bas_kz, basis.bas_exp)
        else:
            raise ValueError(
                '%s is not a valid radial_type' % basis.radial_type)
    # NOTE(review): an unknown harmonics_type silently returns None, as in
    # the original implementation -- confirm whether it should raise too.
def norm_slater_spherical(bas_n, bas_exp):
    """Normalization of STOs with Spherical Harmonics. \n
    * www.theochem.ru.nl/~pwormer/Knowino/knowino.org/wiki/Slater_orbital \n
    * C Filippi, JCP 105, 213 1996 \n
    * Monte Carlo Methods in Ab Inition Quantum Chemistry, B.L. Hammond
    Args:
        bas_n (torch.tensor): principal quantum number
        bas_exp (torch.tensor): slater exponents
    Returns:
        torch.tensor: normalization factor
    """
    # BUG FIX: np.math was an undocumented alias of the stdlib math module
    # and was removed in NumPy 2.0 -- call math.factorial directly.  int()
    # also makes float-valued quantum-number tensors acceptable.
    import math
    nfact = torch.as_tensor([math.factorial(2 * int(n))
                             for n in bas_n], dtype=torch.get_default_dtype())
    return (2 * bas_exp)**bas_n * torch.sqrt(2 * bas_exp / nfact)
def norm_gaussian_spherical(bas_n, bas_exp):
    """Normalization of GTOs with spherical harmonics. \n
    * Computational Quantum Chemistry: An interactive Introduction to basis set theory \n
    eq : 1.14 page 23.
    Args:
        bas_n (torch.tensor): principal quantum number
        bas_exp (torch.tensor): slater exponents
    Returns:
        torch.tensor: normalization factor
    """
    from scipy.special import factorial2 as f2
    # Shift to the effective principal quantum number used by eq. 1.14.
    n_eff = bas_n + 1.
    alpha_term = bas_exp ** (0.25 * (2. * n_eff + 1.))
    power_of_two = 2 ** (2. * n_eff + 3. / 2)
    dfact_term = torch.as_tensor(
        f2(2 * n_eff.int() - 1) * np.pi ** 0.5
    ).type(torch.get_default_dtype())
    return torch.sqrt(power_of_two / dfact_term) * alpha_term
def norm_slater_cartesian(a, b, c, n, exp):
    """Normalization of STOs with cartesian harmonics. \n
    * Monte Carlo Methods in Ab Initio Quantum Chemistry page 279
    Args:
        a (torch.tensor): exponent of x
        b (torch.tensor): exponent of y
        c (torch.tensor): exponent of z
        n (torch.tensor): exponent of r
        exp (torch.tensor): Slater exponent
    Returns:
        torch.tensor: normalization factor
    """
    from scipy.special import factorial2 as f2
    # BUG FIX: np.math was removed in NumPy 2.0; use stdlib math.  int()
    # keeps float-valued (but integral) angular exponents working, which
    # math.factorial no longer accepts since Python 3.11.
    import math
    lvals = a + b + c + n + 1.
    lfact = torch.as_tensor([math.factorial(2 * int(i))
                             for i in lvals]).type(torch.get_default_dtype())
    prefact = 4 * np.pi * lfact / ((2 * exp)**(2 * lvals + 1))
    num = torch.as_tensor(f2(2 * a.astype('int') - 1) *
                          f2(2 * b.astype('int') - 1) *
                          f2(2 * c.astype('int') - 1)
                          ).type(torch.get_default_dtype())
    denom = torch.as_tensor(
        f2((2 * a + 2 * b + 2 * c + 1).astype('int')
           )).type(torch.get_default_dtype())
    return torch.sqrt(1. / (prefact * num / denom))
def norm_gaussian_cartesian(a, b, c, exp):
    """Normalization of GTOs with cartesian harmonics. \n
    * Monte Carlo Methods in Ab Initio Quantum Chemistry page 279
    Args:
        a (torch.tensor): exponent of x
        b (torch.tensor): exponent of y
        c (torch.tensor): exponent of z
        exp (torch.tensor): Slater exponent
    Returns:
        torch.tensor: normalization factor
    """
    from scipy.special import factorial2 as f2
    prefactor = torch.as_tensor((2 * exp / np.pi)**(0.75))

    def _axis_term(k):
        # Per-axis contribution: (4*exp)^(k/2) / sqrt((2k-1)!!)
        odd = (2 * k - 1).astype('int')
        return (4 * exp)**(k / 2) / torch.sqrt(torch.as_tensor(f2(odd)))

    product = prefactor * _axis_term(a) * _axis_term(b) * _axis_term(c)
    return product.type(torch.get_default_dtype())
|
|
# -*- encoding: utf-8 -*-
"""
GLM solver tests using Kaggle datasets.
:copyright: 2017 H2O.ai, Inc.
:license: Apache License Version 2.0 (see LICENSE for details)
"""
import time
import sys
import os
import numpy as np
import logging
import feather
# Debugging aid: show the import search path so missing h2o4gpu/feather
# installs are easy to diagnose from the CI log.
print(sys.path)
from h2o4gpu.util.testing_utils import find_file, run_glm
# Verbose logging for the whole test module.
logging.basicConfig(level=logging.DEBUG)
def fun(nGPUs=1, nFolds=1, nLambdas=100, nAlphas=8, validFraction=0.2,
        verbose=0, family="elasticnet", print_all_errors=False,
        tolerance=.001):
    """Fit a GLM on the Kaggle BNP dataset and sanity-check the logloss.

    Reads ./data/bnp.feather, uses the last column as the target, runs
    run_glm and asserts the leading alpha/lambda logloss scores stay
    below fixed bounds.

    :param nGPUs: number of GPUs for the solver.
    :param nFolds: number of cross-validation folds.
    :param nLambdas: number of lambdas on the regularization path.
    :param nAlphas: number of alphas on the elastic-net grid.
    :param validFraction: fraction of rows held out for validation.
    :param verbose: verbosity level passed to run_glm.
    :param family: GLM family, e.g. "elasticnet" or "logistic".
    :param print_all_errors: whether run_glm prints the full error table.
    :param tolerance: solver tolerance passed to run_glm.
    :raise AssertionError: when a checked logloss exceeds its bound.
    """
    # Label the run after the *calling* test function (frame 1), so output
    # is attributed to the individual test that invoked this helper.
    name = sys._getframe(1).f_code.co_name
    t = time.time()
    print("cwd: %s" % (os.getcwd()))
    sys.stdout.flush()
    print("Reading Data")
    df = feather.read_dataframe("./data/bnp.feather")
    print(df.shape)
    X = np.array(df.iloc[:, :df.shape[1] - 1], dtype='float32', order='C')
    y = np.array(df.iloc[:, df.shape[1] - 1], dtype='float32', order='C')
    print("Y")
    print(y)
    t1 = time.time()
    logloss_train, logloss_test = run_glm(
        X, y, nGPUs=nGPUs, nlambda=nLambdas, nfolds=nFolds, nalpha=nAlphas,
        validFraction=validFraction, verbose=verbose, family=family,
        print_all_errors=print_all_errors, tolerance=tolerance, name=name)
    # check logloss
    print(logloss_train[0, 0])
    print(logloss_train[0, 1])
    print(logloss_train[0, 2])
    print(logloss_test[0, 2])
    sys.stdout.flush()
    # Always checking the first 3 alphas with specific logloss scores (.48,.44)
    if validFraction == 0.0 and nFolds > 0:
        assert logloss_train[0, 0] < .49
        assert logloss_train[0, 1] < .49
        assert logloss_train[1, 0] < .52
        assert logloss_train[1, 1] < .52
        assert logloss_train[2, 0] < .49
        assert logloss_train[2, 1] < .49
    if validFraction > 0.0:
        assert logloss_train[0, 0] < .49
        assert logloss_train[0, 1] < .49
        assert logloss_train[0, 2] < .49
        assert logloss_train[1, 0] < .50
        assert logloss_train[1, 1] < .51
        assert logloss_train[1, 2] < .51
        assert logloss_train[2, 0] < .49
        assert logloss_train[2, 1] < .49
        assert logloss_train[2, 2] < .49
    sys.stdout.flush()
    # BUG FIX: the newline escape was written as '/n' instead of '\n'.
    print('\n Total execution time:%d' % (time.time() - t1))
    print("TEST PASSED")
    sys.stdout.flush()
    print("Time taken: {}".format(time.time() - t))
    print("DONE.")
    sys.stdout.flush()
def test_glm_bnp_gpu_fold5_quick_train():
    """Quick 5-fold logistic BNP run with no validation split."""
    fun(nGPUs=1, nFolds=5, nLambdas=5, nAlphas=3, validFraction=0.0,
        verbose=0, family="logistic", print_all_errors=False, tolerance=.03)
def test_glm_bnp_gpu_fold5_quick_valid():
    """Quick 5-fold logistic BNP run with a 20% validation split."""
    fun(nGPUs=1, nFolds=5, nLambdas=5, nAlphas=3, validFraction=0.2,
        verbose=0, family="logistic", print_all_errors=False, tolerance=.03)
# Allow running this module directly (outside pytest) as a smoke test.
if __name__ == '__main__':
    test_glm_bnp_gpu_fold5_quick_train()
    test_glm_bnp_gpu_fold5_quick_valid()
|
|
import re
import numpy as np
import vcfpy
from hgvs import edit
from ncls import NCLS
class GenomePosition():
    """A half-open genomic interval ``chrom:[start, end)``.

    ``start`` is 0-based inclusive and ``end`` 0-based exclusive; the
    string form (and ``from_str``) uses the conventional 1-based
    inclusive "chrom:start-end" notation, hence the +/-1 conversions.
    """

    genome_pos_pattern = re.compile(r"(.+):(\d+)-(\d+)")

    def __init__(self, chrom, start, end):
        self.chrom = chrom
        self.start = start
        self.end = end

    @classmethod
    def from_str(cls, pos_str):
        """Parse a 1-based "chrom:start-end" string; ``None`` if malformed."""
        match = cls.genome_pos_pattern.match(pos_str)
        if not match:
            return None
        return cls(match[1], int(match[2]) - 1, int(match[3]))

    @classmethod
    def from_vcf_record(cls, record):
        """Position covering every range affected by the record's ALTs.

        NOTE(review): assumes a vcfpy-style record exposing ``.CHROM``,
        ``.begin``, ``.REF`` and ``.ALT`` -- confirm against the caller.
        """
        CHROM = record.CHROM.replace("chr", "")
        affected_ranges = [
            vcf_alt_affected_range(record.REF, alt) for alt in record.ALT
        ]
        # With no ALTs, fall back to a single-base interval at `begin`.
        start = record.begin + min(map(lambda r: r.start, affected_ranges),
                                   default=0)
        end = record.begin + max(map(lambda r: r.stop, affected_ranges),
                                 default=1)
        return cls(CHROM, start, end)

    @classmethod
    def from_vcf_record_pos(cls, record):
        """Single-base position at the record's start coordinate."""
        CHROM = record.CHROM.replace("chr", "")
        return cls(CHROM, record.begin, record.begin + 1)

    @classmethod
    def from_gtf_record(cls, record):
        """Build from a GTF/GFF row (1-based inclusive columns 4 and 5)."""
        return cls(record[0].replace("chr", ""),
                   int(record[3]) - 1, int(record[4]))

    def __eq__(self, other):
        return self.chrom == other.chrom and self.start == other.start and \
            self.end == other.end

    def __hash__(self):
        # BUG FIX: defining __eq__ alone sets __hash__ to None in Python 3,
        # making positions unusable as dict/set keys.  Hash exactly the
        # fields that __eq__ compares.
        return hash((self.chrom, self.start, self.end))

    def __repr__(self):
        return "%s:%d-%d" % (self.chrom, self.start + 1, self.end)

    def __str__(self):
        return "%s:%d-%d" % (self.chrom, self.start + 1, self.end)

    def __len__(self):
        return self.end - self.start

    def __contains__(self, other):
        """True when ``other`` lies entirely within this interval."""
        same_chrom = other.chrom == self.chrom
        same_start = other.start >= self.start
        same_end = other.end <= self.end
        return same_chrom and same_start and same_end

    def __and__(self, other):
        """Intersection of two positions; ``None`` when they don't overlap."""
        if self.chrom != other.chrom:
            return None
        if other.start >= self.end or self.start >= other.end:
            return None
        return self.__class__(self.chrom, max(self.start, other.start),
                              min(self.end, other.end))

    # FIXME: This method may be too overloaded...
    def shifted_by(self, start, end=None):
        """Return a copy shifted by ``start``/``end`` (or by a range's
        bounds when ``start`` is a range)."""
        if isinstance(start, range):
            start, end = start.start, start.stop
        if end is None:
            end = start
        return self.__class__(self.chrom, self.start + start, self.end + end)

    def slice_within(self, other):
        """Slice addressing this interval inside ``other``; ``None`` when
        not fully contained (or on another chromosome)."""
        if self.chrom != other.chrom:
            return None
        if self.start < other.start or self.end > other.end:
            return None
        return slice(self.start - other.start, self.end - other.start)
class GenomeIntervalTree():
    """Per-chromosome NCLS interval trees over arbitrary records.

    ``predicate`` maps a record to a :class:`GenomePosition` (or ``None``
    to exclude the record); one NCLS tree is built per chromosome and
    queries return the stored records.
    """

    def __init__(self, predicate, records):
        self.predicate = predicate
        self.records = []
        working_tree_map = {}
        idx = 0
        for record in records:
            genome_pos = predicate(record)
            if genome_pos is None:
                continue
            chrom = genome_pos.chrom
            if chrom not in working_tree_map:
                # (starts, ends, ids)
                working_tree_map[chrom] = ([], [], [])
            starts, ends, ids = working_tree_map[chrom]
            starts.append(genome_pos.start)
            ends.append(genome_pos.end)
            ids.append(idx)
            self.records.append(record)
            idx += 1
        tree_map = {}
        for chrom, (starts, ends, ids) in working_tree_map.items():
            # BUG FIX: np.long was an alias of the builtin int (so int32 on
            # Windows) and was removed entirely in NumPy 1.24.  NCLS needs
            # 64-bit coordinates, so request np.int64 explicitly.
            tree_map[chrom] = NCLS(np.array(starts, dtype=np.int64),
                                   np.array(ends, dtype=np.int64),
                                   np.array(ids, dtype=np.int64))
        self.tree_map = tree_map

    def _intervals(self, chrom):
        """Raw NCLS intervals for ``chrom`` (KeyError when absent)."""
        return self.tree_map[chrom].intervals()

    def _make_query_params(self, genome_pos_list):
        """Build the (starts, ends, ids) arrays NCLS batch queries expect."""
        starts = np.array([genome_pos.start for genome_pos in genome_pos_list])
        ends = np.array([genome_pos.end for genome_pos in genome_pos_list])
        ids = np.array(list(range(len(genome_pos_list))))
        return (starts, ends, ids)

    def _pick_best_record(self, from_ids=None, for_pos=None):
        """Record among ``from_ids`` with the highest Jaccard overlap
        against ``for_pos``; ``None`` when ``from_ids`` is empty."""
        if len(from_ids) < 1:
            return None
        if len(from_ids) == 1:
            return self.records[from_ids[0]]
        records = [self.records[record_id] for record_id in from_ids]
        scored_records = [(record,
                           self._compute_jaccard_index(for_pos,
                                                       self.predicate(record)))
                          for record in records]
        sorted_records = sorted(scored_records,
                                key=lambda tup: tup[1],
                                reverse=True)
        return sorted_records[0][0]

    def _compute_jaccard_index(self, pos_a, pos_b):
        """Jaccard similarity of two positions (0 when disjoint)."""
        intersection = pos_a & pos_b
        if not intersection:
            return 0
        # The following is equivalent to |A ∩ B| / |A ∪ B|, but avoids
        # computing a union.
        # |A ∩ B| / (|A| + |B| - |A ∩ B|)
        return len(intersection) / (len(pos_a) + len(pos_b) -
                                    len(intersection))

    def has_overlap(self, genome_pos):
        """True if any stored record overlaps ``genome_pos``."""
        tree = self.tree_map.get(genome_pos.chrom)
        if not tree:
            return False
        return tree.has_overlap(genome_pos.start, genome_pos.end)

    def get_first_overlap(self, genome_pos):
        """First record overlapping ``genome_pos``, or ``None``."""
        tree = self.tree_map.get(genome_pos.chrom)
        if not tree:
            return None
        qparams = self._make_query_params([genome_pos])
        _, record_ids = tree.first_overlap_both(*qparams)
        if len(record_ids) < 1:
            return None
        return self.records[record_ids[0]]

    def get_best_overlap(self, genome_pos):
        """Overlapping record with the highest Jaccard score, or ``None``."""
        tree = self.tree_map.get(genome_pos.chrom)
        if not tree:
            return None
        qparams = self._make_query_params([genome_pos])
        _, record_ids = tree.all_overlaps_both(*qparams)
        return self._pick_best_record(from_ids=record_ids, for_pos=genome_pos)

    def get_all_overlaps(self, genome_pos):
        """All records overlapping ``genome_pos`` (possibly empty)."""
        tree = self.tree_map.get(genome_pos.chrom)
        if not tree:
            return []
        qparams = self._make_query_params([genome_pos])
        _, record_ids = tree.all_overlaps_both(*qparams)
        # Defensive check: NCLS should never hand back out-of-range ids,
        # but malformed VCF input has produced them in practice.
        if any(map(lambda r: r >= len(self.records), record_ids)):
            print("some VCF records may be malformed")
            print("continuing...")
        return [self.records[record_id] for record_id in record_ids]

    def get_first_containment(self, genome_pos):
        """First record fully containing ``genome_pos``, or ``None``."""
        tree = self.tree_map.get(genome_pos.chrom)
        if not tree:
            return None
        qparams = self._make_query_params([genome_pos])
        _, record_ids = tree.all_containments_both(*qparams)
        if len(record_ids) < 1:
            return None
        return self.records[record_ids[0]]

    def get_best_containment(self, genome_pos):
        """Containing record with the highest Jaccard score, or ``None``."""
        tree = self.tree_map.get(genome_pos.chrom)
        if not tree:
            return None
        qparams = self._make_query_params([genome_pos])
        _, record_ids = tree.all_containments_both(*qparams)
        return self._pick_best_record(from_ids=record_ids, for_pos=genome_pos)

    def get_all_containments(self, genome_pos):
        """All records fully containing ``genome_pos`` (possibly empty)."""
        tree = self.tree_map.get(genome_pos.chrom)
        if not tree:
            return []
        qparams = self._make_query_params([genome_pos])
        _, record_ids = tree.all_containments_both(*qparams)
        return [self.records[record_id] for record_id in record_ids]
class GFFFeature():
    """One GTF/GFF feature row parsed into typed attributes."""

    @classmethod
    def parse_gff_attributes(cls, attr_str):
        """Parse the key/value attribute column into a dict.

        Quoted values are unquoted to strings, unquoted ones become ints.
        """
        attr_dict = {}
        for kv_str in re.split('; ?', attr_str):
            if not kv_str:
                continue
            key, value = kv_str.split(' ')
            attr_dict[key] = value[1:-1] if '"' in value else int(value)
        return attr_dict

    @property
    def is_forward_stranded(self):
        """True when the feature lies on the '+' strand."""
        return self.strand == '+'

    @property
    def is_reverse_stranded(self):
        """True when the feature lies on the '-' strand."""
        return self.strand == '-'

    def __init__(self, record):
        self.pos = GenomePosition.from_gtf_record(record)
        self.source = record[1]
        self.type = record[2]
        # '.' marks missing score/phase columns in GTF/GFF.
        self.score = float(record[5]) if record[5] != '.' else None
        self.strand = record[6]
        self.phase = int(record[7]) if record[7] != '.' else None
        self.attributes = self.parse_gff_attributes(record[8])
def vcf_alt_affected_range(ref, alt):
    """Range of REF offsets affected by one ALT allele.

    Raises NotImplementedError for ALT types other than the simple
    substitution/indel kinds handled below.
    """
    # TODO: This method currently only deals with simple substitutions.
    alt_type = alt.type
    if alt_type == vcfpy.INS:
        return range(2)
    if alt_type == vcfpy.DEL:
        return range(1, len(ref))
    if alt_type in (vcfpy.SNV, vcfpy.MNV, vcfpy.INDEL):
        return range(len(ref))
    raise NotImplementedError()
def _seqs_are_equal(seq_a, seq_b, wildcard=None):
if not len(seq_a) == len(seq_b):
return False
for a, b in zip(seq_a, seq_b):
if a == wildcard or b == wildcard:
continue
if not a == b:
return False
return True
# This could be extended for other types of `SequenceVariant`s in the future if
# needed.
def sequence_variants_are_equivalent(seqvar_a,
                                     seqvar_b,
                                     strict_uncertain=False,
                                     strict_unknown=True,
                                     strict_silent=False):
    """Check if `seqvar_a` and `seqvar_b` are equivalent.
    Currently only works correctly for protein-level variants.
    Parameters
    ----------
    strict_uncertain : bool
        True if variant (position/edit) uncertainty is factored into
        this equivalency check. (default False)
    strict_unknown : bool
        True if unknown sequence units (e.g. 'X' for amino acids) should
        not match known sequence units. (default True)
    strict_silent : bool
        True if synonymous variants (e.g. 'Arg17=') should not match
        otherwise equivalent variants. (default False)
    """
    # Different accessions or different variant kinds can never match.
    if not seqvar_a.ac == seqvar_b.ac:
        return False
    if not seqvar_a.type == seqvar_b.type:
        return False
    sv_type = seqvar_a.type
    # Only protein-level ("p.") variants are handled below.
    if sv_type not in ["p"]:
        raise NotImplementedError()
    posedit_a, posedit_b = seqvar_a.posedit, seqvar_b.posedit
    # A missing posedit only matches another missing posedit.
    if (posedit_a is None) or (posedit_b is None):
        return posedit_a is None and posedit_b is None
    if strict_uncertain and not posedit_a.uncertain == posedit_b.uncertain:
        return False
    pos_a, pos_b = posedit_a.pos, posedit_b.pos
    # TODO: Handle positional uncertainty
    if not pos_a == pos_b:
        return False
    edit_a, edit_b = posedit_a.edit, posedit_b.edit
    # Mixed edit classes (e.g. AARefAlt vs AAFs) are never equivalent.
    if not type(edit_a) is type(edit_b):
        # print(type(edit_a), type(edit_b)) # for debugging purposes
        return False
    # Sequence compare honouring the unknown-residue wildcard 'X' unless
    # strict_unknown is requested.
    _seqs_cmp = lambda a, b: _seqs_are_equal(
        a, b, wildcard=(None if strict_unknown else 'X'))
    if isinstance(edit_a, (edit.AARefAlt, edit.AAFs, edit.AAExt)):
        # NOTE(review): edit_a cannot be None inside this isinstance()
        # branch, so this None-check looks unreachable -- confirm intent.
        if (edit_a is None) or (edit_b is None):
            return edit_a is None and edit_b is None
        if not _seqs_cmp(edit_a.ref, edit_b.ref):
            return False
        if not _seqs_cmp(edit_a.alt, edit_b.alt):
            return False
        # Optionally refuse to match synonymous (empty ref/alt) variants.
        if strict_silent and (not edit_a.ref) and (not edit_a.alt):
            return False
    else:
        raise NotImplementedError()
    # Frameshift / extension edits must also agree on their length...
    if isinstance(edit_a, (edit.AAFs, edit.AAExt)):
        if not edit_a.length == edit_b.length:
            return False
    # ...and extensions on their terminal amino acid.
    if isinstance(edit_b, (edit.AAExt)):
        if not _seqs_cmp(edit_a.aaterm, edit_b.aaterm):
            return False
    return True
|
|
### tensorflow==2.3.0
### https://ai.googleblog.com/2020/08/on-device-real-time-body-pose-tracking.html
### https://google.github.io/mediapipe/solutions/pose
### https://www.tensorflow.org/api_docs/python/tf/keras/Model
### https://www.tensorflow.org/lite/guide/ops_compatibility
### https://www.tensorflow.org/api_docs/python/tf/keras/layers/Conv2D
### https://www.tensorflow.org/api_docs/python/tf/keras/layers/DepthwiseConv2D
### https://www.tensorflow.org/api_docs/python/tf/keras/layers/Add
### https://www.tensorflow.org/api_docs/python/tf/keras/layers/ReLU
### https://www.tensorflow.org/api_docs/python/tf/keras/layers/MaxPool2D
### https://www.tensorflow.org/api_docs/python/tf/keras/layers/Reshape
### https://www.tensorflow.org/api_docs/python/tf/keras/layers/Concatenate
### https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer
### How to initialize a convolution layer with an arbitrary kernel in Keras? https://stackoverrun.com/ja/q/12269118
### saved_model_cli show --dir saved_model_full_pose_landmark_39kp/ --tag_set serve --signature_def serving_default
import tensorflow as tf
from tensorflow.keras import Model, Input
from tensorflow.keras.layers import Conv2D, DepthwiseConv2D, Add, ReLU, MaxPool2D, Reshape, Concatenate, Layer
from tensorflow.keras.initializers import Constant
from tensorflow.python.framework.convert_to_constants import convert_variables_to_constants_v2
import numpy as np
import sys
# tmp = np.load('weights/depthwise_conv2d_Kernel')
# print(tmp.shape)
# print(tmp)
# def init_f(shape, dtype=None):
# ker = np.load('weights/depthwise_conv2d_Kernel')
# print(shape)
# return ker
# sys.exit(0)
inputs = Input(shape=(256, 256, 3), name='input')
# Block_01
conv1_1 = Conv2D(filters=24, kernel_size=[3, 3], strides=[2, 2], padding="same", dilation_rate=[1, 1], activation='relu',
kernel_initializer=Constant(np.load('weights_full_land/conv2d_Kernel').transpose(1,2,3,0)),
bias_initializer=Constant(np.load('weights_full_land/conv2d_Bias')))(inputs)
depthconv1_1 = DepthwiseConv2D(kernel_size=[3, 3], strides=[1, 1], padding="same", depth_multiplier=1, dilation_rate=[1, 1],
depthwise_initializer=Constant(np.load('weights_full_land/depthwise_conv2d_Kernel')),
bias_initializer=Constant(np.load('weights_full_land/depthwise_conv2d_Bias')))(conv1_1)
conv1_2 = Conv2D(filters=24, kernel_size=[1, 1], strides=[1, 1], padding="valid", dilation_rate=[1, 1],
kernel_initializer=Constant(np.load('weights_full_land/conv2d_1_Kernel').transpose(1,2,3,0)),
bias_initializer=Constant(np.load('weights_full_land/conv2d_1_Bias')))(depthconv1_1)
add1_1 = Add()([conv1_1, conv1_2])
relu1_1 = ReLU()(add1_1)
# Block_02
depthconv2_1 = DepthwiseConv2D(kernel_size=[3, 3], strides=[1, 1], padding="same", depth_multiplier=1, dilation_rate=[1, 1],
depthwise_initializer=Constant(np.load('weights_full_land/depthwise_conv2d_1_Kernel')),
bias_initializer=Constant(np.load('weights_full_land/depthwise_conv2d_1_Bias')))(relu1_1)
conv2_1 = Conv2D(filters=24, kernel_size=[1, 1], strides=[1, 1], padding="valid", dilation_rate=[1, 1],
kernel_initializer=Constant(np.load('weights_full_land/conv2d_2_Kernel').transpose(1,2,3,0)),
bias_initializer=Constant(np.load('weights_full_land/conv2d_2_Bias')))(depthconv2_1)
add2_1 = Add()([relu1_1, conv2_1])
relu2_1 = ReLU()(add2_1)
# Block_03
depthconv3_1 = DepthwiseConv2D(kernel_size=[3, 3], strides=[2, 2], padding="same", depth_multiplier=1, dilation_rate=[1, 1],
depthwise_initializer=Constant(np.load('weights_full_land/depthwise_conv2d_2_Kernel')),
bias_initializer=Constant(np.load('weights_full_land/depthwise_conv2d_2_Bias')))(relu2_1)
conv3_1 = Conv2D(filters=48, kernel_size=[1, 1], strides=[1, 1], padding="valid", dilation_rate=[1, 1],
kernel_initializer=Constant(np.load('weights_full_land/conv2d_3_Kernel').transpose(1,2,3,0)),
bias_initializer=Constant(np.load('weights_full_land/conv2d_3_Bias')))(depthconv3_1)
maxpool3_1 = MaxPool2D(pool_size=[2, 2], strides=[2, 2], padding='valid')(relu2_1)
pad3_1 = tf.pad(maxpool3_1, paddings=tf.constant(np.load('weights_full_land/channel_padding_Paddings')))
add3_1 = Add()([conv3_1, pad3_1])
relu3_1 = ReLU()(add3_1)
# Block_04
depthconv4_1 = DepthwiseConv2D(kernel_size=[3, 3], strides=[1, 1], padding="same", depth_multiplier=1, dilation_rate=[1, 1],
depthwise_initializer=Constant(np.load('weights_full_land/depthwise_conv2d_3_Kernel')),
bias_initializer=Constant(np.load('weights_full_land/depthwise_conv2d_3_Bias')))(relu3_1)
conv4_1 = Conv2D(filters=48, kernel_size=[1, 1], strides=[1, 1], padding="valid", dilation_rate=[1, 1],
kernel_initializer=Constant(np.load('weights_full_land/conv2d_4_Kernel').transpose(1,2,3,0)),
bias_initializer=Constant(np.load('weights_full_land/conv2d_4_Bias')))(depthconv4_1)
add4_1 = Add()([relu3_1, conv4_1])
relu4_1 = ReLU()(add4_1)
# Block_05
depthconv5_1 = DepthwiseConv2D(kernel_size=[3, 3], strides=[1, 1], padding="same", depth_multiplier=1, dilation_rate=[1, 1],
depthwise_initializer=Constant(np.load('weights_full_land/depthwise_conv2d_4_Kernel')),
bias_initializer=Constant(np.load('weights_full_land/depthwise_conv2d_4_Bias')))(relu4_1)
conv5_1 = Conv2D(filters=48, kernel_size=[1, 1], strides=[1, 1], padding="valid", dilation_rate=[1, 1],
kernel_initializer=Constant(np.load('weights_full_land/conv2d_5_Kernel').transpose(1,2,3,0)),
bias_initializer=Constant(np.load('weights_full_land/conv2d_5_Bias')))(depthconv5_1)
add5_1 = Add()([relu4_1, conv5_1])
relu5_1 = ReLU()(add5_1)
# Block_06
depthconv6_1 = DepthwiseConv2D(kernel_size=[3, 3], strides=[1, 1], padding="same", depth_multiplier=1, dilation_rate=[1, 1],
depthwise_initializer=Constant(np.load('weights_full_land/depthwise_conv2d_5_Kernel')),
bias_initializer=Constant(np.load('weights_full_land/depthwise_conv2d_5_Bias')))(relu5_1)
conv6_1 = Conv2D(filters=48, kernel_size=[1, 1], strides=[1, 1], padding="valid", dilation_rate=[1, 1],
kernel_initializer=Constant(np.load('weights_full_land/conv2d_6_Kernel').transpose(1,2,3,0)),
bias_initializer=Constant(np.load('weights_full_land/conv2d_6_Bias')))(depthconv6_1)
add6_1 = Add()([relu5_1, conv6_1])
relu6_1 = ReLU()(add6_1)
# Block_07
depthconv7_1 = DepthwiseConv2D(kernel_size=[3, 3], strides=[2, 2], padding="same", depth_multiplier=1, dilation_rate=[1, 1],
depthwise_initializer=Constant(np.load('weights_full_land/depthwise_conv2d_6_Kernel')),
bias_initializer=Constant(np.load('weights_full_land/depthwise_conv2d_6_Bias')))(relu6_1)
conv7_1 = Conv2D(filters=96, kernel_size=[1, 1], strides=[1, 1], padding="valid", dilation_rate=[1, 1],
kernel_initializer=Constant(np.load('weights_full_land/conv2d_7_Kernel').transpose(1,2,3,0)),
bias_initializer=Constant(np.load('weights_full_land/conv2d_7_Bias')))(depthconv7_1)
maxpool7_1 = MaxPool2D(pool_size=[2, 2], strides=[2, 2], padding='valid')(relu6_1)
pad7_1 = tf.pad(maxpool7_1, paddings=tf.constant(np.load('weights_full_land/channel_padding_1_Paddings')))
add7_1 = Add()([conv7_1, pad7_1])
relu7_1 = ReLU()(add7_1)
# Block_08
depthconv8_1 = DepthwiseConv2D(kernel_size=[3, 3], strides=[1, 1], padding="same", depth_multiplier=1, dilation_rate=[1, 1],
depthwise_initializer=Constant(np.load('weights_full_land/depthwise_conv2d_7_Kernel')),
bias_initializer=Constant(np.load('weights_full_land/depthwise_conv2d_7_Bias')))(relu7_1)
conv8_1 = Conv2D(filters=96, kernel_size=[1, 1], strides=[1, 1], padding="valid", dilation_rate=[1, 1],
kernel_initializer=Constant(np.load('weights_full_land/conv2d_8_Kernel').transpose(1,2,3,0)),
bias_initializer=Constant(np.load('weights_full_land/conv2d_8_Bias')))(depthconv8_1)
add8_1 = Add()([relu7_1, conv8_1])
relu8_1 = ReLU()(add8_1)
# Block_09
depthconv9_1 = DepthwiseConv2D(kernel_size=[3, 3], strides=[1, 1], padding="same", depth_multiplier=1, dilation_rate=[1, 1],
depthwise_initializer=Constant(np.load('weights_full_land/depthwise_conv2d_8_Kernel')),
bias_initializer=Constant(np.load('weights_full_land/depthwise_conv2d_8_Bias')))(relu8_1)
conv9_1 = Conv2D(filters=96, kernel_size=[1, 1], strides=[1, 1], padding="valid", dilation_rate=[1, 1],
kernel_initializer=Constant(np.load('weights_full_land/conv2d_9_Kernel').transpose(1,2,3,0)),
bias_initializer=Constant(np.load('weights_full_land/conv2d_9_Bias')))(depthconv9_1)
add9_1 = Add()([relu8_1, conv9_1])
relu9_1 = ReLU()(add9_1)
# Block_10
depthconv10_1 = DepthwiseConv2D(kernel_size=[3, 3], strides=[1, 1], padding="same", depth_multiplier=1, dilation_rate=[1, 1],
depthwise_initializer=Constant(np.load('weights_full_land/depthwise_conv2d_9_Kernel')),
bias_initializer=Constant(np.load('weights_full_land/depthwise_conv2d_9_Bias')))(relu9_1)
conv10_1 = Conv2D(filters=96, kernel_size=[1, 1], strides=[1, 1], padding="valid", dilation_rate=[1, 1],
kernel_initializer=Constant(np.load('weights_full_land/conv2d_10_Kernel').transpose(1,2,3,0)),
bias_initializer=Constant(np.load('weights_full_land/conv2d_10_Bias')))(depthconv10_1)
add10_1 = Add()([relu9_1, conv10_1])
relu10_1 = ReLU()(add10_1)
# Block_11
depthconv11_1 = DepthwiseConv2D(kernel_size=[3, 3], strides=[1, 1], padding="same", depth_multiplier=1, dilation_rate=[1, 1],
depthwise_initializer=Constant(np.load('weights_full_land/depthwise_conv2d_10_Kernel')),
bias_initializer=Constant(np.load('weights_full_land/depthwise_conv2d_10_Bias')))(relu10_1)
conv11_1 = Conv2D(filters=96, kernel_size=[1, 1], strides=[1, 1], padding="valid", dilation_rate=[1, 1],
kernel_initializer=Constant(np.load('weights_full_land/conv2d_11_Kernel').transpose(1,2,3,0)),
bias_initializer=Constant(np.load('weights_full_land/conv2d_11_Bias')))(depthconv11_1)
add11_1 = Add()([relu10_1, conv11_1])
relu11_1 = ReLU()(add11_1)
# Block_12
depthconv12_1 = DepthwiseConv2D(kernel_size=[3, 3], strides=[2, 2], padding="same", depth_multiplier=1, dilation_rate=[1, 1],
depthwise_initializer=Constant(np.load('weights_full_land/depthwise_conv2d_11_Kernel')),
bias_initializer=Constant(np.load('weights_full_land/depthwise_conv2d_11_Bias')))(relu11_1)
conv12_1 = Conv2D(filters=192, kernel_size=[1, 1], strides=[1, 1], padding="valid", dilation_rate=[1, 1],
kernel_initializer=Constant(np.load('weights_full_land/conv2d_12_Kernel').transpose(1,2,3,0)),
bias_initializer=Constant(np.load('weights_full_land/conv2d_12_Bias')))(depthconv12_1)
maxpool12_1 = MaxPool2D(pool_size=[2, 2], strides=[2, 2], padding='valid')(relu11_1)
pad12_1 = tf.pad(maxpool12_1, paddings=tf.constant(np.load('weights_full_land/channel_padding_2_Paddings')))
add12_1 = Add()([conv12_1, pad12_1])
relu12_1 = ReLU()(add12_1)
# Block_13
depthconv13_1 = DepthwiseConv2D(kernel_size=[3, 3], strides=[1, 1], padding="same", depth_multiplier=1, dilation_rate=[1, 1],
depthwise_initializer=Constant(np.load('weights_full_land/depthwise_conv2d_12_Kernel')),
bias_initializer=Constant(np.load('weights_full_land/depthwise_conv2d_12_Bias')))(relu12_1)
conv13_1 = Conv2D(filters=192, kernel_size=[1, 1], strides=[1, 1], padding="valid", dilation_rate=[1, 1],
kernel_initializer=Constant(np.load('weights_full_land/conv2d_13_Kernel').transpose(1,2,3,0)),
bias_initializer=Constant(np.load('weights_full_land/conv2d_13_Bias')))(depthconv13_1)
add13_1 = Add()([relu12_1, conv13_1])
relu13_1 = ReLU()(add13_1)
# Block_14
depthconv14_1 = DepthwiseConv2D(kernel_size=[3, 3], strides=[1, 1], padding="same", depth_multiplier=1, dilation_rate=[1, 1],
depthwise_initializer=Constant(np.load('weights_full_land/depthwise_conv2d_13_Kernel')),
bias_initializer=Constant(np.load('weights_full_land/depthwise_conv2d_13_Bias')))(relu13_1)
conv14_1 = Conv2D(filters=192, kernel_size=[1, 1], strides=[1, 1], padding="valid", dilation_rate=[1, 1],
kernel_initializer=Constant(np.load('weights_full_land/conv2d_14_Kernel').transpose(1,2,3,0)),
bias_initializer=Constant(np.load('weights_full_land/conv2d_14_Bias')))(depthconv14_1)
add14_1 = Add()([relu13_1, conv14_1])
# ---------------------------------------------------------------------------
# Auto-generated layer graph (landmark model). Each "Block_NN" below is a
# depthwise-separable residual unit:
#     DepthwiseConv2D 3x3 -> pointwise Conv2D 1x1 -> Add(skip) -> ReLU
# Weights come from pre-exported .npy files under 'weights_full_land/'.
# Conv2D kernels are re-ordered with .transpose(1,2,3,0); presumably the
# export layout is (out, H, W, in) -> Keras (H, W, in, out) — TODO confirm
# against the exporter.
# ---------------------------------------------------------------------------
relu14_1 = ReLU()(add14_1)
# Block_15: identity residual unit, 192 channels
depthconv15_1 = DepthwiseConv2D(kernel_size=[3, 3], strides=[1, 1], padding="same", depth_multiplier=1, dilation_rate=[1, 1],
                   depthwise_initializer=Constant(np.load('weights_full_land/depthwise_conv2d_14_Kernel')),
                   bias_initializer=Constant(np.load('weights_full_land/depthwise_conv2d_14_Bias')))(relu14_1)
conv15_1 = Conv2D(filters=192, kernel_size=[1, 1], strides=[1, 1], padding="valid", dilation_rate=[1, 1],
                   kernel_initializer=Constant(np.load('weights_full_land/conv2d_15_Kernel').transpose(1,2,3,0)),
                   bias_initializer=Constant(np.load('weights_full_land/conv2d_15_Bias')))(depthconv15_1)
add15_1 = Add()([relu14_1, conv15_1])
relu15_1 = ReLU()(add15_1)
# Block_16: identity residual unit, 192 channels
depthconv16_1 = DepthwiseConv2D(kernel_size=[3, 3], strides=[1, 1], padding="same", depth_multiplier=1, dilation_rate=[1, 1],
                   depthwise_initializer=Constant(np.load('weights_full_land/depthwise_conv2d_15_Kernel')),
                   bias_initializer=Constant(np.load('weights_full_land/depthwise_conv2d_15_Bias')))(relu15_1)
conv16_1 = Conv2D(filters=192, kernel_size=[1, 1], strides=[1, 1], padding="valid", dilation_rate=[1, 1],
                   kernel_initializer=Constant(np.load('weights_full_land/conv2d_16_Kernel').transpose(1,2,3,0)),
                   bias_initializer=Constant(np.load('weights_full_land/conv2d_16_Bias')))(depthconv16_1)
add16_1 = Add()([relu15_1, conv16_1])
relu16_1 = ReLU()(add16_1)
# Block_17: identity residual unit, 192 channels
depthconv17_1 = DepthwiseConv2D(kernel_size=[3, 3], strides=[1, 1], padding="same", depth_multiplier=1, dilation_rate=[1, 1],
                   depthwise_initializer=Constant(np.load('weights_full_land/depthwise_conv2d_16_Kernel')),
                   bias_initializer=Constant(np.load('weights_full_land/depthwise_conv2d_16_Bias')))(relu16_1)
conv17_1 = Conv2D(filters=192, kernel_size=[1, 1], strides=[1, 1], padding="valid", dilation_rate=[1, 1],
                   kernel_initializer=Constant(np.load('weights_full_land/conv2d_17_Kernel').transpose(1,2,3,0)),
                   bias_initializer=Constant(np.load('weights_full_land/conv2d_17_Bias')))(depthconv17_1)
add17_1 = Add()([relu16_1, conv17_1])
relu17_1 = ReLU()(add17_1)
# Block_18: downsampling residual unit (stride-2 depthwise, 192 -> 288 ch).
# Skip branch: 2x2 max-pool halves the spatial size, then tf.pad with the
# saved paddings array — presumably zero-padding the channel axis so the
# pooled 192-ch skip matches the 288-ch conv branch before Add. TODO confirm
# the paddings file contents.
depthconv18_1 = DepthwiseConv2D(kernel_size=[3, 3], strides=[2, 2], padding="same", depth_multiplier=1, dilation_rate=[1, 1],
                   depthwise_initializer=Constant(np.load('weights_full_land/depthwise_conv2d_17_Kernel')),
                   bias_initializer=Constant(np.load('weights_full_land/depthwise_conv2d_17_Bias')))(relu17_1)
conv18_1 = Conv2D(filters=288, kernel_size=[1, 1], strides=[1, 1], padding="valid", dilation_rate=[1, 1],
                   kernel_initializer=Constant(np.load('weights_full_land/conv2d_18_Kernel').transpose(1,2,3,0)),
                   bias_initializer=Constant(np.load('weights_full_land/conv2d_18_Bias')))(depthconv18_1)
maxpool18_1 = MaxPool2D(pool_size=[2, 2], strides=[2, 2], padding='valid')(relu17_1)
pad18_1 = tf.pad(maxpool18_1, paddings=tf.constant(np.load('weights_full_land/channel_padding_3_Paddings')))
add18_1 = Add()([conv18_1, pad18_1])
relu18_1 = ReLU()(add18_1)
# Blocks 19-24: six identical identity residual units at 288 channels
# (depthwise 3x3 -> pointwise 1x1 -> Add with input -> ReLU).
# Block_19
depthconv19_1 = DepthwiseConv2D(kernel_size=[3, 3], strides=[1, 1], padding="same", depth_multiplier=1, dilation_rate=[1, 1],
                   depthwise_initializer=Constant(np.load('weights_full_land/depthwise_conv2d_18_Kernel')),
                   bias_initializer=Constant(np.load('weights_full_land/depthwise_conv2d_18_Bias')))(relu18_1)
conv19_1 = Conv2D(filters=288, kernel_size=[1, 1], strides=[1, 1], padding="valid", dilation_rate=[1, 1],
                   kernel_initializer=Constant(np.load('weights_full_land/conv2d_19_Kernel').transpose(1,2,3,0)),
                   bias_initializer=Constant(np.load('weights_full_land/conv2d_19_Bias')))(depthconv19_1)
add19_1 = Add()([relu18_1, conv19_1])
relu19_1 = ReLU()(add19_1)
# Block_20
depthconv20_1 = DepthwiseConv2D(kernel_size=[3, 3], strides=[1, 1], padding="same", depth_multiplier=1, dilation_rate=[1, 1],
                   depthwise_initializer=Constant(np.load('weights_full_land/depthwise_conv2d_19_Kernel')),
                   bias_initializer=Constant(np.load('weights_full_land/depthwise_conv2d_19_Bias')))(relu19_1)
conv20_1 = Conv2D(filters=288, kernel_size=[1, 1], strides=[1, 1], padding="valid", dilation_rate=[1, 1],
                   kernel_initializer=Constant(np.load('weights_full_land/conv2d_20_Kernel').transpose(1,2,3,0)),
                   bias_initializer=Constant(np.load('weights_full_land/conv2d_20_Bias')))(depthconv20_1)
add20_1 = Add()([relu19_1, conv20_1])
relu20_1 = ReLU()(add20_1)
# Block_21
depthconv21_1 = DepthwiseConv2D(kernel_size=[3, 3], strides=[1, 1], padding="same", depth_multiplier=1, dilation_rate=[1, 1],
                   depthwise_initializer=Constant(np.load('weights_full_land/depthwise_conv2d_20_Kernel')),
                   bias_initializer=Constant(np.load('weights_full_land/depthwise_conv2d_20_Bias')))(relu20_1)
conv21_1 = Conv2D(filters=288, kernel_size=[1, 1], strides=[1, 1], padding="valid", dilation_rate=[1, 1],
                   kernel_initializer=Constant(np.load('weights_full_land/conv2d_21_Kernel').transpose(1,2,3,0)),
                   bias_initializer=Constant(np.load('weights_full_land/conv2d_21_Bias')))(depthconv21_1)
add21_1 = Add()([relu20_1, conv21_1])
relu21_1 = ReLU()(add21_1)
# Block_22
depthconv22_1 = DepthwiseConv2D(kernel_size=[3, 3], strides=[1, 1], padding="same", depth_multiplier=1, dilation_rate=[1, 1],
                   depthwise_initializer=Constant(np.load('weights_full_land/depthwise_conv2d_21_Kernel')),
                   bias_initializer=Constant(np.load('weights_full_land/depthwise_conv2d_21_Bias')))(relu21_1)
conv22_1 = Conv2D(filters=288, kernel_size=[1, 1], strides=[1, 1], padding="valid", dilation_rate=[1, 1],
                   kernel_initializer=Constant(np.load('weights_full_land/conv2d_22_Kernel').transpose(1,2,3,0)),
                   bias_initializer=Constant(np.load('weights_full_land/conv2d_22_Bias')))(depthconv22_1)
add22_1 = Add()([relu21_1, conv22_1])
relu22_1 = ReLU()(add22_1)
# Block_23
depthconv23_1 = DepthwiseConv2D(kernel_size=[3, 3], strides=[1, 1], padding="same", depth_multiplier=1, dilation_rate=[1, 1],
                   depthwise_initializer=Constant(np.load('weights_full_land/depthwise_conv2d_22_Kernel')),
                   bias_initializer=Constant(np.load('weights_full_land/depthwise_conv2d_22_Bias')))(relu22_1)
conv23_1 = Conv2D(filters=288, kernel_size=[1, 1], strides=[1, 1], padding="valid", dilation_rate=[1, 1],
                   kernel_initializer=Constant(np.load('weights_full_land/conv2d_23_Kernel').transpose(1,2,3,0)),
                   bias_initializer=Constant(np.load('weights_full_land/conv2d_23_Bias')))(depthconv23_1)
add23_1 = Add()([relu22_1, conv23_1])
relu23_1 = ReLU()(add23_1)
# Block_24
depthconv24_1 = DepthwiseConv2D(kernel_size=[3, 3], strides=[1, 1], padding="same", depth_multiplier=1, dilation_rate=[1, 1],
                   depthwise_initializer=Constant(np.load('weights_full_land/depthwise_conv2d_23_Kernel')),
                   bias_initializer=Constant(np.load('weights_full_land/depthwise_conv2d_23_Bias')))(relu23_1)
conv24_1 = Conv2D(filters=288, kernel_size=[1, 1], strides=[1, 1], padding="valid", dilation_rate=[1, 1],
                   kernel_initializer=Constant(np.load('weights_full_land/conv2d_24_Kernel').transpose(1,2,3,0)),
                   bias_initializer=Constant(np.load('weights_full_land/conv2d_24_Bias')))(depthconv24_1)
add24_1 = Add()([relu23_1, conv24_1])
relu24_1 = ReLU()(add24_1)
# Block_25: top-down decoder path (FPN-style). Starting from the deepest
# feature (relu24_1), each stage projects to 48 channels (dw 3x3 + 1x1 conv
# with fused ReLU via activation='relu'), upsamples with tf.image.resize to a
# saved target size, and adds a 48-ch lateral projection of an earlier
# backbone stage: relu17_1, then relu11_1, then relu6_1 (the last two are
# defined above this chunk). Result add25_3 is the fused high-resolution map.
depthconv25_1 = DepthwiseConv2D(kernel_size=[3, 3], strides=[1, 1], padding="same", depth_multiplier=1, dilation_rate=[1, 1],
                   depthwise_initializer=Constant(np.load('weights_full_land/depthwise_conv2d_24_Kernel')),
                   bias_initializer=Constant(np.load('weights_full_land/depthwise_conv2d_24_Bias')))(relu24_1)
conv25_1 = Conv2D(filters=48, kernel_size=[1, 1], strides=[1, 1], padding="valid", dilation_rate=[1, 1], activation='relu',
                   kernel_initializer=Constant(np.load('weights_full_land/conv2d_25_Kernel').transpose(1,2,3,0)),
                   bias_initializer=Constant(np.load('weights_full_land/conv2d_25_Bias')))(depthconv25_1)
resize25_1 = tf.image.resize(conv25_1, np.load('weights_full_land/up_sampling2d_Size'))
depthconv25_2 = DepthwiseConv2D(kernel_size=[3, 3], strides=[1, 1], padding="same", depth_multiplier=1, dilation_rate=[1, 1],
                   depthwise_initializer=Constant(np.load('weights_full_land/depthwise_conv2d_25_Kernel')),
                   bias_initializer=Constant(np.load('weights_full_land/depthwise_conv2d_25_Bias')))(relu17_1)
conv25_2 = Conv2D(filters=48, kernel_size=[1, 1], strides=[1, 1], padding="valid", dilation_rate=[1, 1], activation='relu',
                   kernel_initializer=Constant(np.load('weights_full_land/conv2d_26_Kernel').transpose(1,2,3,0)),
                   bias_initializer=Constant(np.load('weights_full_land/conv2d_26_Bias')))(depthconv25_2)
add25_1 = Add()([resize25_1, conv25_2])
resize25_2 = tf.image.resize(add25_1, np.load('weights_full_land/up_sampling2d_1_Size'))
depthconv25_3 = DepthwiseConv2D(kernel_size=[3, 3], strides=[1, 1], padding="same", depth_multiplier=1, dilation_rate=[1, 1],
                   depthwise_initializer=Constant(np.load('weights_full_land/depthwise_conv2d_26_Kernel')),
                   bias_initializer=Constant(np.load('weights_full_land/depthwise_conv2d_26_Bias')))(relu11_1)
conv25_3 = Conv2D(filters=48, kernel_size=[1, 1], strides=[1, 1], padding="valid", dilation_rate=[1, 1], activation='relu',
                   kernel_initializer=Constant(np.load('weights_full_land/conv2d_27_Kernel').transpose(1,2,3,0)),
                   bias_initializer=Constant(np.load('weights_full_land/conv2d_27_Bias')))(depthconv25_3)
add25_2 = Add()([resize25_2, conv25_3])
resize25_3 = tf.image.resize(add25_2, np.load('weights_full_land/up_sampling2d_2_Size'))
depthconv25_4 = DepthwiseConv2D(kernel_size=[3, 3], strides=[1, 1], padding="same", depth_multiplier=1, dilation_rate=[1, 1],
                   depthwise_initializer=Constant(np.load('weights_full_land/depthwise_conv2d_27_Kernel')),
                   bias_initializer=Constant(np.load('weights_full_land/depthwise_conv2d_27_Bias')))(relu6_1)
conv25_4 = Conv2D(filters=48, kernel_size=[1, 1], strides=[1, 1], padding="valid", dilation_rate=[1, 1], activation='relu',
                   kernel_initializer=Constant(np.load('weights_full_land/conv2d_28_Kernel').transpose(1,2,3,0)),
                   bias_initializer=Constant(np.load('weights_full_land/conv2d_28_Bias')))(depthconv25_4)
add25_3 = Add()([resize25_3, conv25_4])
# Block_26: downsampling residual unit off the fused decoder map add25_3
# (stride-2 depthwise, 48 -> 96 ch); skip branch is max-pool + channel
# padding, as in Block_18.
depthconv26_1 = DepthwiseConv2D(kernel_size=[3, 3], strides=[2, 2], padding="same", depth_multiplier=1, dilation_rate=[1, 1],
                   depthwise_initializer=Constant(np.load('weights_full_land/depthwise_conv2d_28_Kernel')),
                   bias_initializer=Constant(np.load('weights_full_land/depthwise_conv2d_28_Bias')))(add25_3)
conv26_1 = Conv2D(filters=96, kernel_size=[1, 1], strides=[1, 1], padding="valid", dilation_rate=[1, 1],
                   kernel_initializer=Constant(np.load('weights_full_land/conv2d_29_Kernel').transpose(1,2,3,0)),
                   bias_initializer=Constant(np.load('weights_full_land/conv2d_29_Bias')))(depthconv26_1)
maxpool26_1 = MaxPool2D(pool_size=[2, 2], strides=[2, 2], padding='valid')(add25_3)
pad26_1 = tf.pad(maxpool26_1, paddings=tf.constant(np.load('weights_full_land/channel_padding_4_Paddings')))
add26_1 = Add()([conv26_1, pad26_1])
relu26_1 = ReLU()(add26_1)
# Blocks 27-30: identity residual units at 96 channels.
# Block_27
depthconv27_1 = DepthwiseConv2D(kernel_size=[3, 3], strides=[1, 1], padding="same", depth_multiplier=1, dilation_rate=[1, 1],
                   depthwise_initializer=Constant(np.load('weights_full_land/depthwise_conv2d_29_Kernel')),
                   bias_initializer=Constant(np.load('weights_full_land/depthwise_conv2d_29_Bias')))(relu26_1)
conv27_1 = Conv2D(filters=96, kernel_size=[1, 1], strides=[1, 1], padding="valid", dilation_rate=[1, 1],
                   kernel_initializer=Constant(np.load('weights_full_land/conv2d_30_Kernel').transpose(1,2,3,0)),
                   bias_initializer=Constant(np.load('weights_full_land/conv2d_30_Bias')))(depthconv27_1)
add27_1 = Add()([relu26_1, conv27_1])
relu27_1 = ReLU()(add27_1)
# Block_28
depthconv28_1 = DepthwiseConv2D(kernel_size=[3, 3], strides=[1, 1], padding="same", depth_multiplier=1, dilation_rate=[1, 1],
                   depthwise_initializer=Constant(np.load('weights_full_land/depthwise_conv2d_30_Kernel')),
                   bias_initializer=Constant(np.load('weights_full_land/depthwise_conv2d_30_Bias')))(relu27_1)
conv28_1 = Conv2D(filters=96, kernel_size=[1, 1], strides=[1, 1], padding="valid", dilation_rate=[1, 1],
                   kernel_initializer=Constant(np.load('weights_full_land/conv2d_31_Kernel').transpose(1,2,3,0)),
                   bias_initializer=Constant(np.load('weights_full_land/conv2d_31_Bias')))(depthconv28_1)
add28_1 = Add()([relu27_1, conv28_1])
relu28_1 = ReLU()(add28_1)
# Block_29
depthconv29_1 = DepthwiseConv2D(kernel_size=[3, 3], strides=[1, 1], padding="same", depth_multiplier=1, dilation_rate=[1, 1],
                   depthwise_initializer=Constant(np.load('weights_full_land/depthwise_conv2d_31_Kernel')),
                   bias_initializer=Constant(np.load('weights_full_land/depthwise_conv2d_31_Bias')))(relu28_1)
conv29_1 = Conv2D(filters=96, kernel_size=[1, 1], strides=[1, 1], padding="valid", dilation_rate=[1, 1],
                   kernel_initializer=Constant(np.load('weights_full_land/conv2d_32_Kernel').transpose(1,2,3,0)),
                   bias_initializer=Constant(np.load('weights_full_land/conv2d_32_Bias')))(depthconv29_1)
add29_1 = Add()([relu28_1, conv29_1])
relu29_1 = ReLU()(add29_1)
# Block_30
depthconv30_1 = DepthwiseConv2D(kernel_size=[3, 3], strides=[1, 1], padding="same", depth_multiplier=1, dilation_rate=[1, 1],
                   depthwise_initializer=Constant(np.load('weights_full_land/depthwise_conv2d_32_Kernel')),
                   bias_initializer=Constant(np.load('weights_full_land/depthwise_conv2d_32_Bias')))(relu29_1)
conv30_1 = Conv2D(filters=96, kernel_size=[1, 1], strides=[1, 1], padding="valid", dilation_rate=[1, 1],
                   kernel_initializer=Constant(np.load('weights_full_land/conv2d_33_Kernel').transpose(1,2,3,0)),
                   bias_initializer=Constant(np.load('weights_full_land/conv2d_33_Bias')))(depthconv30_1)
add30_1 = Add()([relu29_1, conv30_1])
relu30_1 = ReLU()(add30_1)
# Block_31: lateral fusion — a 96-ch ReLU-activated projection of the earlier
# backbone stage relu11_1 is added to relu30_1 (no trailing ReLU on the sum).
depthconv31_1 = DepthwiseConv2D(kernel_size=[3, 3], strides=[1, 1], padding="same", depth_multiplier=1, dilation_rate=[1, 1],
                   depthwise_initializer=Constant(np.load('weights_full_land/depthwise_conv2d_33_Kernel')),
                   bias_initializer=Constant(np.load('weights_full_land/depthwise_conv2d_33_Bias')))(relu11_1)
conv31_1 = Conv2D(filters=96, kernel_size=[1, 1], strides=[1, 1], padding="valid", dilation_rate=[1, 1], activation='relu',
                   kernel_initializer=Constant(np.load('weights_full_land/conv2d_34_Kernel').transpose(1,2,3,0)),
                   bias_initializer=Constant(np.load('weights_full_land/conv2d_34_Bias')))(depthconv31_1)
add31_1 = Add()([relu30_1, conv31_1])
# Block_32: downsampling residual unit (stride-2 depthwise, 96 -> 192 ch);
# skip branch is max-pool + channel padding, as in Block_18.
depthconv32_1 = DepthwiseConv2D(kernel_size=[3, 3], strides=[2, 2], padding="same", depth_multiplier=1, dilation_rate=[1, 1],
                   depthwise_initializer=Constant(np.load('weights_full_land/depthwise_conv2d_34_Kernel')),
                   bias_initializer=Constant(np.load('weights_full_land/depthwise_conv2d_34_Bias')))(add31_1)
conv32_1 = Conv2D(filters=192, kernel_size=[1, 1], strides=[1, 1], padding="valid", dilation_rate=[1, 1],
                   kernel_initializer=Constant(np.load('weights_full_land/conv2d_35_Kernel').transpose(1,2,3,0)),
                   bias_initializer=Constant(np.load('weights_full_land/conv2d_35_Bias')))(depthconv32_1)
maxpool32_1 = MaxPool2D(pool_size=[2, 2], strides=[2, 2], padding='valid')(add31_1)
pad32_1 = tf.pad(maxpool32_1, paddings=tf.constant(np.load('weights_full_land/channel_padding_5_Paddings')))
add32_1 = Add()([conv32_1, pad32_1])
relu32_1 = ReLU()(add32_1)
# Blocks 33-37: identity residual units at 192 channels.
# Block_33
depthconv33_1 = DepthwiseConv2D(kernel_size=[3, 3], strides=[1, 1], padding="same", depth_multiplier=1, dilation_rate=[1, 1],
                   depthwise_initializer=Constant(np.load('weights_full_land/depthwise_conv2d_35_Kernel')),
                   bias_initializer=Constant(np.load('weights_full_land/depthwise_conv2d_35_Bias')))(relu32_1)
conv33_1 = Conv2D(filters=192, kernel_size=[1, 1], strides=[1, 1], padding="valid", dilation_rate=[1, 1],
                   kernel_initializer=Constant(np.load('weights_full_land/conv2d_36_Kernel').transpose(1,2,3,0)),
                   bias_initializer=Constant(np.load('weights_full_land/conv2d_36_Bias')))(depthconv33_1)
add33_1 = Add()([relu32_1, conv33_1])
relu33_1 = ReLU()(add33_1)
# Block_34
depthconv34_1 = DepthwiseConv2D(kernel_size=[3, 3], strides=[1, 1], padding="same", depth_multiplier=1, dilation_rate=[1, 1],
                   depthwise_initializer=Constant(np.load('weights_full_land/depthwise_conv2d_36_Kernel')),
                   bias_initializer=Constant(np.load('weights_full_land/depthwise_conv2d_36_Bias')))(relu33_1)
conv34_1 = Conv2D(filters=192, kernel_size=[1, 1], strides=[1, 1], padding="valid", dilation_rate=[1, 1],
                   kernel_initializer=Constant(np.load('weights_full_land/conv2d_37_Kernel').transpose(1,2,3,0)),
                   bias_initializer=Constant(np.load('weights_full_land/conv2d_37_Bias')))(depthconv34_1)
add34_1 = Add()([relu33_1, conv34_1])
relu34_1 = ReLU()(add34_1)
# Block_35
depthconv35_1 = DepthwiseConv2D(kernel_size=[3, 3], strides=[1, 1], padding="same", depth_multiplier=1, dilation_rate=[1, 1],
                   depthwise_initializer=Constant(np.load('weights_full_land/depthwise_conv2d_37_Kernel')),
                   bias_initializer=Constant(np.load('weights_full_land/depthwise_conv2d_37_Bias')))(relu34_1)
conv35_1 = Conv2D(filters=192, kernel_size=[1, 1], strides=[1, 1], padding="valid", dilation_rate=[1, 1],
                   kernel_initializer=Constant(np.load('weights_full_land/conv2d_38_Kernel').transpose(1,2,3,0)),
                   bias_initializer=Constant(np.load('weights_full_land/conv2d_38_Bias')))(depthconv35_1)
add35_1 = Add()([relu34_1, conv35_1])
relu35_1 = ReLU()(add35_1)
# Block_36
depthconv36_1 = DepthwiseConv2D(kernel_size=[3, 3], strides=[1, 1], padding="same", depth_multiplier=1, dilation_rate=[1, 1],
                   depthwise_initializer=Constant(np.load('weights_full_land/depthwise_conv2d_38_Kernel')),
                   bias_initializer=Constant(np.load('weights_full_land/depthwise_conv2d_38_Bias')))(relu35_1)
conv36_1 = Conv2D(filters=192, kernel_size=[1, 1], strides=[1, 1], padding="valid", dilation_rate=[1, 1],
                   kernel_initializer=Constant(np.load('weights_full_land/conv2d_39_Kernel').transpose(1,2,3,0)),
                   bias_initializer=Constant(np.load('weights_full_land/conv2d_39_Bias')))(depthconv36_1)
add36_1 = Add()([relu35_1, conv36_1])
relu36_1 = ReLU()(add36_1)
# Block_37
depthconv37_1 = DepthwiseConv2D(kernel_size=[3, 3], strides=[1, 1], padding="same", depth_multiplier=1, dilation_rate=[1, 1],
                   depthwise_initializer=Constant(np.load('weights_full_land/depthwise_conv2d_39_Kernel')),
                   bias_initializer=Constant(np.load('weights_full_land/depthwise_conv2d_39_Bias')))(relu36_1)
conv37_1 = Conv2D(filters=192, kernel_size=[1, 1], strides=[1, 1], padding="valid", dilation_rate=[1, 1],
                   kernel_initializer=Constant(np.load('weights_full_land/conv2d_40_Kernel').transpose(1,2,3,0)),
                   bias_initializer=Constant(np.load('weights_full_land/conv2d_40_Bias')))(depthconv37_1)
add37_1 = Add()([relu36_1, conv37_1])
relu37_1 = ReLU()(add37_1)
# Block_38: lateral fusion — a 192-ch ReLU-activated projection of backbone
# stage relu17_1 is added to relu37_1 (no trailing ReLU on the sum).
depthconv38_1 = DepthwiseConv2D(kernel_size=[3, 3], strides=[1, 1], padding="same", depth_multiplier=1, dilation_rate=[1, 1],
                   depthwise_initializer=Constant(np.load('weights_full_land/depthwise_conv2d_40_Kernel')),
                   bias_initializer=Constant(np.load('weights_full_land/depthwise_conv2d_40_Bias')))(relu17_1)
conv38_1 = Conv2D(filters=192, kernel_size=[1, 1], strides=[1, 1], padding="valid", dilation_rate=[1, 1], activation='relu',
                   kernel_initializer=Constant(np.load('weights_full_land/conv2d_41_Kernel').transpose(1,2,3,0)),
                   bias_initializer=Constant(np.load('weights_full_land/conv2d_41_Bias')))(depthconv38_1)
add38_1 = Add()([conv38_1, relu37_1])
# Block_39: downsampling residual unit (stride-2 depthwise, 192 -> 288 ch);
# skip branch is max-pool + channel padding, as in Block_18.
depthconv39_1 = DepthwiseConv2D(kernel_size=[3, 3], strides=[2, 2], padding="same", depth_multiplier=1, dilation_rate=[1, 1],
                   depthwise_initializer=Constant(np.load('weights_full_land/depthwise_conv2d_41_Kernel')),
                   bias_initializer=Constant(np.load('weights_full_land/depthwise_conv2d_41_Bias')))(add38_1)
conv39_1 = Conv2D(filters=288, kernel_size=[1, 1], strides=[1, 1], padding="valid", dilation_rate=[1, 1],
                   kernel_initializer=Constant(np.load('weights_full_land/conv2d_42_Kernel').transpose(1,2,3,0)),
                   bias_initializer=Constant(np.load('weights_full_land/conv2d_42_Bias')))(depthconv39_1)
maxpool39_1 = MaxPool2D(pool_size=[2, 2], strides=[2, 2], padding='valid')(add38_1)
pad39_1 = tf.pad(maxpool39_1, paddings=tf.constant(np.load('weights_full_land/channel_padding_6_Paddings')))
add39_1 = Add()([conv39_1, pad39_1])
relu39_1 = ReLU()(add39_1)
# Blocks 40-45: identity residual units at 288 channels.
# Block_40
depthconv40_1 = DepthwiseConv2D(kernel_size=[3, 3], strides=[1, 1], padding="same", depth_multiplier=1, dilation_rate=[1, 1],
                   depthwise_initializer=Constant(np.load('weights_full_land/depthwise_conv2d_42_Kernel')),
                   bias_initializer=Constant(np.load('weights_full_land/depthwise_conv2d_42_Bias')))(relu39_1)
conv40_1 = Conv2D(filters=288, kernel_size=[1, 1], strides=[1, 1], padding="valid", dilation_rate=[1, 1],
                   kernel_initializer=Constant(np.load('weights_full_land/conv2d_43_Kernel').transpose(1,2,3,0)),
                   bias_initializer=Constant(np.load('weights_full_land/conv2d_43_Bias')))(depthconv40_1)
add40_1 = Add()([relu39_1, conv40_1])
relu40_1 = ReLU()(add40_1)
# Block_41
depthconv41_1 = DepthwiseConv2D(kernel_size=[3, 3], strides=[1, 1], padding="same", depth_multiplier=1, dilation_rate=[1, 1],
                   depthwise_initializer=Constant(np.load('weights_full_land/depthwise_conv2d_43_Kernel')),
                   bias_initializer=Constant(np.load('weights_full_land/depthwise_conv2d_43_Bias')))(relu40_1)
conv41_1 = Conv2D(filters=288, kernel_size=[1, 1], strides=[1, 1], padding="valid", dilation_rate=[1, 1],
                   kernel_initializer=Constant(np.load('weights_full_land/conv2d_44_Kernel').transpose(1,2,3,0)),
                   bias_initializer=Constant(np.load('weights_full_land/conv2d_44_Bias')))(depthconv41_1)
add41_1 = Add()([relu40_1, conv41_1])
relu41_1 = ReLU()(add41_1)
# Block_42
depthconv42_1 = DepthwiseConv2D(kernel_size=[3, 3], strides=[1, 1], padding="same", depth_multiplier=1, dilation_rate=[1, 1],
                   depthwise_initializer=Constant(np.load('weights_full_land/depthwise_conv2d_44_Kernel')),
                   bias_initializer=Constant(np.load('weights_full_land/depthwise_conv2d_44_Bias')))(relu41_1)
conv42_1 = Conv2D(filters=288, kernel_size=[1, 1], strides=[1, 1], padding="valid", dilation_rate=[1, 1],
                   kernel_initializer=Constant(np.load('weights_full_land/conv2d_45_Kernel').transpose(1,2,3,0)),
                   bias_initializer=Constant(np.load('weights_full_land/conv2d_45_Bias')))(depthconv42_1)
add42_1 = Add()([relu41_1, conv42_1])
relu42_1 = ReLU()(add42_1)
# Block_43
depthconv43_1 = DepthwiseConv2D(kernel_size=[3, 3], strides=[1, 1], padding="same", depth_multiplier=1, dilation_rate=[1, 1],
                   depthwise_initializer=Constant(np.load('weights_full_land/depthwise_conv2d_45_Kernel')),
                   bias_initializer=Constant(np.load('weights_full_land/depthwise_conv2d_45_Bias')))(relu42_1)
conv43_1 = Conv2D(filters=288, kernel_size=[1, 1], strides=[1, 1], padding="valid", dilation_rate=[1, 1],
                   kernel_initializer=Constant(np.load('weights_full_land/conv2d_46_Kernel').transpose(1,2,3,0)),
                   bias_initializer=Constant(np.load('weights_full_land/conv2d_46_Bias')))(depthconv43_1)
add43_1 = Add()([relu42_1, conv43_1])
relu43_1 = ReLU()(add43_1)
# Block_44
depthconv44_1 = DepthwiseConv2D(kernel_size=[3, 3], strides=[1, 1], padding="same", depth_multiplier=1, dilation_rate=[1, 1],
                   depthwise_initializer=Constant(np.load('weights_full_land/depthwise_conv2d_46_Kernel')),
                   bias_initializer=Constant(np.load('weights_full_land/depthwise_conv2d_46_Bias')))(relu43_1)
conv44_1 = Conv2D(filters=288, kernel_size=[1, 1], strides=[1, 1], padding="valid", dilation_rate=[1, 1],
                   kernel_initializer=Constant(np.load('weights_full_land/conv2d_47_Kernel').transpose(1,2,3,0)),
                   bias_initializer=Constant(np.load('weights_full_land/conv2d_47_Bias')))(depthconv44_1)
add44_1 = Add()([relu43_1, conv44_1])
relu44_1 = ReLU()(add44_1)
# Block_45
depthconv45_1 = DepthwiseConv2D(kernel_size=[3, 3], strides=[1, 1], padding="same", depth_multiplier=1, dilation_rate=[1, 1],
                   depthwise_initializer=Constant(np.load('weights_full_land/depthwise_conv2d_47_Kernel')),
                   bias_initializer=Constant(np.load('weights_full_land/depthwise_conv2d_47_Bias')))(relu44_1)
conv45_1 = Conv2D(filters=288, kernel_size=[1, 1], strides=[1, 1], padding="valid", dilation_rate=[1, 1],
                   kernel_initializer=Constant(np.load('weights_full_land/conv2d_48_Kernel').transpose(1,2,3,0)),
                   bias_initializer=Constant(np.load('weights_full_land/conv2d_48_Bias')))(depthconv45_1)
add45_1 = Add()([relu44_1, conv45_1])
relu45_1 = ReLU()(add45_1)
# Block_46: lateral fusion — a 288-ch ReLU-activated projection of backbone
# stage relu24_1 is added to relu45_1 (no trailing ReLU on the sum).
depthconv46_1 = DepthwiseConv2D(kernel_size=[3, 3], strides=[1, 1], padding="same", depth_multiplier=1, dilation_rate=[1, 1],
                   depthwise_initializer=Constant(np.load('weights_full_land/depthwise_conv2d_48_Kernel')),
                   bias_initializer=Constant(np.load('weights_full_land/depthwise_conv2d_48_Bias')))(relu24_1)
conv46_1 = Conv2D(filters=288, kernel_size=[1, 1], strides=[1, 1], padding="valid", dilation_rate=[1, 1], activation='relu',
                   kernel_initializer=Constant(np.load('weights_full_land/conv2d_49_Kernel').transpose(1,2,3,0)),
                   bias_initializer=Constant(np.load('weights_full_land/conv2d_49_Bias')))(depthconv46_1)
add46_1 = Add()([conv46_1, relu45_1])
# Block_47: downsampling residual unit at constant width (288 -> 288 ch);
# since channel counts already match, the pooled skip is added directly —
# no channel padding needed, unlike Blocks 18/26/32/39.
depthconv47_1 = DepthwiseConv2D(kernel_size=[3, 3], strides=[2, 2], padding="same", depth_multiplier=1, dilation_rate=[1, 1],
                   depthwise_initializer=Constant(np.load('weights_full_land/depthwise_conv2d_49_Kernel')),
                   bias_initializer=Constant(np.load('weights_full_land/depthwise_conv2d_49_Bias')))(add46_1)
conv47_1 = Conv2D(filters=288, kernel_size=[1, 1], strides=[1, 1], padding="valid", dilation_rate=[1, 1],
                   kernel_initializer=Constant(np.load('weights_full_land/conv2d_50_Kernel').transpose(1,2,3,0)),
                   bias_initializer=Constant(np.load('weights_full_land/conv2d_50_Bias')))(depthconv47_1)
maxpool47_1 = MaxPool2D(pool_size=[2, 2], strides=[2, 2], padding='valid')(add46_1)
add47_1 = Add()([conv47_1, maxpool47_1])
relu47_1 = ReLU()(add47_1)
# Blocks 48-54: identity residual units at 288 channels.
# Block_48
depthconv48_1 = DepthwiseConv2D(kernel_size=[3, 3], strides=[1, 1], padding="same", depth_multiplier=1, dilation_rate=[1, 1],
                   depthwise_initializer=Constant(np.load('weights_full_land/depthwise_conv2d_50_Kernel')),
                   bias_initializer=Constant(np.load('weights_full_land/depthwise_conv2d_50_Bias')))(relu47_1)
conv48_1 = Conv2D(filters=288, kernel_size=[1, 1], strides=[1, 1], padding="valid", dilation_rate=[1, 1],
                   kernel_initializer=Constant(np.load('weights_full_land/conv2d_51_Kernel').transpose(1,2,3,0)),
                   bias_initializer=Constant(np.load('weights_full_land/conv2d_51_Bias')))(depthconv48_1)
add48_1 = Add()([conv48_1, relu47_1])
relu48_1 = ReLU()(add48_1)
# Block_49
depthconv49_1 = DepthwiseConv2D(kernel_size=[3, 3], strides=[1, 1], padding="same", depth_multiplier=1, dilation_rate=[1, 1],
                   depthwise_initializer=Constant(np.load('weights_full_land/depthwise_conv2d_51_Kernel')),
                   bias_initializer=Constant(np.load('weights_full_land/depthwise_conv2d_51_Bias')))(relu48_1)
conv49_1 = Conv2D(filters=288, kernel_size=[1, 1], strides=[1, 1], padding="valid", dilation_rate=[1, 1],
                   kernel_initializer=Constant(np.load('weights_full_land/conv2d_52_Kernel').transpose(1,2,3,0)),
                   bias_initializer=Constant(np.load('weights_full_land/conv2d_52_Bias')))(depthconv49_1)
add49_1 = Add()([conv49_1, relu48_1])
relu49_1 = ReLU()(add49_1)
# Block_50
depthconv50_1 = DepthwiseConv2D(kernel_size=[3, 3], strides=[1, 1], padding="same", depth_multiplier=1, dilation_rate=[1, 1],
                   depthwise_initializer=Constant(np.load('weights_full_land/depthwise_conv2d_52_Kernel')),
                   bias_initializer=Constant(np.load('weights_full_land/depthwise_conv2d_52_Bias')))(relu49_1)
conv50_1 = Conv2D(filters=288, kernel_size=[1, 1], strides=[1, 1], padding="valid", dilation_rate=[1, 1],
                   kernel_initializer=Constant(np.load('weights_full_land/conv2d_53_Kernel').transpose(1,2,3,0)),
                   bias_initializer=Constant(np.load('weights_full_land/conv2d_53_Bias')))(depthconv50_1)
add50_1 = Add()([conv50_1, relu49_1])
relu50_1 = ReLU()(add50_1)
# Block_51
depthconv51_1 = DepthwiseConv2D(kernel_size=[3, 3], strides=[1, 1], padding="same", depth_multiplier=1, dilation_rate=[1, 1],
                   depthwise_initializer=Constant(np.load('weights_full_land/depthwise_conv2d_53_Kernel')),
                   bias_initializer=Constant(np.load('weights_full_land/depthwise_conv2d_53_Bias')))(relu50_1)
conv51_1 = Conv2D(filters=288, kernel_size=[1, 1], strides=[1, 1], padding="valid", dilation_rate=[1, 1],
                   kernel_initializer=Constant(np.load('weights_full_land/conv2d_54_Kernel').transpose(1,2,3,0)),
                   bias_initializer=Constant(np.load('weights_full_land/conv2d_54_Bias')))(depthconv51_1)
add51_1 = Add()([conv51_1, relu50_1])
relu51_1 = ReLU()(add51_1)
# Block_52
depthconv52_1 = DepthwiseConv2D(kernel_size=[3, 3], strides=[1, 1], padding="same", depth_multiplier=1, dilation_rate=[1, 1],
                   depthwise_initializer=Constant(np.load('weights_full_land/depthwise_conv2d_54_Kernel')),
                   bias_initializer=Constant(np.load('weights_full_land/depthwise_conv2d_54_Bias')))(relu51_1)
conv52_1 = Conv2D(filters=288, kernel_size=[1, 1], strides=[1, 1], padding="valid", dilation_rate=[1, 1],
                   kernel_initializer=Constant(np.load('weights_full_land/conv2d_55_Kernel').transpose(1,2,3,0)),
                   bias_initializer=Constant(np.load('weights_full_land/conv2d_55_Bias')))(depthconv52_1)
add52_1 = Add()([conv52_1, relu51_1])
relu52_1 = ReLU()(add52_1)
# Block_53
depthconv53_1 = DepthwiseConv2D(kernel_size=[3, 3], strides=[1, 1], padding="same", depth_multiplier=1, dilation_rate=[1, 1],
                   depthwise_initializer=Constant(np.load('weights_full_land/depthwise_conv2d_55_Kernel')),
                   bias_initializer=Constant(np.load('weights_full_land/depthwise_conv2d_55_Bias')))(relu52_1)
conv53_1 = Conv2D(filters=288, kernel_size=[1, 1], strides=[1, 1], padding="valid", dilation_rate=[1, 1],
                   kernel_initializer=Constant(np.load('weights_full_land/conv2d_56_Kernel').transpose(1,2,3,0)),
                   bias_initializer=Constant(np.load('weights_full_land/conv2d_56_Bias')))(depthconv53_1)
add53_1 = Add()([conv53_1, relu52_1])
relu53_1 = ReLU()(add53_1)
# Block_54
depthconv54_1 = DepthwiseConv2D(kernel_size=[3, 3], strides=[1, 1], padding="same", depth_multiplier=1, dilation_rate=[1, 1],
                   depthwise_initializer=Constant(np.load('weights_full_land/depthwise_conv2d_56_Kernel')),
                   bias_initializer=Constant(np.load('weights_full_land/depthwise_conv2d_56_Bias')))(relu53_1)
conv54_1 = Conv2D(filters=288, kernel_size=[1, 1], strides=[1, 1], padding="valid", dilation_rate=[1, 1],
                   kernel_initializer=Constant(np.load('weights_full_land/conv2d_57_Kernel').transpose(1,2,3,0)),
                   bias_initializer=Constant(np.load('weights_full_land/conv2d_57_Bias')))(depthconv54_1)
add54_1 = Add()([conv54_1, relu53_1])
relu54_1 = ReLU()(add54_1)
# Block_55: downsampling residual unit at constant width (288 -> 288 ch);
# pooled skip is added directly (no channel padding), as in Block_47.
depthconv55_1 = DepthwiseConv2D(kernel_size=[3, 3], strides=[2, 2], padding="same", depth_multiplier=1, dilation_rate=[1, 1],
                   depthwise_initializer=Constant(np.load('weights_full_land/depthwise_conv2d_57_Kernel')),
                   bias_initializer=Constant(np.load('weights_full_land/depthwise_conv2d_57_Bias')))(relu54_1)
conv55_1 = Conv2D(filters=288, kernel_size=[1, 1], strides=[1, 1], padding="valid", dilation_rate=[1, 1],
                   kernel_initializer=Constant(np.load('weights_full_land/conv2d_58_Kernel').transpose(1,2,3,0)),
                   bias_initializer=Constant(np.load('weights_full_land/conv2d_58_Bias')))(depthconv55_1)
maxpool55_1 = MaxPool2D(pool_size=[2, 2], strides=[2, 2], padding='valid')(relu54_1)
add55_1 = Add()([conv55_1, maxpool55_1])
relu55_1 = ReLU()(add55_1)
# Blocks 56-62: identity residual units at 288 channels.
# Block_56
depthconv56_1 = DepthwiseConv2D(kernel_size=[3, 3], strides=[1, 1], padding="same", depth_multiplier=1, dilation_rate=[1, 1],
                   depthwise_initializer=Constant(np.load('weights_full_land/depthwise_conv2d_58_Kernel')),
                   bias_initializer=Constant(np.load('weights_full_land/depthwise_conv2d_58_Bias')))(relu55_1)
conv56_1 = Conv2D(filters=288, kernel_size=[1, 1], strides=[1, 1], padding="valid", dilation_rate=[1, 1],
                   kernel_initializer=Constant(np.load('weights_full_land/conv2d_59_Kernel').transpose(1,2,3,0)),
                   bias_initializer=Constant(np.load('weights_full_land/conv2d_59_Bias')))(depthconv56_1)
add56_1 = Add()([conv56_1, relu55_1])
relu56_1 = ReLU()(add56_1)
# Block_57
depthconv57_1 = DepthwiseConv2D(kernel_size=[3, 3], strides=[1, 1], padding="same", depth_multiplier=1, dilation_rate=[1, 1],
                   depthwise_initializer=Constant(np.load('weights_full_land/depthwise_conv2d_59_Kernel')),
                   bias_initializer=Constant(np.load('weights_full_land/depthwise_conv2d_59_Bias')))(relu56_1)
conv57_1 = Conv2D(filters=288, kernel_size=[1, 1], strides=[1, 1], padding="valid", dilation_rate=[1, 1],
                   kernel_initializer=Constant(np.load('weights_full_land/conv2d_60_Kernel').transpose(1,2,3,0)),
                   bias_initializer=Constant(np.load('weights_full_land/conv2d_60_Bias')))(depthconv57_1)
add57_1 = Add()([conv57_1, relu56_1])
relu57_1 = ReLU()(add57_1)
# Block_58
depthconv58_1 = DepthwiseConv2D(kernel_size=[3, 3], strides=[1, 1], padding="same", depth_multiplier=1, dilation_rate=[1, 1],
                   depthwise_initializer=Constant(np.load('weights_full_land/depthwise_conv2d_60_Kernel')),
                   bias_initializer=Constant(np.load('weights_full_land/depthwise_conv2d_60_Bias')))(relu57_1)
conv58_1 = Conv2D(filters=288, kernel_size=[1, 1], strides=[1, 1], padding="valid", dilation_rate=[1, 1],
                   kernel_initializer=Constant(np.load('weights_full_land/conv2d_61_Kernel').transpose(1,2,3,0)),
                   bias_initializer=Constant(np.load('weights_full_land/conv2d_61_Bias')))(depthconv58_1)
add58_1 = Add()([conv58_1, relu57_1])
relu58_1 = ReLU()(add58_1)
# Block_59
depthconv59_1 = DepthwiseConv2D(kernel_size=[3, 3], strides=[1, 1], padding="same", depth_multiplier=1, dilation_rate=[1, 1],
                   depthwise_initializer=Constant(np.load('weights_full_land/depthwise_conv2d_61_Kernel')),
                   bias_initializer=Constant(np.load('weights_full_land/depthwise_conv2d_61_Bias')))(relu58_1)
conv59_1 = Conv2D(filters=288, kernel_size=[1, 1], strides=[1, 1], padding="valid", dilation_rate=[1, 1],
                   kernel_initializer=Constant(np.load('weights_full_land/conv2d_62_Kernel').transpose(1,2,3,0)),
                   bias_initializer=Constant(np.load('weights_full_land/conv2d_62_Bias')))(depthconv59_1)
add59_1 = Add()([conv59_1, relu58_1])
relu59_1 = ReLU()(add59_1)
# Block_60
depthconv60_1 = DepthwiseConv2D(kernel_size=[3, 3], strides=[1, 1], padding="same", depth_multiplier=1, dilation_rate=[1, 1],
                   depthwise_initializer=Constant(np.load('weights_full_land/depthwise_conv2d_62_Kernel')),
                   bias_initializer=Constant(np.load('weights_full_land/depthwise_conv2d_62_Bias')))(relu59_1)
conv60_1 = Conv2D(filters=288, kernel_size=[1, 1], strides=[1, 1], padding="valid", dilation_rate=[1, 1],
                   kernel_initializer=Constant(np.load('weights_full_land/conv2d_63_Kernel').transpose(1,2,3,0)),
                   bias_initializer=Constant(np.load('weights_full_land/conv2d_63_Bias')))(depthconv60_1)
add60_1 = Add()([conv60_1, relu59_1])
relu60_1 = ReLU()(add60_1)
# Block_61
depthconv61_1 = DepthwiseConv2D(kernel_size=[3, 3], strides=[1, 1], padding="same", depth_multiplier=1, dilation_rate=[1, 1],
                   depthwise_initializer=Constant(np.load('weights_full_land/depthwise_conv2d_63_Kernel')),
                   bias_initializer=Constant(np.load('weights_full_land/depthwise_conv2d_63_Bias')))(relu60_1)
conv61_1 = Conv2D(filters=288, kernel_size=[1, 1], strides=[1, 1], padding="valid", dilation_rate=[1, 1],
                   kernel_initializer=Constant(np.load('weights_full_land/conv2d_64_Kernel').transpose(1,2,3,0)),
                   bias_initializer=Constant(np.load('weights_full_land/conv2d_64_Bias')))(depthconv61_1)
add61_1 = Add()([conv61_1, relu60_1])
relu61_1 = ReLU()(add61_1)
# Block_62
depthconv62_1 = DepthwiseConv2D(kernel_size=[3, 3], strides=[1, 1], padding="same", depth_multiplier=1, dilation_rate=[1, 1],
                   depthwise_initializer=Constant(np.load('weights_full_land/depthwise_conv2d_64_Kernel')),
                   bias_initializer=Constant(np.load('weights_full_land/depthwise_conv2d_64_Bias')))(relu61_1)
conv62_1 = Conv2D(filters=288, kernel_size=[1, 1], strides=[1, 1], padding="valid", dilation_rate=[1, 1],
                   kernel_initializer=Constant(np.load('weights_full_land/conv2d_65_Kernel').transpose(1,2,3,0)),
                   bias_initializer=Constant(np.load('weights_full_land/conv2d_65_Bias')))(depthconv62_1)
add62_1 = Add()([conv62_1, relu61_1])
relu62_1 = ReLU()(add62_1)
# Block_63
depthconv63_1 = DepthwiseConv2D(kernel_size=[3, 3], strides=[1, 1], padding="same", depth_multiplier=1, dilation_rate=[1, 1],
depthwise_initializer=Constant(np.load('weights_full_land/depthwise_conv2d_66_Kernel')),
bias_initializer=Constant(np.load('weights_full_land/depthwise_conv2d_66_Bias')))(add25_3)
conv63_1 = Conv2D(filters=8, kernel_size=[1, 1], strides=[1, 1], padding="valid", dilation_rate=[1, 1], activation='relu',
kernel_initializer=Constant(np.load('weights_full_land/conv2d_67_Kernel').transpose(1,2,3,0)),
bias_initializer=Constant(np.load('weights_full_land/conv2d_67_Bias')))(depthconv63_1)
resize63_1 = tf.image.resize(conv63_1, np.load('weights_full_land/up_sampling2d_3_Size'))
depthconv63_2 = DepthwiseConv2D(kernel_size=[3, 3], strides=[1, 1], padding="same", depth_multiplier=1, dilation_rate=[1, 1],
depthwise_initializer=Constant(np.load('weights_full_land/depthwise_conv2d_65_Kernel')),
bias_initializer=Constant(np.load('weights_full_land/depthwise_conv2d_65_Bias')))(relu2_1)
conv63_2 = Conv2D(filters=8, kernel_size=[1, 1], strides=[1, 1], padding="valid", dilation_rate=[1, 1], activation='relu',
kernel_initializer=Constant(np.load('weights_full_land/conv2d_66_Kernel').transpose(1,2,3,0)),
bias_initializer=Constant(np.load('weights_full_land/conv2d_66_Bias')))(depthconv63_2)
add63_1 = Add()([resize63_1, conv63_2])
depthconv63_3 = DepthwiseConv2D(kernel_size=[3, 3], strides=[1, 1], padding="same", depth_multiplier=1, dilation_rate=[1, 1],
depthwise_initializer=Constant(np.load('weights_full_land/depthwise_conv2d_67_Kernel')),
bias_initializer=Constant(np.load('weights_full_land/depthwise_conv2d_67_Bias')))(add63_1)
conv63_3 = Conv2D(filters=8, kernel_size=[1, 1], strides=[1, 1], padding="valid", dilation_rate=[1, 1], activation='relu',
kernel_initializer=Constant(np.load('weights_full_land/conv2d_68_Kernel').transpose(1,2,3,0)),
bias_initializer=Constant(np.load('weights_full_land/conv2d_68_Bias')))(depthconv63_3)
# Final Block_99: the three output heads of the model.
# Head 1: per-pixel segmentation map from the fused high-resolution features.
conv99_1 = Conv2D(filters=1, kernel_size=[3, 3], strides=[1, 1], padding="same", dilation_rate=[1, 1],
                  kernel_initializer=Constant(np.load('weights_full_land/output_segmentation_Kernel').transpose(1,2,3,0)),
                  bias_initializer=Constant(np.load('weights_full_land/output_segmentation_Bias')), name='output_segmentation')(conv63_3)
# Head 2: pose-presence flag, squashed to (0, 1) by a sigmoid.
conv99_2 = Conv2D(filters=1, kernel_size=[2, 2], strides=[1, 1], padding="valid", dilation_rate=[1, 1],
                  kernel_initializer=Constant(np.load('weights_full_land/conv_poseflag_Kernel').transpose(1,2,3,0)),
                  bias_initializer=Constant(np.load('weights_full_land/conv_poseflag_Bias')))(relu62_1)
sigm99_1 = tf.math.sigmoid(conv99_2, name='output_poseflag')
# reshape99_1 = tf.reshape(sigm99_1, (1, 1), name='output_poseflag')
# Head 3: flat 156-element landmark vector (presumably 39 keypoints x 4
# values each -- confirm against the consumer of the 'ld_3d' output).
conv99_3 = Conv2D(filters=156, kernel_size=[2, 2], strides=[1, 1], padding="valid", dilation_rate=[1, 1],
                  kernel_initializer=Constant(np.load('weights_full_land/convld_3d_Kernel').transpose(1,2,3,0)),
                  bias_initializer=Constant(np.load('weights_full_land/convld_3d_Bias')))(relu62_1)
reshape99_2 = tf.reshape(conv99_3, (1, 156), name='ld_3d')
model = Model(inputs=inputs, outputs=[conv99_1, sigm99_1, reshape99_2])
model.summary()
# Export 1: TensorFlow SavedModel directory and Keras HDF5 file.
tf.saved_model.save(model, 'saved_model_full_pose_landmark_39kp')
model.save('full_pose_landmark_39kp.h5')
# Export 2: frozen GraphDef (.pb) -- wrap the model in a concrete function
# and fold all variables into graph constants.
full_model = tf.function(lambda inputs: model(inputs))
full_model = full_model.get_concrete_function(inputs = (tf.TensorSpec(model.inputs[0].shape, model.inputs[0].dtype)))
frozen_func = convert_variables_to_constants_v2(full_model, lower_control_flow=False)
frozen_func.graph.as_graph_def()
tf.io.write_graph(graph_or_graph_def=frozen_func.graph,
                  logdir=".",
                  name="full_pose_landmark_39kp_256x256_float32.pb",
                  as_text=False)
# Export 3: plain TFLite model. No Quantization - Input/Output=float32
converter = tf.lite.TFLiteConverter.from_keras_model(model)
tflite_model = converter.convert()
with open('full_pose_landmark_39kp_256x256_float32.tflite', 'wb') as w:
    w.write(tflite_model)
print("tflite convert complete! - full_pose_landmark_39kp_256x256_float32.tflite")
# Export 4: Weight Quantization - Input/Output=float32
converter = tf.lite.TFLiteConverter.from_keras_model(model)
converter.optimizations = [tf.lite.Optimize.OPTIMIZE_FOR_SIZE]
tflite_model = converter.convert()
with open('full_pose_landmark_39kp_256x256_weight_quant.tflite', 'wb') as w:
    w.write(tflite_model)
print("Weight Quantization complete! - full_pose_landmark_39kp_256x256_weight_quant.tflite")
def representative_dataset_gen():
    """Yield calibration samples for TFLite post-training quantization.

    Each sample from the module-level `raw_test_data` array is resized to
    the model's 256x256 input resolution and given a leading batch axis.
    """
    for sample in raw_test_data:
        resized = tf.image.resize(sample, (256, 256))
        yield [resized[np.newaxis, :, :, :]]
raw_test_data = np.load('calibration_data_img_person.npy', allow_pickle=True)  # calibration images consumed by representative_dataset_gen
# Integer Quantization - Input/Output=float32
converter = tf.lite.TFLiteConverter.from_keras_model(model)
converter.optimizations = [tf.lite.Optimize.DEFAULT]
converter.representative_dataset = representative_dataset_gen
tflite_quant_model = converter.convert()
with open('full_pose_landmark_39kp_256x256_integer_quant.tflite', 'wb') as w:
    w.write(tflite_quant_model)
print("Integer Quantization complete! - full_pose_landmark_39kp_256x256_integer_quant.tflite")
# Full Integer Quantization - Input/Output=uint8
# NOTE(review): the original comment said int8, but the inference
# input/output types configured below are tf.uint8.
converter = tf.lite.TFLiteConverter.from_keras_model(model)
converter.optimizations = [tf.lite.Optimize.DEFAULT]
converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8]
converter.inference_input_type = tf.uint8
converter.inference_output_type = tf.uint8
converter.representative_dataset = representative_dataset_gen
tflite_quant_model = converter.convert()
with open('full_pose_landmark_39kp_256x256_full_integer_quant.tflite', 'wb') as w:
    w.write(tflite_quant_model)
print("Full Integer Quantization complete! - full_pose_landmark_39kp_256x256_full_integer_quant.tflite")
# Float16 Quantization - Input/Output=float32
converter = tf.lite.TFLiteConverter.from_keras_model(model)
converter.optimizations = [tf.lite.Optimize.DEFAULT]
converter.target_spec.supported_types = [tf.float16]
tflite_quant_model = converter.convert()
with open('full_pose_landmark_39kp_256x256_float16_quant.tflite', 'wb') as w:
    w.write(tflite_quant_model)
print("Float16 Quantization complete! - full_pose_landmark_39kp_256x256_float16_quant.tflite")
# EdgeTPU: compile the full-integer model for the Coral Edge TPU.
import subprocess
result = subprocess.check_output(["edgetpu_compiler", "-s", "full_pose_landmark_39kp_256x256_full_integer_quant.tflite"])
print(result)
|
|
# compute FOM sol for test values of parameters mu
import numpy as np
import os,setrun
# Test parameter sets and per-case time-step counts written by an earlier script.
mu_test = np.loadtxt("../_output/mu_test.txt") # parameters
ts_test = np.loadtxt("../_output/ts_test.txt") # no of time-steps
L = mu_test.shape[0]  # number of test cases
r = 0                 # refinement offset
n = 14 + r            # grid exponent: 2**n cells
h = 10.0 / 2**n       # cell width (domain length presumably 10 -- confirm)
nu = 0.5              # presumably a CFL-like number -- confirm
dt = h*nu             # time-step size
for l in range(0,L):
    print( "="*60 + "l={:d}".format(l))
    rundata = setrun.setrun()
    mu1 = mu_test[l,0]
    mu2 = mu_test[l,1]
    nts = int(ts_test[l]) # no of time-steps
    # Configure the Clawpack run for this parameter pair.
    rundata.clawdata.num_cells[0] = 2**n # mx
    rundata.clawdata.order = 1
    rundata.clawdata.output_style = 1
    rundata.clawdata.num_output_times = nts
    rundata.clawdata.tfinal = nts*dt*(2**r)
    read_times = np.arange(1,nts) # clawpack outputs one more file?
    # NOTE(review): frame 0 and frame nts are never read, so
    # data_list[0]/times_list[0] keep their zero initial values -- confirm
    # that is intended.
    rundata.probdata.mu1 = mu1
    rundata.probdata.mu2 = mu2
    data_list = [np.zeros(2**n)]*nts  # placeholders; entries 1..nts-1 are overwritten below
    times_list = [0.0]*nts
    # Regenerate the .data files and run the Fortran solver via make.
    os.system('rm -f .data')
    rundata.write()
    os.system('touch .data')
    os.system('make output')
    for k in read_times:
        data = np.loadtxt('_output/fort.q{:04}'.format(k),skiprows=6)
        data_list[k] = data
        with open("_output/fort.t{:04}".format(k)) as infile:
            times = infile.readlines()
            time = float(times[0].split()[0])  # first value on the first line is the frame time
            times_list[k] = time
    # Stack snapshots as columns: shape (num_cells, nts).
    data_all = np.vstack(data_list).T
    np.save('../_output/fom_test_{:02}.npy'.format(l),data_all)
    np.save('../_output/fom_test_times_{:02}.npy'.format(l),times_list)
|
|
"""
Contains classes that convert from RGB to various other color spaces and back.
"""
import torch
import torch.nn as nn
from .mister_ed.utils import pytorch_utils as utils
from torch.autograd import Variable
import numpy as np
from recoloradv import norms
import math
class ColorSpace(object):
    """
    Abstract interface for converting images between RGB and another
    color space.  Implementations work on Nx3xWxH tensors and keep all
    channel values within the [0, 1] range.
    """

    def from_rgb(self, imgs):
        """Map an Nx3xWxH RGB tensor into this color space."""
        raise NotImplementedError()

    def to_rgb(self, imgs):
        """Map an Nx3xWxH tensor in this color space back to RGB."""
        raise NotImplementedError()
class RGBColorSpace(ColorSpace):
    """Trivial color space: RGB maps to itself in both directions."""

    def from_rgb(self, imgs):
        # Identity: the input is already RGB.
        return imgs

    def to_rgb(self, imgs):
        # Identity: nothing to convert back.
        return imgs
class YPbPrColorSpace(ColorSpace):
    """
    YPbPr color space. Uses ITU-R BT.601 standard by default.
    """

    def __init__(self, kr=0.299, kg=0.587, kb=0.114, luma_factor=1,
                 chroma_factor=1):
        # Per-channel luma weights (BT.601 by default).
        self.kr = kr
        self.kg = kg
        self.kb = kb
        # Optional scaling applied to the luma / chroma channels.
        self.luma_factor = luma_factor
        self.chroma_factor = chroma_factor

    def from_rgb(self, imgs):
        """RGB -> YPbPr; chroma channels are shifted by +0.5 into [0, 1]."""
        red, green, blue = imgs.permute(1, 0, 2, 3)
        luma = red * self.kr + green * self.kg + blue * self.kb
        pb = (blue - luma) / (2 * (1 - self.kb))
        pr = (red - luma) / (2 * (1 - self.kr))
        channels = [luma * self.luma_factor,
                    pb * self.chroma_factor + 0.5,
                    pr * self.chroma_factor + 0.5]
        return torch.stack(channels, 1)

    def to_rgb(self, imgs):
        """Invert from_rgb and clamp the reconstructed RGB to [0, 1]."""
        luma_scaled, pb_shifted, pr_shifted = imgs.permute(1, 0, 2, 3)
        luma = luma_scaled / self.luma_factor
        pb = (pb_shifted - 0.5) / self.chroma_factor
        pr = (pr_shifted - 0.5) / self.chroma_factor
        blue = pb * 2 * (1 - self.kb) + luma
        red = pr * 2 * (1 - self.kr) + luma
        green = (luma - red * self.kr - blue * self.kb) / self.kg
        return torch.stack([red, green, blue], 1).clamp(0, 1)
class ApproxHSVColorSpace(ColorSpace):
    """
    Converts from RGB to approximately the HSV cone using a much smoother
    transformation.
    """

    def from_rgb(self, imgs):
        """Project RGB onto a value axis plus two smooth chroma axes."""
        red, green, blue = imgs.permute(1, 0, 2, 3)
        chroma_u = red * np.sqrt(2) / 3 - green / (np.sqrt(2) * 3) - blue / (np.sqrt(2) * 3)
        chroma_v = green / np.sqrt(6) - blue / np.sqrt(6)
        value, _ = imgs.max(1)
        # Shift the chroma axes by +0.5 into [0, 1].
        return torch.stack([value, chroma_u + 0.5, chroma_v + 0.5], 1)

    def to_rgb(self, imgs):
        """Invert the projection and restore the per-pixel maximum."""
        value, u_shifted, v_shifted = imgs.permute(1, 0, 2, 3)
        chroma_u, chroma_v = u_shifted - 0.5, v_shifted - 0.5
        red_proj = float(np.sqrt(2)) * chroma_u
        green_proj = -chroma_u / np.sqrt(2) + chroma_v * np.sqrt(3 / 2)
        blue_proj = -chroma_u / np.sqrt(2) - chroma_v * np.sqrt(3 / 2)
        # Shift all channels so the largest matches the stored value channel.
        offset = value - torch.max(torch.stack([red_proj, green_proj, blue_proj], 1), 1)[0]
        red, green, blue = red_proj + offset, green_proj + offset, blue_proj + offset
        return torch.stack([red, green, blue], 1).clamp(0, 1)
class HSVConeColorSpace(ColorSpace):
    """
    Converts from RGB to the HSV "cone", where (x, y, z) =
    (s * v cos h, s * v sin h, v). Note that this cone is then squashed to fit
    in [0, 1]^3 by letting (x', y', z') = ((x + 1) / 2, (y + 1) / 2, z).

    WARNING: has a very complex derivative, not very useful in practice
    """

    def from_rgb(self, imgs):
        """Convert an Nx3xWxH RGB tensor to the squashed HSV-cone encoding."""
        r, g, b = imgs.permute(1, 0, 2, 3)
        mx, argmx = imgs.max(1)
        mn, _ = imgs.min(1)
        chroma = mx - mn
        eps = 1e-10  # guards the division for achromatic (chroma == 0) pixels
        # Hue in radians, one piecewise formula per dominant channel.
        h_max_r = math.pi / 3 * (g - b) / (chroma + eps)
        h_max_g = math.pi / 3 * (b - r) / (chroma + eps) + math.pi * 2 / 3
        h_max_b = math.pi / 3 * (r - g) / (chroma + eps) + math.pi * 4 / 3
        h = (((argmx == 0) & (chroma != 0)).float() * h_max_r
             + ((argmx == 1) & (chroma != 0)).float() * h_max_g
             + ((argmx == 2) & (chroma != 0)).float() * h_max_b)
        x = torch.cos(h) * chroma
        y = torch.sin(h) * chroma
        z = mx
        return torch.stack([(x + 1) / 2, (y + 1) / 2, z], 1)

    def _to_rgb_part(self, h, chroma, v, n):
        """
        Implements the function f(n) defined here:
        https://en.wikipedia.org/wiki/HSL_and_HSV#Alternative_HSV_to_RGB
        """
        # BUG FIX: the formula is k = (n + H / 60deg) mod 6; with h in
        # radians that is n + h / (pi / 3).  The previous code multiplied
        # by pi / 3 instead, mis-scaling the hue and producing wrong colors
        # (e.g. pure green round-tripped with a nonzero blue channel).
        k = (n + h / (math.pi / 3)) % 6
        return v - chroma * torch.min(k, 4 - k).clamp(0, 1)

    def to_rgb(self, imgs):
        """Convert the squashed cone encoding back to RGB, clamped to [0, 1]."""
        xp, yp, z = imgs.permute(1, 0, 2, 3)
        x, y = xp * 2 - 1, yp * 2 - 1
        # prevent NaN gradients when calculating atan2
        x_nonzero = (1 - 2 * (torch.sign(x) == -1).float()) * (torch.abs(x) + 1e-10)
        h = torch.atan2(y, x_nonzero)
        v = z.clamp(0, 1)
        chroma = torch.min(torch.sqrt(x ** 2 + y ** 2 + 1e-10), v)
        r = self._to_rgb_part(h, chroma, v, 5)
        g = self._to_rgb_part(h, chroma, v, 3)
        b = self._to_rgb_part(h, chroma, v, 1)
        return torch.stack([r, g, b], 1).clamp(0, 1)
class CIEXYZColorSpace(ColorSpace):
    """
    The 1931 CIE XYZ color space (assuming input is in sRGB).

    Warning: may have values outside [0, 1] range. Should only be used in
    the process of converting to/from other color spaces.
    """

    def from_rgb(self, imgs):
        """sRGB -> linear RGB (inverse gamma) -> XYZ (linear matrix)."""
        # apply gamma correction; the mask selects the linear segment of the
        # sRGB transfer function for small values
        small_values_mask = (imgs < 0.04045).float()
        imgs_corrected = (
            (imgs / 12.92) * small_values_mask +
            ((imgs + 0.055) / 1.055) ** 2.4 * (1 - small_values_mask)
        )
        # linear transformation to XYZ (sRGB primaries)
        r, g, b = imgs_corrected.permute(1, 0, 2, 3)
        x = 0.4124 * r + 0.3576 * g + 0.1805 * b
        y = 0.2126 * r + 0.7152 * g + 0.0722 * b
        z = 0.0193 * r + 0.1192 * g + 0.9504 * b
        return torch.stack([x, y, z], 1)

    def to_rgb(self, imgs):
        """XYZ -> linear RGB (inverse matrix) -> sRGB (gamma encode)."""
        # linear transformation
        x, y, z = imgs.permute(1, 0, 2, 3)
        r = 3.2406 * x - 1.5372 * y - 0.4986 * z
        g = -0.9689 * x + 1.8758 * y + 0.0415 * z
        b = 0.0557 * x - 0.2040 * y + 1.0570 * z
        imgs = torch.stack([r, g, b], 1)
        # apply gamma correction
        small_values_mask = (imgs < 0.0031308).float()
        imgs_clamped = imgs.clamp(min=1e-10)  # prevent NaN gradients
        imgs_corrected = (
            (12.92 * imgs) * small_values_mask +
            (1.055 * imgs_clamped ** (1 / 2.4) - 0.055) *
            (1 - small_values_mask)
        )
        return imgs_corrected
class CIELUVColorSpace(ColorSpace):
    """
    Converts to the 1976 CIE L*u*v* color space.
    """

    def __init__(self, up_white=0.1978, vp_white=0.4683, y_white=1,
                 eps=1e-10):
        self.xyz_cspace = CIEXYZColorSpace()  # RGB <-> XYZ intermediary
        # u'/v' chromaticity and Y luminance of the reference white point.
        self.up_white = up_white
        self.vp_white = vp_white
        self.y_white = y_white
        self.eps = eps  # guards divisions and fractional powers near zero

    def from_rgb(self, imgs):
        """RGB -> XYZ -> L*u*v*, rescaled so all channels land in [0, 1]."""
        x, y, z = self.xyz_cspace.from_rgb(imgs).permute(1, 0, 2, 3)
        # calculate u' and v'
        denom = x + 15 * y + 3 * z + self.eps
        up = 4 * x / denom
        vp = 9 * y / denom
        # calculate L*, u*, and v*; the mask picks the linear branch of the
        # L* definition for very small luminances
        small_values_mask = (y / self.y_white < (6 / 29) ** 3).float()
        y_clamped = y.clamp(min=self.eps)  # prevent NaN gradients
        L = (
            ((29 / 3) ** 3 * y / self.y_white) * small_values_mask +
            (116 * (y_clamped / self.y_white) ** (1 / 3) - 16) *
            (1 - small_values_mask)
        )
        u = 13 * L * (up - self.up_white)
        v = 13 * L * (vp - self.vp_white)
        # Rescale: L in [0, 100] -> [0, 1]; u, v assumed in [-100, 100].
        return torch.stack([L / 100, (u + 100) / 200, (v + 100) / 200], 1)

    def to_rgb(self, imgs):
        """Undo the rescaling, invert L*u*v* -> XYZ, then XYZ -> RGB."""
        L = imgs[:, 0, :, :] * 100
        u = imgs[:, 1, :, :] * 200 - 100
        v = imgs[:, 2, :, :] * 200 - 100
        up = u / (13 * L + self.eps) + self.up_white
        vp = v / (13 * L + self.eps) + self.vp_white
        small_values_mask = (L <= 8).float()  # linear branch of the inverse L*
        y = (
            (self.y_white * L * (3 / 29) ** 3) * small_values_mask +
            (self.y_white * ((L + 16) / 116) ** 3) * (1 - small_values_mask)
        )
        denom = 4 * vp + self.eps
        x = y * 9 * up / denom
        z = y * (12 - 3 * up - 20 * vp) / denom
        # clamp(0, 1.1) tolerates slightly out-of-gamut XYZ before the final
        # RGB clamp -- presumably intentional slack; confirm.
        return self.xyz_cspace.to_rgb(
            torch.stack([x, y, z], 1).clamp(0, 1.1)).clamp(0, 1)
|
|
import matplotlib.pyplot as plt
import numpy as np
"""
Plot RMS for x-/y-/z-signal vs 1/3 octave band frequencies and compare to VC curves.
"""
# rms_x_all = np.loadtxt("14208_betacampus_pos1_rms_x_all.txt")
# rms_y_all = np.loadtxt("14208_betacampus_pos1_rms_y_all.txt")
# rms_z_all = np.loadtxt("14208_betacampus_pos1_rms_z_all.txt")
# rms_x_all = np.loadtxt("14208_betacampus_pos2_rms_x_all.txt")
# rms_y_all = np.loadtxt("14208_betacampus_pos2_rms_y_all.txt")
# rms_z_all = np.loadtxt("14208_betacampus_pos2_rms_z_all.txt")
# rms_x_all = np.loadtxt("14208_Huygensgebouw_rms_x_all.txt")
# rms_y_all = np.loadtxt("14208_Huygensgebouw_rms_y_all.txt")
# rms_z_all = np.loadtxt("14208_Huygensgebouw_rms_z_all.txt")
# rms_x_all = np.loadtxt("14208_betacampus_pos1_rms_x_weekday.txt")
# rms_y_all = np.loadtxt("14208_betacampus_pos1_rms_y_weekday.txt")
# rms_z_all = np.loadtxt("14208_betacampus_pos1_rms_z_weekday.txt")
# rms_x_all = np.loadtxt("14208_betacampus_pos2_rms_x_weekday.txt")
# rms_y_all = np.loadtxt("14208_betacampus_pos2_rms_y_weekday.txt")
# rms_z_all = np.loadtxt("14208_betacampus_pos2_rms_z_weekday.txt")
rms_x_all = np.loadtxt("14208_Huygensgebouw_rms_x_weekday.txt")
rms_y_all = np.loadtxt("14208_Huygensgebouw_rms_y_weekday.txt")
rms_z_all = np.loadtxt("14208_Huygensgebouw_rms_z_weekday.txt")
f_band = rms_x_all[..., 0]
def VC_curve(v_max, f_trans, f_band):
    """Evaluate a generic VC (vibration criterion) limit curve at f_band.

    Above the transition frequency the curve is flat at v_max; below it the
    allowed RMS velocity rises proportionally to 1/f.

    Parameters
    ----------
    v_max : float
        Plateau value of the curve (same unit as the plotted RMS, mm/s here).
    f_trans : float
        Transition frequency [Hz] below which the limit increases.
    f_band : array_like
        1/3 octave band centre frequencies [Hz].

    Returns
    -------
    numpy.ndarray
        Curve values with the same shape as f_band (float dtype).
    """
    f_band = np.asarray(f_band, dtype=float)
    # Vectorized form of the original per-element loop.
    return np.where(f_band < f_trans, v_max * f_trans / f_band, v_max)
# VC reference curves evaluated on the measured frequency bands.  Each curve
# halves the plateau of the previous one, from VC-A = 50/1000 mm/s down to
# VC-H, all with the 1/f rise below 8 Hz (see VC_curve).
v_VCA = VC_curve(50./1000., 8., f_band)
v_VCB = VC_curve(25./1000., 8., f_band)
v_VCC = VC_curve(12.5/1000., 8., f_band)
v_VCD = VC_curve(6.25/1000., 8., f_band)
v_VCE = VC_curve(3.125/1000., 8., f_band)
v_VCF = VC_curve(1.5625/1000., 8., f_band)
v_VCG = VC_curve(0.78125/1000., 8., f_band)
v_VCH = VC_curve(0.390625/1000., 8., f_band)
def _plot_vc_band_subplot(index, rms_all, component, show_legend=False):
    """Draw one of the three stacked subplots.

    Plots the min/mean/max RMS band of one signal component (shaded between
    min and max) together with the eight VC reference curves, on log-log
    axes.  The legend is drawn only when show_legend is True (subplot 1),
    exactly as in the original copy-pasted sections.

    Parameters
    ----------
    index : int
        Subplot row (1..3) in the 3x1 grid.
    rms_all : numpy.ndarray
        Data table; column 0 = band frequency, 1 = min, 3 = mean, 5 = max.
    component : str
        Signal component name used in the y-axis label ('x', 'y' or 'z').
    show_legend : bool, optional
        Whether to attach the legend to this subplot.
    """
    plt.subplot(3, 1, index)
    freqs = rms_all[..., 0]
    plt.plot(freqs, rms_all[..., 5], c='k', ls='dotted', label='maximum')
    plt.plot(freqs, rms_all[..., 3], c='k', ls='solid', label='mean')
    plt.plot(freqs, rms_all[..., 1], c='k', ls='dotted', label='minimum')
    plt.fill_between(freqs, rms_all[..., 1], rms_all[..., 5], color='lightgray')
    # VC reference curves, in decreasing-amplitude order.
    for v_curve, curve_name in [(v_VCA, 'VC-A'), (v_VCB, 'VC-B'),
                                (v_VCC, 'VC-C'), (v_VCD, 'VC-D'),
                                (v_VCE, 'VC-E'), (v_VCF, 'VC-F'),
                                (v_VCG, 'VC-G'), (v_VCH, 'VC-H')]:
        plt.plot(f_band, v_curve, '-', label=curve_name)
    if show_legend:
        plt.legend(bbox_to_anchor=(-0.05, -0.5))
    plt.xscale('log')
    plt.xlim([np.power(10, -0.2), 100])
    plt.yscale('log')
    plt.grid(which='both', lw=0.3)
    plt.xlabel('1/3 octave band frequency [Hz]')
    plt.ylabel('RMS of {}-signal [mm/s]'.format(component))


# One subplot per component; previously three near-identical copy-pasted
# sections.  Only the first subplot carries the legend.
_plot_vc_band_subplot(1, rms_x_all, 'x', show_legend=True)
_plot_vc_band_subplot(2, rms_y_all, 'y')
_plot_vc_band_subplot(3, rms_z_all, 'z')
plt.plot()  # NOTE(review): no-op call with no arguments -- probably a leftover
plt.subplots_adjust(top=0.95, bottom=0.05, left=0.15, right=0.95, hspace=0.3)
# Alternative titles matching the commented-out data sets above:
# plt.suptitle('Effective velocities at Betacampus position 1 during weekdays between 6:00 and 18:00 hours')
# plt.suptitle('Effective velocities at Betacampus position 2 during weekdays between 6:00 and 18:00 hours')
plt.suptitle('Effective velocities at Huygensgebouw during weekdays between 6:00 and 18:00 hours')
plt.show()
|
|
# Author: aqeelanwar
# Created: 12 June,2020, 7:06 PM
# Email: aqeel.anwar@gatech.edu
# Trainer: Vinit Gore
# Edited: 17 Dec, 2021
# Email: vinitgore@gmail.com
from tkinter import * # Tkinter is the package for creating simple Graphical User Interfaces (GUIs)
import random # python package to generate random numbers
import time # python package to measure time
import numpy as np # Numpy package is used to handle linear algebraic operations easily
from PIL import ImageTk,Image # PIL package used to process images
# Define useful parameters
'''
These variables act like settings for your Snake game.
Changing these variables will change the behaviour of the game.
'''
size_of_board = 600  # width/height of the square game window, in pixels
rows = 10  # Number of rows on the board
cols = 10  # Number of columns on the board
DELAY = 200  # delay between game steps, in milliseconds (passed to Tkinter after())
snake_initial_length = 3  # Initial length of the snake
RED_COLOR = "#EE4035"  # Red color value in hexadecimal format (#000000 to #FFFFFF)
BLUE_COLOR = "#0492CF"  # Blue color value in hexadecimal format (#000000 to #FFFFFF)
Green_color = "#7BC043"  # Green color value in hexadecimal format (NOTE(review): casing inconsistent with the other constants)
BLUE_COLOR_LIGHT = '#67B0CF'  # Light Blue color value in hexadecimal format (#000000 to #FFFFFF)
RED_COLOR_LIGHT = '#EE7E77'  # Light Red color value in hexadecimal format (#000000 to #FFFFFF)
class SnakeAndApple:
    """Tkinter implementation of the classic Snake game on a rows x cols grid."""

    # ------------------------------------------------------------------
    # Initialization Functions:
    # ------------------------------------------------------------------
    def __init__(self):
        """Create the window and canvas, wire up input handlers, start a round."""
        self.window = Tk()  # create an object of Tkinter window
        self.window.title("Snake-and-Apple")  # title of the window
        self.canvas = Canvas(self.window, width=size_of_board, height=size_of_board)  # create a canvas on the window having size of board as height and width
        self.canvas.pack()  # fills the canvas in the window.
        # Input from user in form of clicks and keyboard
        self.window.bind("<Key>", self.key_input)  # Keyboard input by user calls key_input function
        self.window.bind("<Button-1>", self.mouse_input)  # Button-1 means left-click. mouse_input function called when left click by user
        self.play_again()  # function defined below
        self.begin = False  # boolean value that becomes True when game starts

    def initialize_board(self):
        '''
        Initializes the board i.e. initialize apple and board and draw the rows and columns of the board
        '''
        self.board = []  # empty list that will store all cell positions on the board
        self.apple_obj = []  # canvas id of the apple rectangle (empty until placed)
        self.old_apple_cell = []  # cell of the most recently eaten apple (empty when none pending)
        # add each cell value to the board
        for i in range(rows):
            for j in range(cols):
                self.board.append((i, j))
        # draw row lines
        for i in range(rows):
            self.canvas.create_line(
                i * size_of_board / rows, 0, i * size_of_board / rows, size_of_board,
            )
        # draw column lines
        for i in range(cols):
            self.canvas.create_line(
                0, i * size_of_board / cols, size_of_board, i * size_of_board / cols,
            )

    def initialize_snake(self):
        '''
        Initialize snake object. Define its behaviour. Grow it till initial length.
        '''
        self.snake = []  # empty list to store positions of the entire snake
        self.crashed = False  # when snake crashes a wall (i.e. edges of the board), this variable becomes true
        self.snake_heading = "Right"  # initial direction of the snake
        self.last_key = self.snake_heading  # value that stores last key entered. Initially same as initial direction.
        # these actions are not available to play during the game
        # (moving directly opposite to the current heading).
        self.forbidden_actions = {}
        self.forbidden_actions["Right"] = "Left"
        self.forbidden_actions["Left"] = "Right"
        self.forbidden_actions["Up"] = "Down"
        self.forbidden_actions["Down"] = "Up"
        self.snake_objects = []  # canvas ids of the drawn snake cells
        # Grow till initial length
        for i in range(snake_initial_length):
            self.snake.append((i, 0))

    def play_again(self):
        '''
        This function is called when the user wants to play again.
        '''
        self.canvas.delete("all")  # library function to clear the window
        self.initialize_board()  # initialize board
        self.initialize_snake()  # initialize snake
        self.place_apple()  # initialize apple
        self.display_snake(mode="complete")  # draw the whole snake once
        self.begin_time = time.time()  # save the begin time value

    def mainloop(self):
        '''
        This function is the main loop. The game will keep running continuously because of the while loop below.
        '''
        while True:  # run infinitely
            self.window.update()  # update runs the input methods bound to the window
            if self.begin:  # game running case
                if not self.crashed:
                    # NOTE(review): update_snake() is executed immediately here
                    # and its return value (None) is passed to after(), so this
                    # acts as "step now, then wait DELAY ms", not a scheduled
                    # callback -- confirm this is intended.
                    self.window.after(DELAY, self.update_snake(self.last_key))  # DELAY is in milliseconds; the snake advances once per iteration
                else:  # game over case
                    self.begin = False
                    self.display_gameover()

    # ------------------------------------------------------------------
    # Drawing Functions:
    # The modules required to draw required game based object on canvas
    # ------------------------------------------------------------------
    def display_gameover(self):
        '''
        Display gameover.
        '''
        score = len(self.snake)  # score = final snake length
        self.canvas.delete("all")
        score_text = "Scores \n"
        # Display the string above
        self.canvas.create_text(
            size_of_board / 2,
            3 * size_of_board / 8,
            font="cmr 40 bold",
            fill=Green_color,
            text=score_text,
        )
        score_text = str(score)
        # Display score
        self.canvas.create_text(
            size_of_board / 2,
            1 * size_of_board / 2,
            font="cmr 50 bold",
            fill=BLUE_COLOR,
            text=score_text,
        )
        time_spent = str(np.round(time.time() - self.begin_time, 1)) + 'sec'  # time duration of the game calculated by subtracting current time and begin time.
        # Display time_spent
        self.canvas.create_text(
            size_of_board / 2,
            3 * size_of_board / 4,
            font="cmr 20 bold",
            fill=BLUE_COLOR,
            text=time_spent,
        )
        score_text = "Click to play again \n"
        # Display the string above
        self.canvas.create_text(
            size_of_board / 2,
            15 * size_of_board / 16,
            font="cmr 20 bold",
            fill="gray",
            text=score_text,
        )

    def place_apple(self):
        '''
        Place apple randomly anywhere except at the cells occupied by snake
        '''
        unoccupied_cels = set(self.board) - set(self.snake)  # find cells on the board unoccupied by snake
        self.apple_cell = random.choice(list(unoccupied_cels))  # randomly choose any one of the unoccupied cells
        row_h = int(size_of_board / rows)  # row-height
        col_w = int(size_of_board / cols)  # column-width
        x1 = self.apple_cell[0] * row_h  # x value of the bottom-left corner of the apple
        y1 = self.apple_cell[1] * col_w  # y value of the bottom-left corner of the apple
        x2 = x1 + row_h  # x value of the top-right corner of the apple
        y2 = y1 + col_w  # y value of the top-right corner of the apple
        # draw rectangle using the coordinates of the bottom-left and top-right corner cartesian values
        self.apple_obj = self.canvas.create_rectangle(
            x1, y1, x2, y2, fill=RED_COLOR_LIGHT, outline=BLUE_COLOR,  # rectangle color: red, border color: blue
        )

    def display_snake(self, mode=""):
        """Draw the snake: redraw every cell (mode="complete") or only the head."""
        # Remove tail from display if it exists
        if self.snake_objects != []:
            self.canvas.delete(self.snake_objects.pop(0))
        if mode == "complete":
            # Redraw every cell of the snake from scratch.
            for i, cell in enumerate(self.snake):
                # print(cell)
                row_h = int(size_of_board / rows)
                col_w = int(size_of_board / cols)
                x1 = cell[0] * row_h
                y1 = cell[1] * col_w
                x2 = x1 + row_h
                y2 = y1 + col_w
                self.snake_objects.append(
                    self.canvas.create_rectangle(
                        x1, y1, x2, y2, fill=BLUE_COLOR, outline=BLUE_COLOR,
                    )
                )
        else:
            # only update head
            cell = self.snake[-1]
            row_h = int(size_of_board / rows)
            col_w = int(size_of_board / cols)
            x1 = cell[0] * row_h
            y1 = cell[1] * col_w
            x2 = x1 + row_h
            y2 = y1 + col_w
            self.snake_objects.append(
                self.canvas.create_rectangle(
                    x1, y1, x2, y2, fill=BLUE_COLOR, outline=RED_COLOR,
                )
            )
            # If an apple was just eaten at the tail position, grow the snake
            # by re-inserting the eaten cell and redrawing the new tail.
            if self.snake[0] == self.old_apple_cell:
                self.snake.insert(0, self.old_apple_cell)
                self.old_apple_cell = []
                tail = self.snake[0]
                row_h = int(size_of_board / rows)
                col_w = int(size_of_board / cols)
                x1 = tail[0] * row_h
                y1 = tail[1] * col_w
                x2 = x1 + row_h
                y2 = y1 + col_w
                self.snake_objects.insert(
                    0,
                    self.canvas.create_rectangle(
                        x1, y1, x2, y2, fill=BLUE_COLOR, outline=RED_COLOR
                    ),
                )
        self.window.update()

    # ------------------------------------------------------------------
    # Logical Functions:
    # The modules required to carry out game logic
    # ------------------------------------------------------------------
    def update_snake(self, key):
        """Advance the snake one cell in direction `key`; detect crash/apple."""
        # Check if it hit the wall or its own body
        tail = self.snake[0]
        head = self.snake[-1]
        if tail != self.old_apple_cell:  # delete one cell from tail of snake to move it ahead by one position
            self.snake.pop(0)
        # add one cell from head to move it ahead
        if key == "Left":
            self.snake.append((head[0] - 1, head[1]))
        elif key == "Right":
            self.snake.append((head[0] + 1, head[1]))
        elif key == "Up":
            self.snake.append((head[0], head[1] - 1))
        elif key == "Down":
            self.snake.append((head[0], head[1] + 1))
        head = self.snake[-1]
        # Hit the wall / Hit on body
        if (
            head[0] > cols - 1
            or head[0] < 0
            or head[1] > rows - 1
            or head[1] < 0
            or len(set(self.snake)) != len(self.snake)  # duplicate cell => self-collision
        ):
            self.crashed = True
        # Got the apple
        elif self.apple_cell == head:
            self.old_apple_cell = self.apple_cell
            self.canvas.delete(self.apple_obj)
            self.place_apple()
            self.display_snake()
        else:
            self.snake_heading = key
            self.display_snake()

    def check_if_key_valid(self, key):
        '''
        Check if the key entered from the keyboard is valid or not.
        '''
        valid_keys = ["Up", "Down", "Left", "Right"]  # only these keys should change the behavior of the snake
        # key should be among the valid keys and should not be a forbidden action
        if key in valid_keys and self.forbidden_actions[self.snake_heading] != key:
            return True
        else:
            return False

    def mouse_input(self, event):
        '''
        Function called when mouse left-click is pressed.
        '''
        self.play_again()

    def key_input(self, event):
        '''
        Function called when keyboard input is pressed.
        '''
        if not self.crashed:
            key_pressed = event.keysym
            # Check if the pressed key is a valid key
            if self.check_if_key_valid(key_pressed):
                print(key_pressed)
                self.begin = True  # first valid key press starts the game
                self.last_key = key_pressed
# Run the game only when executed as a script, not on import.
if __name__ == "__main__":
    game_instance = SnakeAndApple()  # create a game instance. __init__() function is called here.
    game_instance.mainloop()  # main loop runs the game.
|
|
import os
import cv2
import numpy as np
from math import exp
import tensorflow as tf
from base64 import encodebytes
from PIL import Image, ImageFont, ImageDraw, ImageOps
from flask import Flask, flash, request, redirect, url_for, render_template,Response
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '0'  # show all TensorFlow log messages

# Load class-name labels, one per line.  The context manager closes the
# file, so the redundant f.close() that followed it has been removed.
with open('labels.txt', 'r') as f:
    class_names = [line.strip() for line in f.readlines()]
# Map class index 0-9 -> human-readable label.
classes = dict(zip(range(10), class_names))

# Absolute path of model.tflite in the current working directory.
# Equivalent to the original realpath/dirname/getcwd chain, simplified.
tflite_model = os.path.join(os.path.realpath(os.getcwd()), 'model.tflite')
class tensorflowTransform(object):
    """Image preprocessing pipeline applied before TFLite inference."""

    def tf_transforms(self, img):
        """Normalize, resize/crop to 224x224, and return an NCHW tensor."""
        imagenet_mean = [0.485, 0.456, 0.406]
        imagenet_std = [0.229, 0.224, 0.225]
        pixels = np.array(img) / 255.0
        # Per-channel normalization on the first three (RGB) channels.
        for channel_idx, (mean_c, std_c) in enumerate(zip(imagenet_mean, imagenet_std)):
            pixels[:, :, channel_idx] = (pixels[:, :, channel_idx] - mean_c) / std_c
        batched = tf.expand_dims(pixels, 0)
        resized = tf.keras.layers.experimental.preprocessing.Resizing(224, 224, interpolation = "bilinear")(batched)
        cropped = tf.keras.layers.experimental.preprocessing.CenterCrop(height = 224, width = 224)(resized)
        # NHWC -> NCHW, matching the model's expected input layout.
        return tf.transpose(cropped, [0, 3, 1, 2])
class TensorflowLiteClassificationModel:
    """Thin wrapper around a TFLite interpreter for single-image classification."""

    def __init__(self, model_path, labels, transform, classes, image_size=224):
        """
        :param model_path: path to the .tflite model file
        :param labels: sequence of raw label ids, index-aligned with model output
        :param transform: object exposing tf_transforms(PIL.Image) -> input tensor
        :param classes: mapping raw label id -> human-readable class name
        :param image_size: expected square input size (kept for compatibility)
        """
        self.interpreter = tf.lite.Interpreter(model_path=model_path)
        self.interpreter.allocate_tensors()
        self._input_details = self.interpreter.get_input_details()
        self._output_details = self.interpreter.get_output_details()
        self.labels = labels
        self.image_size = image_size
        self.transform = transform
        self.classes = classes

    def run_from_filepath(self, image_path):
        """Load an image from disk, preprocess it and classify it."""
        image = Image.open(image_path)
        x = self.transform.tf_transforms(image)
        return self.run(x)

    def run(self, image):
        """
        Classify one preprocessed input tensor.

        args:
            image: a (1, image_size, image_size, 3) np.array
        Returns (class_name, probability) for the most likely class.
        """
        self.interpreter.set_tensor(self._input_details[0]["index"], image)
        self.interpreter.invoke()
        tflite_interpreter_output = self.interpreter.get_tensor(self._output_details[0]["index"])
        logits = np.array(tflite_interpreter_output[0])
        # Numerically stable softmax: subtracting the max prevents exp()
        # overflow, and the normalizer is computed once (the original
        # re-evaluated exp() and sum() for every class).
        peak = max(logits)
        exp_x = [exp(x - peak) for x in logits]
        total = sum(exp_x)
        probabilities = [e / total for e in exp_x]
        # Pair each raw label with its probability and take the most likely.
        label_to_probabilities = [[self.labels[i], float(p)]
                                  for i, p in enumerate(probabilities)]
        best = max(label_to_probabilities, key=lambda element: element[1])
        return self.classes[best[0]], best[1]
def tfliteModel_Prediction(imgPath):
    """Classify the image at imgPath with the module-level TFLite model.

    Returns (class_name, probability).
    """
    model = TensorflowLiteClassificationModel(
        tflite_model, range(10), tensorflowTransform(), classes)
    return model.run_from_filepath(imgPath)
def PredOnClass(class_, imgPath):
    """Stamp the predicted class name onto the image at imgPath.

    Adds an 80px black banner above the image, draws `class_` into it and
    overwrites the file in place.
    """
    source = Image.open(imgPath)
    pad_top = 80
    width, height = source.size
    # New canvas: same width, 80px taller, black background.
    banner_img = Image.new(source.mode, (width, height + pad_top), (0, 0, 0))
    banner_img.paste(source, (0, pad_top))
    label_font = ImageFont.truetype('font.ttf', 30)
    drawer = ImageDraw.Draw(banner_img)
    drawer.text((15, 5), class_, (237, 230, 211), font=label_font)
    banner_img.save(imgPath)
##############################################################################################
# Flask application and upload configuration.
app = Flask(__name__)
# Directory (relative to the app root) where uploaded images are stored.
UPLOAD_FOLDER = 'static/uploads/'
# Most recent annotated image; written by upload_image(), read by /image_feed.
outputimage = None
# NOTE(review): hard-coded secret key is unsafe in production (it signs the
# session cookies used by flash()) -- load it from the environment instead.
app.secret_key = "secret key"
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
# File extensions accepted for upload (lower-case, without the dot).
ALLOWED_EXTENSIONS = {'png', 'jpg', 'jpeg', 'gif'}
def allowed_file(filename):
    """Return True when filename has an extension listed in ALLOWED_EXTENSIONS
    (case-insensitive; only the part after the last dot is checked)."""
    return '.' in filename and filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS
@app.route('/')
def home():
    '''
    Render the landing page.
    '''
    return render_template('index.html')
@app.route('/predict', methods=['GET','POST'])
def upload_image():
    """Handle an image upload: classify it, annotate it and cache the result.

    On success the annotated image is stored in the module-level
    `outputimage` (served by /image_feed) and the index page re-renders.
    Invalid requests flash a message and redirect back.
    """
    global outputimage
    if 'file' not in request.files:
        flash('No file part')
        return redirect(request.url)
    file = request.files['file']
    if file.filename == '':
        flash('No image selected for uploading')
        return redirect(request.url)
    if not (file and allowed_file(file.filename)):
        flash('Allowed image types are - png, jpg, jpeg, gif')
        return redirect(request.url)
    filename = file.filename
    file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))
    # Absolute path of the saved upload (mirrors the original construction).
    imgPath = os.path.join(os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(filename))), 'static/uploads', filename)
    class_, prob = tfliteModel_Prediction(imgPath)
    path_ = "static/uploads/" + filename
    PredOnClass(class_, path_)
    # NOTE(review): cv2.COLOR_BGR2RGB is a cvtColor code, not an imread
    # flag -- confirm the intended flag here.
    outputimage = cv2.imread(path_, cv2.COLOR_BGR2RGB)
    return render_template('index.html')
def generate_feed():
    """Yield the cached annotated image as a single MJPEG frame.

    Used by /image_feed. If no image has been produced yet (outputimage is
    None) or encoding fails, the generator simply ends the stream.
    """
    global outputimage
    try:
        flag, encodedImage = cv2.imencode('.JPEG', outputimage)
        yield (b'--frame\r\n' b'Content-Type: image/jpeg\r\n\r\n' +
               bytearray(encodedImage) + b'\r\n')
    except Exception:
        # Narrowed from a bare `except:`. The return value of a generator is
        # discarded by the HTTP caller -- it just terminates the stream.
        return "no Image"
@app.route("/image_feed")
def image_feed():
    # Serve the most recent annotated image as an MJPEG (multipart/x-mixed-
    # replace) stream, which browsers can display inside an <img> tag.
    return Response(generate_feed(),
        mimetype = "multipart/x-mixed-replace; boundary=frame")
# Start the Flask development server when run directly.
try:
    if __name__ == "__main__":
        app.run(debug=True)
except Exception:
    # Narrowed from a bare `except:`; typically fires when the port is busy.
    print('unable to open port')
|
|
import pandas as pd
import os
import numpy as np
from sklearn.metrics import confusion_matrix
import matplotlib.pyplot as plt
import cv2
import gc
from scipy import ndimage
import matplotlib.colors as colors
class ThicknessMapUtils():
    """Utilities for retinal thickness maps: loading, ETDRS nine-zone
    gridding and side-by-side plotting of ground truth vs. predictions.

    Maps are handled as 768x768 arrays; thickness values are converted from
    scan pixels to micrometers via pixel_to_mu_meter(). Zone names follow
    the ETDRS convention: C0 (center) plus S/N/I/T 1-2 (superior, nasal,
    inferior, temporal inner/outer rings).
    """

    def __init__(self, label_path, image_path, prediction_path):
        """Store the folders holding labels (.npy), fundus images (.jpeg)
        and predictions (.npy).

        Bug fix: the constructor previously discarded its arguments and set
        all three paths to None.
        """
        self.label_path = label_path
        self.image_path = image_path
        self.prediction_path = prediction_path

    def percentual_deviance(self, label, prediction):
        """Mean absolute error relative to the mean label value, 2 decimals."""
        return np.round(np.mean(np.abs(label - prediction)) / np.mean(label), 2)

    def load_images(self, record_name):
        """Load label/prediction arrays and the fundus image for one record.

        Returns (label_mu, prediction_mu, label_lr, prediction_lr, image):
        *_mu are micrometer-scaled maps, *_lr the nine-zone averaged maps.
        """
        label = np.load(os.path.join(self.label_path, record_name))
        prediction = np.load(os.path.join(self.prediction_path, record_name))
        image = cv2.imread(os.path.join(self.image_path, record_name.replace(".npy", ".jpeg")))
        label_mu = self.pixel_to_mu_meter(label)
        prediction_mu = self.pixel_to_mu_meter(prediction)
        # Resize prediction to the common 768x768 grid.
        prediction_mu = self.resize_prediction(prediction_mu)
        # Keep a single channel to stop implicit normalization downstream.
        label_lr = self.get_low_res_depth_grid(label_mu)[:, :, 0]
        prediction_lr = self.get_low_res_depth_grid(prediction_mu)[:, :, 0]
        return (label_mu, prediction_mu, label_lr, prediction_lr, image)

    def createCircularMask(self, h, w, center=None, radius=None):
        """Boolean (h, w) mask that is True inside the given circle.

        Defaults: the image center, and the largest radius that fits.
        """
        if center is None:  # use the middle of the image
            center = [int(w / 2), int(h / 2)]
        if radius is None:  # use the smallest distance between the center and image walls
            radius = min(center[0], center[1], w - center[0], h - center[1])
        Y, X = np.ogrid[:h, :w]
        dist_from_center = np.sqrt((X - center[0]) ** 2 + (Y - center[1]) ** 2)
        mask = dist_from_center <= radius
        return mask

    def get_zone(self, record_th_z, zone):
        """Average thickness for one named zone of a per-zone dataframe,
        falling back to the mean over all zones when the value is None.

        NOTE(review): pandas missing values are usually NaN, not None, so
        the fallback may never trigger -- confirm the upstream format.
        """
        zone_value = record_th_z[record_th_z.Name == zone].AvgThickness.iloc[0]
        # mean of all zone thickness
        zone_avg = np.nanmean(np.array(record_th_z.AvgThickness, dtype = np.float32))
        if zone_value is None:
            zone_value = zone_avg
        return (float(zone_value))

    def extract_values(self, record_th_z):
        """Per-zone average thicknesses, ordered (C0, S2, S1, N1, N2, I1, I2, T1, T2)."""
        C0_value = self.get_zone(record_th_z, "C0")
        S2_value = self.get_zone(record_th_z, "S2")
        S1_value = self.get_zone(record_th_z, "S1")
        N1_value = self.get_zone(record_th_z, "N1")
        N2_value = self.get_zone(record_th_z, "N2")
        I1_value = self.get_zone(record_th_z, "I1")
        I2_value = self.get_zone(record_th_z, "I2")
        T1_value = self.get_zone(record_th_z, "T1")
        T2_value = self.get_zone(record_th_z, "T2")
        return (C0_value, S2_value, S1_value, N1_value, N2_value, I1_value, I2_value, T1_value, T2_value)

    def set_low_res_depth_grid(self, C0_value, S2_value, S1_value, N1_value, N2_value, I1_value, I2_value, T1_value,
                               T2_value,
                               C0, S2, S1, N1, N2, I1, I2, T1, T2, img):
        """Paint each zone's value onto img via the boolean zone masks.

        Mutates and returns img.
        """
        img[C0] = C0_value
        img[S1] = S1_value
        img[S2] = S2_value
        img[I1] = I1_value
        img[I2] = I2_value
        img[T1] = T1_value
        img[T2] = T2_value
        img[N1] = N1_value
        img[N2] = N2_value
        return img

    def rescale_oct_height(self, depth_map):
        """Scale depth values by 496/160 -- presumably full OCT scan height
        vs. rendered height; confirm against the acquisition settings."""
        scaling_factor = np.divide(496, 160, dtype = np.float32)
        rescaled_depth_map = depth_map * scaling_factor
        return rescaled_depth_map

    def center_img(self, img):
        """Crop the non-zero region to a square and paste it centered into a
        768x768 canvas (grayscale 2-D or 3-channel input)."""
        # NOTE(review): `centered_img` is a module-level global fallback; if
        # the input is neither 2-D nor 3-D, a stale value may be returned.
        global centered_img
        coords = np.argwhere(img > 0)
        x_min, y_min = coords.min(axis = 0)[0:2]
        x_max, y_max = coords.max(axis = 0)[0:2]
        cropped_img = img[x_min:x_max - 1, y_min:y_max - 1]
        if len(cropped_img.shape) == 2:
            square_cropped_img = cropped_img[0:min(cropped_img.shape), 0:min(cropped_img.shape)]
            centered_img = np.zeros((768, 768))
            nb = centered_img.shape[0]
            na = square_cropped_img.shape[0]
            lower = (nb) // 2 - (na // 2)
            upper = (nb // 2) + (na // 2)
            # Correct for odd sizes so the window matches the crop exactly.
            difference = np.abs(lower - upper) - square_cropped_img.shape[0]
            upper = upper - difference
            centered_img[lower:upper, lower:upper] = square_cropped_img
        if len(cropped_img.shape) == 3:
            square_cropped_img = cropped_img[0:min(cropped_img.shape[0:2]), 0:min(cropped_img.shape[0:2]), :]
            centered_img = np.zeros((768, 768, 3)).astype(np.uint8)
            nb = centered_img.shape[0]
            na = square_cropped_img.shape[0]
            lower = (nb) // 2 - (na // 2)
            upper = (nb // 2) + (na // 2)
            difference = np.abs(lower - upper) - square_cropped_img.shape[0]
            upper = upper - difference
            centered_img[lower:upper, lower:upper, :] = square_cropped_img
        return (centered_img)

    def get_low_res_grid(self, img):
        """Build the nine ETDRS zone masks for img.

        Returns boolean masks in the order (C0, S2, S1, N1, N2, I1, I2, T1, T2).
        """
        # Ring radii on the LOCALIZER scale (mm / 0.0118 mm-per-pixel).
        outer_ring_radius = int(6 / 0.0118) / 2
        middle_ring_radius = int(3 / 0.0118) / 2
        inner_ring_radius = int(1 / 0.0118) / 2
        min_ = min(img.nonzero()[0]), min(img.nonzero()[1])
        max_ = max(img.nonzero()[0]), max(img.nonzero()[1])
        image_span = np.subtract(max_, min_)
        nrows = img.shape[0]
        ncols = img.shape[1]
        cnt_row = image_span[1] / 2 + min_[1]
        cnt_col = image_span[0] / 2 + min_[0]
        max_diam = min(image_span)
        # init empty LOCALIZER sized grid
        img_mask = np.zeros((nrows, ncols), np.float32)
        # NOTE(review): createCircularMask expects center=(x, y) = (col, row)
        # but receives (cnt_row, cnt_col); harmless for centered (symmetric)
        # inputs -- confirm for off-center crops.
        inner_ring_mask = self.createCircularMask(nrows, ncols, center = (cnt_row, cnt_col), radius = inner_ring_radius)
        middle_ring_mask = self.createCircularMask(nrows, ncols, center = (cnt_row, cnt_col),
                                                   radius = middle_ring_radius)
        # Fit the outer ring to the measurement area when it would overflow.
        if outer_ring_radius * 2 > max_diam:
            outer_ring_radius = max_diam / 2
        outer_ring_mask = self.createCircularMask(nrows, ncols, center = (cnt_row, cnt_col), radius = outer_ring_radius)
        inner_disk = inner_ring_mask
        middle_disk = (middle_ring_mask.astype(int) - inner_ring_mask.astype(int)).astype(bool)
        outer_disk = (outer_ring_mask.astype(int) - middle_ring_mask.astype(int)).astype(bool)
        # Diagonal half-plane masks used to cut the rings into quadrants.
        upper_triangel_right_mask = np.arange(0, img.shape[1])[:, None] <= np.arange(img.shape[1])
        lower_triangel_left_mask = np.arange(0, img.shape[1])[:, None] > np.arange(img.shape[1])
        upper_triangel_left_mask = lower_triangel_left_mask[::-1]
        lower_triangel_right_mask = upper_triangel_right_mask[::-1]
        # Nine depth regions: center disk plus 4 quadrants x 2 rings.
        C0 = inner_ring_mask
        S2 = np.asarray(upper_triangel_left_mask & outer_disk & upper_triangel_right_mask)
        S1 = np.asarray(upper_triangel_left_mask & middle_disk & upper_triangel_right_mask)
        N1 = np.asarray(lower_triangel_right_mask & middle_disk & upper_triangel_right_mask)
        N2 = np.asarray(lower_triangel_right_mask & outer_disk & upper_triangel_right_mask)
        I1 = np.asarray(lower_triangel_right_mask & middle_disk & lower_triangel_left_mask)
        I2 = np.asarray(lower_triangel_right_mask & outer_disk & lower_triangel_left_mask)
        T1 = np.asarray(upper_triangel_left_mask & middle_disk & lower_triangel_left_mask)
        T2 = np.asarray(upper_triangel_left_mask & outer_disk & lower_triangel_left_mask)
        return C0, S2, S1, N1, N2, I1, I2, T1, T2

    def get_depth_grid_edges(self, area):
        """Binary outline of a zone mask, replicated to 3 channels."""
        struct = ndimage.generate_binary_structure(2, 2)
        erode = ndimage.binary_erosion(area, struct)
        edges = area ^ erode
        return np.stack((edges,) * 3, axis = -1)

    def get_low_res_grid_shape(self, img):
        """Three-channel image containing the outlines of all nine zones."""
        C0, S2, S1, N1, N2, I1, I2, T1, T2 = self.get_low_res_grid(img)
        grid = np.zeros((img.shape[0], img.shape[1], 3), np.float32)
        grid = grid + self.get_depth_grid_edges(C0)
        grid = grid + self.get_depth_grid_edges(S1)
        grid = grid + self.get_depth_grid_edges(S2)
        grid = grid + self.get_depth_grid_edges(I1)
        grid = grid + self.get_depth_grid_edges(I2)
        grid = grid + self.get_depth_grid_edges(T1)
        grid = grid + self.get_depth_grid_edges(T2)
        grid = grid + self.get_depth_grid_edges(N1)
        grid = grid + self.get_depth_grid_edges(N2)
        return grid

    def get_low_res_depth_grid(self, img):
        """Three-channel map where every zone is filled with its mean thickness."""
        C0, S2, S1, N1, N2, I1, I2, T1, T2 = self.get_low_res_grid(img)
        grid = np.zeros((img.shape[0], img.shape[1], 3), np.float32)
        grid[C0] = np.mean(img[C0])
        grid[S1] = np.mean(img[S1])
        grid[S2] = np.mean(img[S2])
        grid[I1] = np.mean(img[I1])
        grid[I2] = np.mean(img[I2])
        grid[T1] = np.mean(img[T1])
        grid[T2] = np.mean(img[T2])
        grid[N1] = np.mean(img[N1])
        grid[N2] = np.mean(img[N2])
        return grid

    def pixel_to_mu_meter(self, img):
        """Convert thickness from scan pixels to micrometers (0.0039 mm/px).

        Note: a second, identical definition of this method further down the
        original class was removed.
        """
        img_um = np.multiply(img, 0.0039 * 1000)
        return img_um

    def get_low_res_depth_grid_values(self, img):
        """NaN-aware mean thickness per zone, ordered
        [C0, S1, S2, N1, N2, I1, I2, T1, T2] to pair with get_text_coord()."""
        # Bug fix: unpack in the order get_low_res_grid() actually returns
        # (C0, S2, S1, ...); the names were previously swapped for S1/S2.
        C0, S2, S1, N1, N2, I1, I2, T1, T2 = self.get_low_res_grid(img)
        # Treat near-zero background as missing.
        img[img < 10] = 0
        img[img == 0] = np.nan
        # get mean values
        C0_value = np.nanmean(img[C0])
        S1_value = np.nanmean(img[S1])
        S2_value = np.nanmean(img[S2])
        I1_value = np.nanmean(img[I1])
        I2_value = np.nanmean(img[I2])
        T1_value = np.nanmean(img[T1])
        T2_value = np.nanmean(img[T2])
        N1_value = np.nanmean(img[N1])
        N2_value = np.nanmean(img[N2])
        # convert NaN values back to zero (mutates the caller's array)
        img = np.nan_to_num(img)
        low_grid_values = [C0_value, S1_value, S2_value, N1_value, N2_value, I1_value, I2_value, T1_value, T2_value]
        return low_grid_values

    def get_low_res_depth_grid_maxvalues(self, img):
        """NaN-aware maximum thickness per zone, same ordering as
        get_low_res_depth_grid_values()."""
        # Bug fix: unpack order corrected (see get_low_res_depth_grid_values).
        C0, S2, S1, N1, N2, I1, I2, T1, T2 = self.get_low_res_grid(img)
        # Treat near-zero background as missing.
        img[img < 10] = 0
        img[img == 0] = np.nan
        # Bug fix: np.max propagates the NaNs introduced above; use nanmax
        # for consistency with the nanmean-based sibling method.
        C0_value = np.nanmax(img[C0])
        S1_value = np.nanmax(img[S1])
        S2_value = np.nanmax(img[S2])
        I1_value = np.nanmax(img[I1])
        I2_value = np.nanmax(img[I2])
        T1_value = np.nanmax(img[T1])
        T2_value = np.nanmax(img[T2])
        N1_value = np.nanmax(img[N1])
        N2_value = np.nanmax(img[N2])
        # convert NaN values back to zero (mutates the caller's array)
        img = np.nan_to_num(img)
        low_grid_values = [C0_value, S1_value, S2_value, N1_value, N2_value, I1_value, I2_value, T1_value, T2_value]
        return low_grid_values

    def get_text_coord(self, img):
        """Median (x, y) text anchor per zone, interleaved as
        [C0_x, C0_y, S1_x, S1_y, S2_x, S2_y, N1..., I..., T...]."""
        # Bug fix: unpack order corrected (see get_low_res_depth_grid_values).
        C0, S2, S1, N1, N2, I1, I2, T1, T2 = self.get_low_res_grid(img)
        S1_x_mc = np.median(np.where(S1 == True)[1])
        S1_y_mc = np.median(np.where(S1 == True)[0])
        S2_x_mc = np.median(np.where(S2 == True)[1])
        S2_y_mc = np.median(np.where(S2 == True)[0])
        N1_x_mc = np.median(np.where(N1 == True)[1])
        N1_y_mc = np.median(np.where(N1 == True)[0])
        N2_x_mc = np.median(np.where(N2 == True)[1])
        N2_y_mc = np.median(np.where(N2 == True)[0])
        I1_x_mc = np.median(np.where(I1 == True)[1])
        I1_y_mc = np.median(np.where(I1 == True)[0])
        I2_x_mc = np.median(np.where(I2 == True)[1])
        I2_y_mc = np.median(np.where(I2 == True)[0])
        T1_x_mc = np.median(np.where(T1 == True)[1])
        T1_y_mc = np.median(np.where(T1 == True)[0])
        T2_x_mc = np.median(np.where(T2 == True)[1])
        T2_y_mc = np.median(np.where(T2 == True)[0])
        # Center zone: horizontally aligned with S1, vertically with N2.
        C0_x_mc = S1_x_mc
        C0_y_mc = N2_y_mc
        coord_list = [C0_x_mc, C0_y_mc, S1_x_mc, S1_y_mc, S2_x_mc, S2_y_mc, \
                      N1_x_mc, N1_y_mc, N2_x_mc, N2_y_mc, I1_x_mc, I1_y_mc, I2_x_mc, I2_y_mc, \
                      T1_x_mc, T1_y_mc, T2_x_mc, T2_y_mc]
        return coord_list

    def resize_prediction(self, img):
        """Resize a prediction map to the common 768x768 grid."""
        prediction_resized = cv2.resize(img, (768, 768))
        return prediction_resized

    def write_depthgrid_values(self, coord_list, value_list, text_size=20):
        """Write zone values at the given (x, y) anchors on the current axes.

        Bug fix: text_size now has a default; one internal caller
        (plot_fundus_label_or_prediction_heidelberg_cs) omitted it, which
        raised a TypeError.
        """
        for i in range(0, int(len(coord_list) / 2)):
            plt.text(coord_list[i * 2], coord_list[(i + 1) * 2 - 1], str(int(value_list[i])), ha = 'center',
                     va = 'center',
                     bbox = dict(facecolor = 'white'), size = text_size)

    def plot_fundus(self, label_path, image_path, save_name, save_path, laterality):
        """Save a figure of the (masked, centered) fundus image alone."""
        # Center the label map; values below 25 are treated as background.
        label_mu = cv2.resize(np.load(label_path), (768, 768))
        label_mu[label_mu < 25] = 0
        label_mu = self.center_img(label_mu)
        # Zero the fundus wherever the label is background, then center it.
        fundus_image = cv2.resize(cv2.imread(image_path), (768, 768))
        fundus_image[label_mu == 0] = 0
        fundus_image = self.center_img(fundus_image)
        plt.figure(figsize = (10, 10))
        plt.subplot(1, 1, 1)
        plt.imshow(fundus_image)
        plt.title("fundus: record:{}, laterality: {}".format(save_name, laterality))
        plt.savefig(os.path.join(save_path, str(save_name)))
        plt.close()

    def plot_fundus_label_and_prediction(self, label_path, prediction_path, image_path,
                                         save_path, save_name, laterality, full_abt, answers):
        """Save a 2x3 comparison figure: fundus, ground-truth map (full and
        nine-zone) and predicted map (full and nine-zone) with grader answers
        in the suptitle."""
        cm_heidelberg = self.heidelberg_colormap()
        save_name = str(save_name) + ".png"
        prediction_mu = cv2.resize(np.load(os.path.join(prediction_path,
                                                        save_name.replace(".png", ".npy"))).reshape(1, 256, 256, 1)[0,
                                   :, :,
                                   0], (768, 768)) * 500.
        label_mu = cv2.resize(np.load(os.path.join(label_path,
                                                   save_name.replace(".png", ".npy"))), (768, 768))
        # Center both maps; values below 25 are background.
        label_mu[label_mu < 25] = 0
        label_mu = self.center_img(label_mu)
        prediction_mu[prediction_mu < 25] = 0
        prediction_mu = self.center_img(prediction_mu)
        percentual_dev = self.percentual_deviance(label_mu, prediction_mu)
        # Zero the fundus outside the label area and center it.
        fundus_image = cv2.resize(cv2.imread(os.path.join(image_path,
                                                          save_name)), (768, 768))
        fundus_image[label_mu == 0] = 0
        fundus_image = self.center_img(fundus_image)
        # Per-zone values for both maps.
        label_mu = np.nan_to_num(label_mu)
        low_grid_values_label = self.get_low_res_depth_grid_values(label_mu)
        prediction_mu = np.nan_to_num(prediction_mu)
        low_grid_values_prediction = self.get_low_res_depth_grid_values(prediction_mu)
        # Overlay the nine-zone grid outlines (np.bool replaced by bool --
        # the alias was removed in NumPy >= 1.24).
        prediction_mu = np.nan_to_num(prediction_mu)
        prediction = np.copy(prediction_mu)
        low_res_grid_prediction = self.get_low_res_grid_shape(prediction_mu)
        prediction[low_res_grid_prediction.astype(bool)[:, :, 0]] = 0
        label_mu = np.nan_to_num(label_mu)
        label = np.copy(label_mu)
        low_res_grid = self.get_low_res_grid_shape(label_mu)
        label[low_res_grid.astype(bool)[:, :, 0]] = 0
        coord_list = self.get_text_coord(label_mu)
        title_text = "Fundus: Referral answer: {}, a/e/n answer: {} \n " \
                     "Fundus + prediction: Referral answer: {}, a/e/n answer: {}\n" \
                     "Gold standard referral: {}, Gold standard a/e/n answer: {}"
        plt.figure(figsize = (40, 20))
        sup_text_size = 35
        text_size = 30
        plt.suptitle(title_text.format(answers["referral_answer_f"], answers["a_e_n_answer_f"],
                                       answers["referral_answer_fp"], answers["a_e_n_answer_fp"],
                                       answers["gold_standard_referral"], answers["gold_standard_a_e_n"]),
                     size = sup_text_size)
        # PLOT FUNDUS
        plt.subplot(2, 3, 1)
        plt.imshow(fundus_image)
        plt.title("Record with percentual deviance of: {}".format(str(percentual_dev)), size = text_size)
        # PLOT LABEL
        plt.subplot(2, 3, 2)
        label_mu = np.ma.masked_where(label_mu < 100, label_mu)
        cmap = cm_heidelberg
        cmap.set_bad(color = 'black')
        plt.imshow(label_mu, cmap = cmap, vmin = 100, vmax = 750)
        plt.title("laterality: {}".format(laterality), size = text_size)
        plt.colorbar(fraction = 0.046, pad = 0.04).ax.tick_params(labelsize = text_size * 0.8)
        plt.title("Groundtruth thickness map", size = text_size)
        # PLOT LABEL WITH LOW RES GRID
        plt.subplot(2, 3, 3)
        label = np.ma.masked_where(label < 100, label)
        cmap = cm_heidelberg
        cmap.set_bad(color = 'black')
        plt.imshow(label, cmap = cmap, vmin = 100, vmax = 750)
        self.write_depthgrid_values(coord_list, low_grid_values_label, text_size - 10)
        plt.colorbar(fraction = 0.046, pad = 0.04).ax.tick_params(labelsize = text_size * 0.8)
        # PLOT PREDICTION
        plt.subplot(2, 3, 4)
        prediction_mu = np.ma.masked_where(prediction_mu < 100, prediction_mu)
        cmap = cm_heidelberg
        cmap.set_bad(color = 'black')
        plt.imshow(prediction_mu, cmap = cmap, vmin = 100, vmax = 750)
        plt.colorbar(fraction = 0.046, pad = 0.04).ax.tick_params(labelsize = text_size * 0.8)
        plt.title("Predicted thickness map", size = text_size)
        # PLOT PREDICTION WITH LOW RES GRID
        plt.subplot(2, 3, 5)
        prediction = np.ma.masked_where(prediction < 100, prediction)
        cmap = cm_heidelberg
        cmap.set_bad(color = 'black')
        plt.imshow(prediction, cmap = cmap, vmin = 100, vmax = 750)
        plt.title("laterality: {}".format(laterality), size = text_size)
        self.write_depthgrid_values(coord_list, low_grid_values_prediction, text_size - 10)
        plt.colorbar(fraction = 0.046, pad = 0.04).ax.tick_params(labelsize = text_size * 0.8)
        plt.savefig(os.path.join(save_path, str(save_name)))
        plt.close()

    def heidelberg_colormap(self):
        """Heidelberg-style thickness colormap
        (black -> purple -> blue -> green -> yellow -> red -> white)."""
        from matplotlib.colors import LinearSegmentedColormap
        # Close open figures so the colormap is applied to a clean state.
        plt.close('all')
        cdict = {
            'blue': ((0.0, 0.0, 0.0),  # black
                     (0.1, 1.0, 1.0),  # purple
                     (0.2, 1.0, 1.0),  # blue
                     (0.3, 0.0, 0.0),  # green
                     (0.4, 0.0, 0.0),  # yellow
                     (0.55, 0.0, 0.0),  # red
                     (0.65, 1.0, 1.0),  # white
                     (1.0, 1.0, 1.0)),  # white
            'green': ((0.0, 0.0, 0.0),  # black
                      (0.1, 0.0, 0.0),  # purple
                      (0.2, 0.0, 0.0),  # blue
                      (0.3, 1.0, 1.0),  # green
                      (0.4, 1.0, 1.0),  # yellow
                      (0.55, 0.0, 0.0),  # red
                      (0.65, 1.0, 1.0),  # white
                      (1.0, 1.0, 1.0)),
            'red': ((0.0, 0.0, 0.0),  # black
                    (0.1, 1.0, 1.0),  # purple
                    (0.2, 0.0, 0.0),  # blue
                    (0.3, 0.0, 0.0),  # green
                    (0.4, 1.0, 1.0),  # yellow
                    (0.55, 1.0, 1.0),  # red
                    (0.65, 1.0, 1.0),  # white
                    (1.0, 1.0, 1.0)),
        }
        cm_heidelberg = LinearSegmentedColormap('bgr', cdict)
        return cm_heidelberg

    def plot_fundus_label_or_prediction_heidelberg_cs(self, record_path, image_path, save_path, save_name, laterality,
                                                      prediction=True):
        """Save a 1x3 figure (fundus, full map, nine-zone map) for either a
        prediction (.npy scaled by 500) or a label array."""
        if prediction:
            label_mu = cv2.resize(np.load(record_path).reshape(1, 256, 256, 1)[0, :, :, 0], (768, 768)) * 500.
        else:
            label_mu = cv2.resize(np.load(record_path), (768, 768))
        cm_heidelberg = self.heidelberg_colormap()
        # Center the map; values below 25 are background.
        label_mu[label_mu < 25] = 0
        label_mu = self.center_img(label_mu)
        # Zero the fundus outside the map area and center it.
        fundus_image = cv2.resize(cv2.imread(image_path), (768, 768))
        fundus_image[label_mu == 0] = 0
        fundus_image = self.center_img(fundus_image)
        # Per-zone values and grid overlay (np.bool replaced by bool).
        label_mu = np.nan_to_num(label_mu)
        low_grid_values_label = self.get_low_res_depth_grid_values(label_mu)
        label_mu = np.nan_to_num(label_mu)
        label = np.copy(label_mu)
        low_res_grid = self.get_low_res_grid_shape(label_mu)
        label[low_res_grid.astype(bool)[:, :, 0]] = 0
        coord_list = self.get_text_coord(label_mu)
        plt.figure(figsize = (20, 20))
        plt.subplot(1, 3, 1)
        plt.imshow(fundus_image)
        plt.title("fundus")
        plt.subplot(1, 3, 2)
        label_mu = np.ma.masked_where(label_mu < 100, label_mu)
        label_mu[label_mu > 500.0] = 1000
        cmap = cm_heidelberg
        cmap.set_bad(color = 'black')
        plt.imshow(label_mu, cmap = cmap, vmin = 100, vmax = 750)
        plt.title("high res:{}, laterality: {}".format(save_name, laterality))
        plt.colorbar(fraction = 0.046, pad = 0.04)
        plt.subplot(1, 3, 3)
        label = np.ma.masked_where(label < 100, label)
        cmap = cm_heidelberg
        cmap.set_bad(color = 'black')
        plt.imshow(label, cmap = cmap, vmin = 100, vmax = 750)
        plt.title("low res:{}, laterality: {}".format(save_name, laterality))
        # Uses the new text_size default (this call previously raised
        # TypeError for a missing argument).
        self.write_depthgrid_values(coord_list, low_grid_values_label)
        plt.colorbar(fraction = 0.046, pad = 0.04)
        plt.savefig(os.path.join(save_path, str(save_name)))
        plt.close()
|
|
#!/usr/bin/env python
# coding: utf-8
# ## Imports
# In[7]:
import pandas as pd
import numpy as np
import streamlit as st
from PIL import Image
import os
import pickle
# Open the model created by the notebook; the context manager ensures the
# file handle is closed (the original left it open).
with open('model/box_office_model.pkl', 'rb') as model_file:
    model = pickle.load(model_file)
#create main page
def main():
    """Render the Streamlit UI and predict box-office revenue on demand."""
    image = Image.open('assets/Box_Office.jfif')
    st.image(image, use_column_width=False)
    add_selectbox = st.sidebar.selectbox('How would you like to predict?', ('Online', 'test'))
    st.sidebar.info('This app is created to predict revenue for movies' )
    st.sidebar.success('DAT158')
    st.title('Box Office Predictions')
    if add_selectbox == 'Online':
        budget = st.number_input('budget', min_value=0, max_value=1000000000, value=1000000)
        popularity = st.number_input('popularity', min_value=0., max_value=100., value=0., format="%.2f", step=1.)
        runtime = st.number_input('runtime', min_value=0., max_value=500., value=0., format="%.2f", step=1.)
        # Feature order must match the training pipeline: budget, runtime, popularity.
        inputs = [[budget,runtime,popularity]]
        # Bug fix: the original called StandardScaler() here without importing
        # it (NameError at runtime) and discarded the result anyway -- the
        # model is fed the raw inputs, as before.
        if st.button('Predict'):
            result = model.predict(inputs)
            print(result)
            st.success('Predicted output: €{:,.2f}'.format(float(result)))
# Start the application when executed as a script.
if __name__ =='__main__':
    main()
|
|
import math
import pandas as pd
import numpy as np
import os
from src.datasets import Dataset
from sklearn.metrics import roc_auc_score
"""
@author: Astha Garg 10/19
"""
class Wadi(Dataset):
    """WADI water-distribution anomaly-detection dataset loader."""

    def __init__(self, seed: int, remove_unique=False, entity=None, verbose=False, one_hot=False):
        """
        :param seed: for repeatability
        :param remove_unique: drop constant channels during standardization
        :param entity: for compatibility with multi-entity datasets. Value provided will be ignored. self.entity will be
        set to same as dataset name for single entity datasets
        :param verbose: print progress information
        :param one_hot: one-hot encode the 3-state actuator status channels
        """
        super().__init__(name="wadi", file_name="WADI_14days.csv")
        # Repository root is three levels above this file.
        root = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
        self.raw_path_train = os.path.join(root, "data", "raw", "wadi", "raw", "WADI_14days.csv")
        self.raw_path_test = os.path.join(root, "data", "raw", "wadi", "raw", "WADI_attackdata.csv")
        self.anomalies_path = os.path.join(root, "data", "raw", "wadi", "raw", "WADI_anomalies.csv")
        self.seed = seed
        self.remove_unique = remove_unique
        self.verbose = verbose
        self.one_hot = one_hot

    def load(self):
        """Read raw CSVs, label anomaly windows, optionally one-hot encode the
        actuator channels, standardize and store (X_train, y_train, X_test, y_test)."""
        # 1 is the outlier class, all other values are normal.
        OUTLIER_CLASS = 1
        test_df: pd.DataFrame = pd.read_csv(self.raw_path_test, header=0)
        train_df: pd.DataFrame = pd.read_csv(self.raw_path_train, header=3)
        # Remove 4 columns that only contain NaNs (data missing from the csv file).
        nan_columns = [r'\\WIN-25J4RO10SBF\LOG_DATA\SUTD_WADI\LOG_DATA\2_LS_001_AL',
                       r'\\WIN-25J4RO10SBF\LOG_DATA\SUTD_WADI\LOG_DATA\2_LS_002_AL',
                       r'\\WIN-25J4RO10SBF\LOG_DATA\SUTD_WADI\LOG_DATA\2_P_001_STATUS',
                       r'\\WIN-25J4RO10SBF\LOG_DATA\SUTD_WADI\LOG_DATA\2_P_002_STATUS']
        train_df = train_df.drop(nan_columns, axis=1)
        test_df = test_df.drop(nan_columns, axis=1)
        # Strip the long UNC prefix from the channel names.
        train_df = train_df.rename(columns={col: col.split('\\')[-1] for col in train_df.columns})
        test_df = test_df.rename(columns={col: col.split('\\')[-1] for col in test_df.columns})
        # Add anomaly labels as a column in the dataframes.
        ano_df = pd.read_csv(self.anomalies_path, header=0)
        train_df["y"] = np.zeros(train_df.shape[0])
        test_df["y"] = np.zeros(test_df.shape[0])
        y_col = test_df.columns.get_loc("y")
        causes = []
        for i in range(ano_df.shape[0]):
            ano = ano_df.iloc[i, :][["Start_time", "End_time", "Date"]]
            start_row = np.where((test_df["Time"].values == ano["Start_time"]) &
                                 (test_df["Date"].values == ano["Date"]))[0][0]
            end_row = np.where((test_df["Time"].values == ano["End_time"]) &
                               (test_df["Date"].values == ano["Date"]))[0][0]
            # Bug fix: chained assignment (df["y"].iloc[...] = ...) is not
            # guaranteed to write through to the frame; use positional
            # indexing on the frame itself instead.
            test_df.iloc[start_row:(end_row + 1), y_col] = np.ones(1 + end_row - start_row)
            causes.append(ano_df.iloc[i, :]["Causes"])
        # Remove time and date from the features.
        train_df = train_df.drop(["Time", "Date", "Row"], axis=1)
        test_df = test_df.drop(["Time", "Date", "Row"], axis=1)
        if self.one_hot:
            # Actuator columns (categoricals); all of these have 3 categories.
            one_hot_cols = ['1_MV_001_STATUS', '1_MV_002_STATUS', '1_MV_003_STATUS', '1_MV_004_STATUS', '2_MV_003_STATUS',
                            '2_MV_006_STATUS', '2_MV_101_STATUS', '2_MV_201_STATUS', '2_MV_301_STATUS', '2_MV_401_STATUS',
                            '2_MV_501_STATUS', '2_MV_601_STATUS']
            # Combine before encoding because some categories appear only in test.
            one_hot_encoded = Dataset.one_hot_encoding(pd.concat([train_df, test_df], axis=0, join="inner"),
                                                       col_names=one_hot_cols)
            train_df = one_hot_encoded.iloc[:len(train_df)]
            test_df = one_hot_encoded.iloc[len(train_df):]
        X_train, y_train, X_test, y_test = self.format_data(train_df, test_df, OUTLIER_CLASS, verbose=self.verbose)
        X_train, X_test = Dataset.standardize(X_train, X_test, remove=self.remove_unique, verbose=self.verbose)
        # Map each root-cause channel name back to (possibly one-hot expanded)
        # column indices.
        matching_col_names = np.array([col.split("_1hot")[0] for col in train_df.columns])
        self.causes = []
        for event in causes:
            event_causes = []
            for chan_name in get_chan_name(event):
                event_causes.extend(np.argwhere(chan_name == matching_col_names).ravel())
            self.causes.append(event_causes)
        self.visible_causes = [[5, 6, 10, 14, 16, "105"], [6, "9", "13", 14, 16, "18", "21", "40", 49, 55, 61, 68, 102],
                               [1, 5, 6, "9", 14, 16, 23, 25, 26, 29, 35, "37", 43, 47, 51, 63, 67], [24, 27, 30, 33,
                                                                                                      36, 40, 63, 64,
                                                                                                      65, 66, 67, 68,
                                                                                                      82, 84, 86, 87,
                                                                                                      89, 90, 91, 104],
                               [9, 18, 20, 22, 25, 39, 40, 42, 43, "61", 86, 87, 89, 90, 91, 102], [1, 2, 3, 6, 11, 12,
                                                                                                    14, 16, 23, 26, 29,
                                                                                                    32, 35, 38],
                               [62, 110], [19, 39], [1, 2, 3, 4, 5, 6, 10, 14, 16, 18, 39, 71], ["18", "39", 62, "71",
                                                                                                 86, 100, 110, 111],
                               [29, 32, 35, 38, "40", 62, 98, 99, 110, 111], [88, 123], [14, 16, 33, 56, 110, 111],
                               [1, 3, 6, 14, 16, 61, 101, 103]]
        self._data = tuple([X_train, y_train, X_test, y_test])

    def get_root_causes(self):
        """Per-anomaly lists of root-cause channel indices (set by load())."""
        return self.causes
def get_chan_name(chan_list_str):
    """Parse a stringified channel list like "['a', 'b']" into ['a', 'b'].

    Strings of two characters or fewer (e.g. "[]" or "") yield an empty list.
    """
    if len(chan_list_str) <= 2:
        return []
    # Drop the "['"/"']" delimiters, then split on the inner separators.
    inner = chan_list_str[2:-2]
    return inner.split("', '")
def main():
    """Smoke test: load the WADI dataset and print its root causes."""
    from src.algorithms import AutoEncoder
    dataset = Wadi(seed=0)
    x_train, y_train, x_test, y_test = dataset.data()
    print(dataset.causes)
    # model = AutoEncoder(sequence_length=30, num_epochs=5, hidden_size=15, lr=1e-4, gpu=0)
    # model.fit(x_train)
    # error = model.predict(x_test)
    # print(roc_auc_score(y_test, error))  # e.g. 0.8614


if __name__ == '__main__':
    main()
|
|
# -*- coding: utf-8 -*-
import tensorflow as tf
import librosa
import numpy as np
import os
from scipy.signal import butter, lfilter, freqz
import matplotlib.pyplot as plt
def conv_net(X, W, b, keepprob, mfcc_n, img_size):
    """Small CNN over an MFCC "image": four conv layers plus four dense layers.

    :param X: flattened input batch, reshaped to (-1, mfcc_n, img_size, 1)
    :param W: dict of weight tensors ('w1'..'w5', 'dw1'..'dw3')
    :param b: dict of bias tensors ('b1'..'b5', 'db1'..'db3')
    :param keepprob: dropout keep probability for the conv layers
    :param mfcc_n: number of MFCC coefficients (input height)
    :param img_size: number of time frames (input width)
    :return: logits tensor of the final dense layer
    """
    input_img = tf.reshape(X, shape=[-1, mfcc_n, img_size, 1])
    # Convolutional stack: SAME padding keeps the spatial size; dropout after
    # each of the first three layers. The print() calls log tensor shapes.
    layer1 = tf.nn.relu(tf.add(tf.nn.conv2d(input_img, W['w1'], strides=[1, 1, 1, 1], padding='SAME'), b['b1']))
    layer1 = tf.nn.dropout(layer1, keepprob)
    print(layer1)
    layer2 = tf.nn.relu(tf.add(tf.nn.conv2d(layer1, W['w2'], strides=[1, 1, 1, 1], padding='SAME'), b['b2']))
    layer2 = tf.nn.dropout(layer2, keepprob)
    print(layer2)
    layer3 = tf.nn.relu(tf.add(tf.nn.conv2d(layer2, W['w3'], strides=[1, 1, 1, 1], padding='SAME'), b['b3']))
    layer3 = tf.nn.dropout(layer3, keepprob)
    print(layer3)
    layer4 = tf.nn.relu(tf.add(tf.nn.conv2d(layer3, W['w4'], strides=[1, 1, 1, 1], padding='SAME'), b['b4']))
    layer4 = tf.nn.max_pool(layer4, ksize=[1, 2, 2, 1], strides=[1, 2, 1, 1], padding='SAME')
    # (13, 20)
    layer4 = tf.reshape(layer4, [-1, 65])
    print(layer4)
    # Bug fix: the biases were previously added to the WEIGHT matrices inside
    # tf.matmul (tf.matmul(x, W + b)); add them after the product instead.
    layer5 = tf.matmul(layer4, W['dw1']) + b['db1']
    layer6 = tf.matmul(layer5, W['dw2']) + b['db2']
    layer7 = tf.matmul(layer6, W['dw3']) + b['db3']
    layer8 = tf.matmul(layer7, W['w5']) + b['b5']
    print(layer8)
    return layer8
'''
def recur_net(X,W,b):
#말과 말 사이의 시간을 타임 스텝으로 잡음. 10000/512 약 20개.
X=tf.unstack(X,20,1)
cell = tf.rnn.BasicLSTMCell(n_hidden, forget_bias=1.0)
#cell=tf.nn.rnn_cell.BasicRNNCell(n_hidden)
outputs, states=tf.rnn.static_rnn(cell,X,dtype=tf.float32)
return tf.matmul(outputs[-1],W['out'])+b['out']
'''
|
|
import os
import shutil
import json
import time
import numpy as np
from scipy.misc import imsave
#from Environment.env import Actions
from pathlib import Path
from datetime import datetime
class Logger:
    """File-system logger for training runs (rewards, losses, images, metrics).

    All methods are invoked through the class itself (no instances); note
    they are defined without self/@staticmethod, which only works when
    called as Logger.<name>().
    """
    # Root folders for run artifacts and saved models.
    root = Path('files')
    modelsRoot = Path('models')
    path_rewards = Path('files/rewards/')
    path_losses = Path('files/losses/')
    path_meta = Path('files/metadata.json')  # run metadata written by create_folders()
    path_model_pi = Path('models/model_pi.model')  # presumably the policy net -- confirm
    path_model_v = Path('models/model_v.model')  # presumably the value net -- confirm
    path_scores = Path('files/scores/')
    path_state_images = Path('files/state_images/')
    path_dnn_intermediate_images = Path('files/dnn_intermediate_images/')
    path_metrics = Path('files/metrics/')
    # Lazy-initialization guard used by init().
    logger_initiated = False
def init():
if Logger.logger_initiated:
return
try:
Logger.create_folders_internal()
except:
Logger.create_folders_internal()
Logger.logger_initiated = True
return
def create_folders(lock, atari_name, cores, tmax, game_length, Tmax, C, gamma, lr):
lock.acquire()
Logger.create_folders_internal()
metadata = [time.strftime("%d/%m/%y"), atari_name, 'cores '+str(cores), 'tmax '+str(tmax), 'gl '+str(game_length), 'Tmax '+str(Tmax), 'C '+str(C), 'gamma '+str(gamma), 'lr '+str(lr)]
with open(Logger.path_meta, "w") as f:
f.write(json.dumps(metadata))
lock.release()
def create_folders_internal():
try:
if not os.path.exists(Logger.root):
# Delete if exists.
# print ('The folder named files will be deleted!')
# input ('Press Enter to continue.')
Logger.root.mkdir(exist_ok=True, parents=True)
else:
shutil.rmtree(Logger.root)
Logger.root.mkdir(exist_ok=True, parents=True)
except:
time.sleep(1)
Logger.root.mkdir(exist_ok=True, parents=True)
# Create the new folders.
Logger.path_rewards.mkdir(exist_ok=True, parents=True)
Logger.path_losses.mkdir(exist_ok=True, parents=True)
Logger.path_scores.mkdir(exist_ok=True, parents=True)
Logger.path_state_images.mkdir(exist_ok=True, parents=True)
Logger.path_dnn_intermediate_images.mkdir(exist_ok=True, parents=True)
Logger.path_metrics.mkdir(exist_ok=True, parents=True)
def log_state_image(boardData, steps, learner_id, action, stateShape):
Logger.init()
#pngfile = "testImage.png"
#pngWriter.write(pngfile, numpy.reshape(boardData, (-1, column_count * plane_count)))
timestr = time.strftime("%Y%m%d-%H%M%S_") + str(int(datetime.now().microsecond / 1000))
file_name = "stateImage_" + str(learner_id) + "_" + str(steps) + "_" + timestr + "_action_" + str(action) + ".png"
file_path = Logger.path_state_images / file_name
file_path.touch(exist_ok=True)
input_img = np.array(boardData)
# Reshape input to meet with CNTK expectations.
grayScaleImg = np.reshape(input_img, (stateShape[0], stateShape[1]))
imsave(file_path, grayScaleImg)
def log_dnn_intermediate_image(imageToSave, imgInfoStr):
Logger.init()
#pngfile = "testImage.png"
#pngWriter.write(pngfile, numpy.reshape(boardData, (-1, column_count * plane_count)))
timestr = time.strftime("%Y%m%d-%H%M%S")
file_name = imgInfoStr + "_" + timestr + ".png"
file_path = Logger.path_dnn_intermediate_images / file_name
imsave(file_path, imageToSave)
def log_metrics(info, iteration, learner_id):
Logger.init()
Logger.log_scores(iteration, learner_id, info['score'], info['oldScore'])
file_name = "metrics_" + str(learner_id) + ".txt"
file_path = Logger.path_metrics / file_name
file_path.touch(exist_ok=True)
with file_path.open(mode="a+") as f:
f.write("Step {0}: negativeRewardCount: {1}, zeroRewardCount: {2} positiveRewardCount: {3}\r\n".format(
iteration, info["negativeRewardCount"], info["zeroRewardCount"], info["positiveRewardCount"]))
file_name = "moves_" + str(learner_id) + ".txt"
file_path = Logger.path_metrics / file_name
file_path.touch(exist_ok=True)
with file_path.open(mode="a+") as f:
stringToPrint = ""
for i in range(len(info["numberOfTimesExecutedEachAction"])):
#actionName = Actions(i).name
stringToPrint += str(info["numberOfTimesExecutedEachAction"][i]) + ", " # actionName + ": " + \
stringToPrint += "\r\n"
f.write(stringToPrint)
def log_scores(iteration, learner_id, currentScore, oldScore):
Logger.init()
file_name = "score_" + str(learner_id) + ".txt"
file_path = Logger.path_scores / file_name
file_path.touch(exist_ok=True)
with file_path.open(mode="a+") as f:
f.write("Step {0}: PreviousScore: {1}, CurrentScore: {2}\r\n".format(
iteration, oldScore, currentScore))
def log_rewards(rewards, iteration, learner_id, rnd):
Logger.init()
file_name = "rwd_" + str(iteration) + "_" + str(learner_id) + "_" + str(rnd) + ".json"
file_path = Logger.path_rewards / file_name
file_path.touch(exist_ok=True)
with file_path.open(mode="w") as f:
f.write(json.dumps(rewards))
def log_losses(loss, iteration, learner_id):
Logger.init()
file_name = "loss_" + str(iteration) + "_" + str(learner_id) + ".json"
file_path = Logger.path_losses / file_name
file_path.touch(exist_ok=True)
with file_path.open(mode="w") as f:
f.write(json.dumps(loss))
def read_metadata():
Logger.init()
with open(Logger.path_meta, "r") as f:
data = json.load(f)
return data
def save_model(agent, shared_params):
Logger.init()
agent.save_model(shared_params, Logger.path_model_pi, Logger.path_model_v)
def load_model(net):
Logger.init()
if os.path.exists(Logger.path_model_pi) and os.path.exists(Logger.path_model_v):
net.load_model(Logger.path_model_pi, Logger.path_model_v)
|
|
######################################################
# Christoph Aurnhammer, 2019 #
# Pertaining to Aurnhammer, Frank (2019) #
# Comparing gated and simple recurrent neural #
# networks as models of human sentence processing #
# #
# Maintained at github.com/caurnhammer/gated_cells #
# Adapted from pytorch word_language_model #
######################################################
# coding: utf-8
import argparse
import time
import math
import numpy
import torch
import torch.optim as optim
from torch.autograd import Variable
from torch.nn.functional import log_softmax
from random import sample
# import scripts data.py and model.py
import data
import model
def parse_args():
    """Parse the command-line arguments for the language-model experiment.

    Returns:
        argparse.Namespace with corpus location, model/optimisation settings.
    """
    # parse command line arguments, returned as attribute to object "args"
    parser = argparse.ArgumentParser(description='PyTorch ENCOW RNN/GRU/LSTM Language Model')
    add = parser.add_argument
    add('--data', type=str, default='./corpus/', help='location of the data corpus')
    add('--model', type=str, default='LSTM', help='type of recurrent net (RNN_TANH, RNN_RELU, LSTM, GRU)')
    add('--emsize', type=int, default=400, help='size of word embeddings')
    add('--nhid', type=int, default=500, help='number of hidden units per layer')
    add('--nlayers', type=int, default=1, help='number of layers')
    add('--lr', type=float, default=0.0025, help='initial learning rate')
    add('--clip', type=float, default=0.25, help='gradient clipping')
    add('--epochs', type=int, default=1, help='upper epoch limit')
    add('--batch_size', type=int, default=1, metavar='N', help='batch size')
    add('--bptt', type=int, default=41, help='sequence length')
    add('--dropout', type=float, default=0, help='dropout applied to layers (0 = no dropout)')
    add('--tied', action='store_true', help='tie the word embedding and softmax weights')
    add('--seed', type=int, default=1111, help='random seed')
    add('--cuda', action='store_true', help='use CUDA')
    add('--log-interval', type=int, default=10000, metavar='N', help='report interval')
    add('--save', type=str, default='./output/', help='path to save the final model')
    return parser.parse_args()
def set_torch_seed(seed):
    """Seed torch (and CUDA when enabled) for reproducibility across model types.

    Relies on the module-level ``args`` namespace for the --cuda flag.
    """
    torch.manual_seed(seed)
    if not torch.cuda.is_available():
        return
    if args.cuda:
        torch.cuda.manual_seed(seed)
    else:
        print("WARNING: You have a CUDA device, so you should probably run with --cuda")
def batchify(data, bsz):
    """Reshape a 1-D token stream into ``bsz`` parallel columns.

    Trailing tokens that do not fill a whole column set are dropped; the
    result is moved to the GPU when --cuda is set (module-level ``args``).
    """
    # Work out how cleanly we can divide the dataset into bsz parts.
    n_full = data.size(0) // bsz
    # Trim the remainder, then lay the stream out column-wise.
    trimmed = data.narrow(0, 0, n_full * bsz)
    batched = trimmed.view(bsz, -1).t().contiguous()
    if args.cuda:
        batched = batched.cuda()
    return batched
def repackage_hidden(h):
    """Detach hidden state(s) from their autograd history.

    A single ``Variable`` is re-wrapped around its raw data; anything else is
    assumed to be a (possibly nested) tuple of hidden states and is rebuilt
    recursively.
    """
    is_single = type(h) == Variable  # exact type check, as in the original API
    if is_single:
        return Variable(h.data)
    return tuple(repackage_hidden(state) for state in h)
def get_batch(source, i, evaluation=False):
    """Slice a (data, target) pair of up to ``args.bptt`` rows starting at ``i``.

    ``target`` is the input shifted by one token and flattened. The
    ``evaluation`` flag is accepted for API compatibility but unused.
    """
    length = min(args.bptt, len(source) - 1 - i)
    inputs = Variable(source[i:i + length])
    labels = Variable(source[i + 1:i + 1 + length].view(-1))
    return inputs, labels
def evaluate(rnn_model, data_source, criterion):
    """Return the average per-token loss of ``rnn_model`` on ``data_source``.

    Uses module-level globals: ``args``, ``corpus``, ``eval_batch_size``,
    and the helpers ``get_batch``/``get_eos``. Each bptt chunk is truncated
    at the first end-of-sentence token and processed with a freshly
    initialised hidden state (sentence-level evaluation).
    """
    if args.cuda:
        data_source = data_source.cuda()
    # Turn on evaluation mode
    rnn_model.eval()
    # Define end of sentence index
    eos = get_eos()
    # Initiate variables for loss computation
    ntokens = len(corpus.dictionary)
    total_loss = 0
    # NOTE(review): data_len starts at 1 (not 0) — this avoids division by
    # zero on empty data but slightly biases the average; confirm intended.
    data_len = 1
    # Loop through test data
    for i in range(0, data_source.size(0) - 1, args.bptt):
        data, targets = get_batch(data_source, i, evaluation=True)
        for j in range(len(data)):
            if (data[j].data == eos)[0] == True:
                # Cut off data at end of sentence
                data = data[:j,:1]
                targets = targets[:j]
                break
        # Fresh hidden state per chunk: no state is carried across sentences.
        hidden = rnn_model.init_hidden(eval_batch_size)
        output, hidden = rnn_model(data, hidden)
        # Token-weighted accumulation so the final value is a per-token average.
        total_loss += len(data) * criterion(log_softmax(output.view(-1, ntokens), dim=1), targets).data
        data_len += len(data)
    return total_loss.item() / data_len
def shuffle_train_data(train_data):
    """Randomise sentence order in the training data (blocks of args.bptt rows).

    Fix: ``torch.initial_seed()`` can exceed NumPy's 32-bit seed range and
    make ``numpy.random.seed`` raise ``ValueError``, so the seed is reduced
    modulo 2**32 before being handed to NumPy.
    """
    # Set numpy random seed from the current torch seed (reproducible shuffle).
    numpy.random.seed(torch.initial_seed() % (2 ** 32))
    # Convert to numpy array for shuffling (the np array aliases the tensor's
    # memory, so shuffling it shuffles the tensor as well)
    train_data = train_data.cpu()
    train_np = train_data.numpy()
    # Shuffle whole sentences: view the data as (num sentences, N rows, cols)
    # and permute along the first axis in place.
    N = args.bptt  # Blocks of N rows
    M, n = train_np.shape[0] // N, train_np.shape[1]  # (num sentences, num cols)
    numpy.random.shuffle(train_np.reshape(M, -1, n))
    del train_np
    # After shuffling with numpy, move data (back) to the GPU if requested.
    if args.cuda:
        train_data = train_data.cuda()
    return train_data
def get_eos():
    """Return the '<eos>' token index as a 1-element LongTensor.

    Placed on the GPU when --cuda is set; uses the module-level
    ``dictionary`` and ``args`` globals.
    """
    index = [dictionary.word2idx['<eos>']]
    if args.cuda:
        return torch.cuda.LongTensor(index)
    return torch.LongTensor(index)
def train(rnn_model, model_name, seed_index, criterion):
    """Train ``rnn_model`` for one pass over the module-level ``train_data``.

    Relies on module-level globals set by the ``__main__`` driver: ``args``,
    ``train_data``, ``ntokens``, ``epoch``, plus the helpers ``get_batch``
    and ``get_eos``. Snapshots are written to ``args.save`` at fixed
    sentence counts; the learning rate is annealed in thirds of the data.

    Fixes over the previous version:
      * gradients are clipped *before* ``optimizer.step()`` — clipping after
        the update had no effect on the step just taken;
      * the running loss is accumulated as a Python float via ``.item()``
        (indexing a 0-d loss tensor with ``[0]`` fails on modern PyTorch);
      * the progress log reports the current (annealed) learning rate
        instead of the initial ``args.lr``.
    """
    # Turn on training mode.
    rnn_model.train()
    total_loss = 0.0
    start_time = time.time()
    # Define optimizer, set initial learning rate & constant momentum
    lr = args.lr
    optimizer = optim.SGD(rnn_model.parameters(), lr=lr, momentum=0.9)
    # Define number of sentences at which to take snapshots
    snapshots = [1000 - 1, 3000 - 1, 10000 - 1, 30000 - 1, 100000 - 1, 300000 - 1, 1000000 - 1, 3000000 - 1,
                 (len(train_data) // args.bptt) - 1]
    # Define end of sentence index
    eos = get_eos()
    # Loop through training data
    for batch, i in enumerate(range(0, train_data.size(0) - 1, args.bptt)):
        data, targets = get_batch(train_data, i)
        # Cut off data at end of sentence — remove filler masking elements
        # from input data and targets.
        for j in range(len(data)):
            if (data[j].data == eos)[0] == True:
                data = data[:j,:1]
                targets = targets[:j]
                break
        # Reset hidden states for new sequence
        hidden = rnn_model.init_hidden(args.batch_size)
        rnn_model.zero_grad()  # Set gradients to zero for the optimiser
        output, hidden = rnn_model(data, hidden)
        # Optimize network
        loss = criterion(log_softmax(output.view(-1, ntokens), dim=1), targets)
        loss.backward()
        # Clip gradients BEFORE the optimizer consumes them.
        torch.nn.utils.clip_grad_norm_(rnn_model.parameters(), args.clip)
        optimizer.step()
        total_loss += loss.item()
        # Print user feedback
        if batch % args.log_interval == 0 and batch > 0:
            cur_loss = total_loss / args.log_interval  # current loss
            elapsed = time.time() - start_time  # elapsed time
            print('| epoch {:3d} | {:5d}/{:5d} batches | lr {:02.5f} | ms/batch {:5.2f} | '
                  'loss {:5.2f} | ppl {:8.2f}'.format(
                epoch, batch, len(train_data) // args.bptt, lr,
                elapsed * 1000 / args.log_interval, cur_loss, math.exp(cur_loss)))
            total_loss = 0.0
            start_time = time.time()
        # Anneal learning rate in every 3rd of the training data
        if batch in range(0, len(train_data) // args.bptt, len(train_data) // args.bptt // 3) and batch > 0:
            lr /= math.sqrt(4.0)
            optimizer = optim.SGD(rnn_model.parameters(), lr=lr, momentum=0.9)
            print('Updated learning rate to {}'.format(lr))
        if batch in snapshots:
            with open(args.save+model_name+'_'+str(batch+1)+'_'+str(seed_index), 'wb') as f:
                torch.save(rnn_model, f)
            print('> Saved snapshot at {} sentences to disc'.format(batch+1))
    return rnn_model
if __name__ == '__main__':
    # Parse command line arguments
    args = parse_args()
    # Process corpus
    corpus = data.Corpus(args.data, args.bptt)
    dictionary = corpus.dictionary
    ntokens = len(corpus.dictionary)
    train_data = batchify(corpus.train, args.batch_size)
    # Validation/test are evaluated one sentence-column at a time.
    eval_batch_size = 1
    val_data = batchify(corpus.valid, eval_batch_size)
    test_data = batchify(corpus.test, eval_batch_size)
    # Generate reusable random seeds (one per replication run)
    seeds = sample(range(0, 4999), 6)
    seed_indices = range(0, 6)
    print('> chr: The seeds are {}, the seed indices are {}'.format(seeds, [ind for ind in seed_indices]))
    # Loop through seeds (corresponding to models with same sentence order and same initial weights)
    for seed_index, seed in zip(seed_indices, seeds):
        # Set the torch random seed
        # NOTE(review): torch.manual_seed is called directly; set_torch_seed
        # (which also seeds CUDA) is defined above but unused — confirm.
        torch.manual_seed(seed)
        # Randomise sentence order in train data (using current seed)
        train_data = shuffle_train_data(train_data)
        # Initialise weights for all model types (using current seed)
        # NOTE(review): the archetype is always built as 'LSTM' — presumably so
        # the shared initial weights cover the largest gate matrices; confirm.
        arche = model.arche_RNN('LSTM', ntokens, args.emsize, args.nhid, args.nlayers)
        arche.init_weights()
        # For this sentence order and these weights, create on of each RNN types
        models = ['RNN_TANH', 'GRU', 'LSTM']
        for model_name in models:
            # Initialise the rnn model from the shared archetype weights.
            rnn_model = model.RNNModel(model_name, ntokens, args.emsize, args.nhid, args.nlayers,
                                       arche.encoder.weight.data, arche.rnn.all_weights, arche.predecoder.bias.data,
                                       arche.predecoder.weight.data, arche.decoder.bias.data, arche.decoder.weight.data)
            if args.cuda:
                rnn_model.cuda()
            print('Initialised Model:', rnn_model.parameters)
            # Define common criterion for training, validating, testing
            criterion = torch.nn.NLLLoss()
            # Loop through epochs.
            for epoch in range(1, args.epochs + 1):
                epoch_start_time = time.time()
                rnn_model = train(rnn_model, model_name, seed_index, criterion)
                # Evaluate on validation data after each epoch
                val_loss = evaluate(rnn_model, val_data, criterion)
                print('=' * 89)
                print('| Validation | loss {:5.2f} | ppl {:8.2f}'.format(
                    val_loss, math.exp(val_loss)))
                print('=' * 89)
            # Evaluate on test data
            test_loss = evaluate(rnn_model, test_data, criterion)
            print('=' * 89)
            print('| End of training | test loss {:5.2f} | test ppl {:8.2f}'.format(
                test_loss, math.exp(test_loss)))
            print('=' * 89)
|
|
import numpy as np
import re
import itertools
from collections import Counter
from konlpy.tag import Mecab
def clean_str(string):
    """
    Tokenization/string cleaning for all datasets except for SST.
    Original taken from https://github.com/yoonkim/CNN_sentence/blob/master/process_data.py
    """
    # Ordered (pattern, replacement) pairs; order matters (e.g. the character
    # filter must run first, whitespace collapsing last).
    substitutions = (
        (r"[^A-Za-z0-9(),!?\'\`]", " "),
        (r"\'s", " 's"),
        (r"\'ve", " 've"),
        (r"n\'t", " n't"),
        (r"\'re", " 're"),
        (r"\'d", " 'd"),
        (r"\'ll", " 'll"),
        (r",", " , "),
        (r"!", " ! "),
        (r"\(", " \( "),
        (r"\)", " \) "),
        (r"\?", " \? "),
        (r"\s{2,}", " "),
    )
    for pattern, replacement in substitutions:
        string = re.sub(pattern, replacement, string)
    return string.strip().lower()
def load_data_and_labels(positive_data_file, negative_data_file):
    """
    Loads MR polarity data from files, splits the data into words and generates labels.
    Returns split sentences and labels.

    Fix: files are opened via context managers so the handles are closed
    deterministically (the previous version leaked them).
    """
    # Load data from files (one example per line).
    with open(positive_data_file, "r") as f:
        positive_examples = [s.strip() for s in f.readlines()]
    with open(negative_data_file, "r") as f:
        negative_examples = [s.strip() for s in f.readlines()]
    # Split by words
    x_text = positive_examples + negative_examples
    x_text = [clean_str(sent) for sent in x_text]
    # Generate one-hot labels: positive -> [0, 1], negative -> [1, 0].
    positive_labels = [[0, 1] for _ in positive_examples]
    negative_labels = [[1, 0] for _ in negative_examples]
    y = np.concatenate([positive_labels, negative_labels], 0)
    return [x_text, y]
# POS tags kept as content words: nouns, adjectives, verbs, foreign words,
# numbers and symbols.
_CONTENT_POS_TAGS = {'NNG', 'NNP', 'NNB', 'NNBC', 'VA', 'VV', 'SL', 'SN', 'SY'}

def _extract_content_words(mecab, sentence):
    """Return the content words of ``sentence`` joined by single spaces.

    NOTE: words are *prepended*, so the output is in reverse corpus order —
    this reproduces the original behaviour exactly.
    """
    result = ''
    for word, tag in mecab.pos(sentence):
        if tag in _CONTENT_POS_TAGS:
            result = word + ' ' + result
    return result

def load_data_and_labels2(file_name):
    """Load a tab-separated Korean sentiment file and build labelled data.

    Each line has the form ``id<TAB>sentence<TAB>label`` where the label is
    '0' (negative) or '1' (positive); malformed lines are printed and skipped.
    Returns ``[x_text, y]`` with x_text the extracted content words per
    sentence and y one-hot labels (positive -> [0, 1], negative -> [1, 0]).

    Fixes: the input file is closed via a context manager (it previously
    leaked), the duplicated tokenisation loop is factored into
    ``_extract_content_words``, and the unused positive/negative counters
    were removed.
    """
    positive_exams = []
    negative_exams = []
    with open(file_name, "r") as f:
        for s in f:
            splited = s.split('\t')
            if splited[2] == '0\n':
                negative_exams.append(splited[1])
            elif splited[2] == '1\n':
                positive_exams.append(splited[1])
            else:
                print(splited[0], splited[1], splited[2])
    mecab = Mecab()
    positive_result = [_extract_content_words(mecab, pp) for pp in positive_exams]
    positive_labels = [[0, 1] for _ in positive_result]
    negative_result = [_extract_content_words(mecab, pp) for pp in negative_exams]
    negative_labels = [[1, 0] for _ in negative_result]
    y = np.concatenate([positive_labels, negative_labels], 0)
    x_text = positive_result + negative_result
    return [x_text, y]
# data: zipped (x_train, y_train) pairs
# batch_size: e.g. 64
# num_epochs: e.g. 200
# Pairs x with y, shuffles them each epoch, and yields batch_size-sized slices.
def batch_iter(data, batch_size, num_epochs, shuffle=True):
    """
    Generates a batch iterator for a dataset.
    """
    data = np.array(data)
    n_samples = len(data)
    n_batches_per_epoch = int((n_samples - 1) / batch_size) + 1
    for _ in range(num_epochs):
        # Re-shuffle once per epoch so batches differ between epochs.
        if shuffle:
            order = np.random.permutation(np.arange(n_samples))
            epoch_data = data[order]
        else:
            epoch_data = data
        for batch_idx in range(n_batches_per_epoch):
            lo = batch_idx * batch_size
            hi = min(lo + batch_size, n_samples)
            yield epoch_data[lo:hi]
|
|
#!/usr/bin/env python
import numpy as np
from sklearn.datasets import load_svmlight_file
from sklearn.metrics import average_precision_score, roc_auc_score
from sklearn.ensemble import IsolationForest
import sys
if __name__ == "__main__":
    # Read per-run score lines from "<filename>_Results.txt" and report the
    # mean average-precision and ROC-AUC across runs, appending the summary
    # to "Results.txt".
    filename = sys.argv[1]
    ap = []
    auc = []
    with open(filename+"_Results.txt", "r") as f:
        # NOTE(review): the first line is split on spaces but subsequent lines
        # are split on tabs (bottom of the loop) — confirm the file format.
        fields = f.readline().strip().split(" ")
        while(fields != ['']):
            scores = list((map(float, fields[1].split(" "))))
            # Higher raw score = more normal, so negate to obtain anomaly scores.
            anomalyscores = -1.0 * np.array(scores)
            # NOTE(review): `y` (ground-truth labels) is never defined in this
            # file — presumably it should come from load_svmlight_file (imported
            # above but unused); as written this line raises NameError.
            ap.append(average_precision_score(y[:len(scores)], anomalyscores))
            # AUC is computed on the last 256 points only.
            auc.append(roc_auc_score(y[len(scores)-256:len(scores)], anomalyscores[len(scores)-256:len(scores)]))
            fields = f.readline().strip().split("\t")
    print("xstream: AP =", np.mean(ap), "AUC =", np.mean(auc))
    result = str(np.mean(ap)) + " " + str(np.mean(auc)) + "\n"
    # NOTE(review): consider `with open(...)` here; the handle is closed
    # manually below.
    file1 = open("Results.txt","a")
    file1.write(result)
    file1.close()
|
|
'''
Created on Jan 6, 2014
@author: jbq
'''
import numpy
from logger import vlog, tr
import copy
class Interpolator(object):
    '''
    Evaluate the structure factor at a particular phase point for any
    value of the external parameters
    '''
    def __init__(self, fseries, signalseries, errorseries=None, running_regr_type = 'linear', windowlength=3):
        '''
        Arguments:
          fseries: external parameter values (assumed ascending)
          signalseries: structure factor at each fseries point
          [errorseries]: optional errors; take precedence over estimated ones
          [running_regr_type]: method for the local, running regression
          [windowlength]: length of window where local regression is done.
                          Select zero for no regression.
        Attributes:
          range: minimum and maximum of the fseries
          fitted: values of the structure factor at the external parameter values after the running regression
          errors: errorseries or estimated errors at the external parameter values from the running regression
          y: interpolator object for the structure factor (cubic spline)
          e: interpolator object for the error (linear)
          running_regr_type: type of running regression
          windowlength: length of the local regression window

        Fix: window-midpoint indices now use integer (floor) division ``//``.
        Under Python 3 the original ``/`` produced floats, which are invalid
        as sequence indices; ``//`` is identical to the old ``/`` on Python 2
        integers, so behaviour is unchanged there.
        '''
        # Deal with possible errors
        if len( fseries ) != len( signalseries ):
            vlog.error( 'signal and external parameter series have different length!' )
        self.running_regr_type = running_regr_type
        self.windowlength = windowlength
        self.range = ( fseries[ 0 ], fseries[ -1 ] )
        half = self.windowlength // 2  # offset of the window midpoint
        # Do running regression, and if necessary estimate errors
        if self.windowlength and running_regr_type == 'linear':
            if self.windowlength < 3:
                message = 'Linear regression requires a window length bigger than 2'
                vlog.error(message)
                raise ValueError(message)
            from scipy.stats import linregress
            if len( fseries ) < self.windowlength:
                vlog.error( 'series has to contain at least {0} members'.format( windowlength ) )
            else:
                # Lower boundary: fit the first window and use it for the
                # first 1 + windowlength//2 points.
                x = fseries[ : self.windowlength ]
                y = signalseries[ : self.windowlength ]
                slope, intercept, r_value, p_value, std_err = linregress( x, y )
                linF = lambda xx: intercept + slope * xx
                self.fitted = []
                for i in range(0, 1 + half):
                    self.fitted.append(linF(x[i]))
                residuals = numpy.square(numpy.vectorize(linF)(x) - y)
                residual = numpy.sqrt( numpy.mean(residuals))  # average residual
                self.errors = [residual,] * (1 + half)
                # Slide the window until hitting the upper boundary
                index = 1  # lower bound of the regression window
                while ( index + self.windowlength <= len( fseries ) ):
                    x = fseries[ index : index + self.windowlength ]
                    y = signalseries[ index : index + self.windowlength ]
                    slope, intercept, r_value, p_value, std_err = linregress( x, y )
                    linF = lambda xx: intercept + slope * xx
                    self.fitted.append(linF(x[half]))
                    residuals = numpy.square(numpy.vectorize(linF)(x) - y)
                    residual = numpy.sqrt( numpy.mean(residuals))  # average residual
                    self.errors.append(residual)
                    # Resolve the upper boundary with the last fitted window
                    if index + self.windowlength == len( fseries ):
                        for i in range(1 + half, self.windowlength):
                            self.fitted.append(linF(x[i]))
                            self.errors.append(residual)
                    index += 1
        elif self.windowlength and running_regr_type == 'quadratic':
            if self.windowlength < 4:
                message = 'Quadratic regression requires a window length bigger than 3'
                vlog.error(message)
                raise ValueError(message)
            from numpy import polyfit
            if len( fseries ) < self.windowlength:
                vlog.error( 'series has to contain at least {0} members'.format( self.windowlength ) )
            else:
                # Lower boundary: fit the first window (second order polynomial)
                x = fseries[ : self.windowlength ]
                y = signalseries[ : self.windowlength ]
                coeffs, residuals, rank, singular_values, rcond = polyfit(x,y,2, full=True)
                quadF = lambda xx: coeffs[0]*xx*xx + coeffs[1]*xx + coeffs[2]
                self.fitted = []
                for i in range(0, 1 + half):
                    self.fitted.append(quadF(x[i]))
                residual = numpy.sqrt(numpy.mean( residuals ))  # average residual
                self.errors = [residual,] * (1 + half)
                # Slide the window until hitting the upper boundary
                index = 1  # lower bound of the regression window
                while ( index + self.windowlength <= len( fseries ) ):
                    x = fseries[ index : index + self.windowlength ]
                    y = signalseries[ index : index + self.windowlength ]
                    coeffs, residuals, rank, singular_values, rcond = polyfit(x,y,2, full=True)
                    quadF = lambda xx: coeffs[0]*xx*xx + coeffs[1]*xx + coeffs[2]
                    self.fitted.append(quadF(x[half]))
                    residuals = numpy.square(numpy.vectorize(quadF)(x) - y)
                    residual = numpy.sqrt( numpy.mean(residuals))  # average residual
                    self.errors.append(residual)
                    # Resolve the upper boundary with the last fitted window
                    if index + self.windowlength == len( fseries ):
                        for i in range(1 + half, self.windowlength):
                            self.fitted.append(quadF(x[i]))
                            self.errors.append(residual)
                    index += 1
        else:
            if self.windowlength == 0:
                # No regression requested: pass the signal through unchanged.
                self.fitted = copy.copy(signalseries)
                self.errors = [0.,] * len( fseries )
            else:
                vlog.warning( 'Requested regression type not recognized' )
        # Passed errors take precedence over calculated errors
        if errorseries is not None:
            self.errors = errorseries
        # Interpolators for fitted and errors
        from scipy.interpolate import interp1d, UnivariateSpline
        x = numpy.array( fseries )
        y = numpy.array( self.fitted )
        e = numpy.array( self.errors )
        if e.any():
            min_nonzero_error = numpy.min(e[numpy.nonzero(e)])  # smallest non-zero error
            # Substitute zero errors with the smallest non-zero error so the
            # weights below stay finite.
            e = numpy.where(e >= min_nonzero_error, e, min_nonzero_error)
            w = 1.0 / e
            s = len( fseries )
        else:  # in the case of no errors, force the spline to pass through all points
            w = numpy.ones(len(fseries))
            s = 0
        self.y = UnivariateSpline( x, y, w=w, s=s )
        self.e = interp1d(x, e, kind='linear')
    def __call__(self, fvalue):
        ''' Evaluate the interpolators for the particular value of the external parameter '''
        if self.range[0] <= fvalue <= self.range[1]:
            return self.y(fvalue), float(self.e(fvalue))
        else:
            vlog.error( 'Value outside of interpolating bounds' )
            return ( float( 'inf' ), float( 'inf' ) )
|
|
#Copyright 2022 Nathan Harwood
#
#Licensed under the Apache License, Version 2.0 (the "License");
#you may not use this file except in compliance with the License.
#You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
import numpy as np
from audiomodule.audiomodule import AM_CONTINUE, AM_CYCLIC_UNDERRUN, AM_INPUT_REQUIRED, AudioModule, audiomod
@audiomod
class Normalize(AudioModule):
    """Running-peak normaliser: divides the input by the largest absolute
    sample value observed so far."""
    name = "Normalize"
    category = "Filter"
    description = ("Maintain a running maximum of the input signal and produce "
                   "an output signal divided by that maximum.")

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # Running peak of |signal| seen so far; persists across chunks.
        self.max = 0.0

    async def next_chunk(self):
        """Process one chunk: update the running peak, emit signal / peak."""
        underrun, cyclic = self.input_underrun()
        if (not self.input_pending()) or underrun:
            if cyclic:
                return AM_CYCLIC_UNDERRUN
            else:
                return AM_INPUT_REQUIRED
        signal = self.get_input_chunk().buffer[:, 0]
        self.max = max([self.max, np.max(np.abs(signal))])
        # Guard against division by zero before any non-silent input arrives.
        if self.max > 0.0:
            signal = signal / self.max
        self.send_signal(signal.reshape(self.chunk_size, self.out_chs[0]))
        return AM_CONTINUE

    def open(self):
        """Reset the running peak whenever the module is (re)opened."""
        super().open()
        self.max = 0.0

    def get_status(self):
        # Fix: this was a plain string literal, so the UI showed the raw
        # "{self.max:.3f}" placeholder — it must be an f-string.
        return {
            'bottom': f'Peak {self.max:.3f}'
        }
|
|
# %%
import numpy as np
import matplotlib.pyplot as plt
from skimage.measure import label
from skimage import data
from skimage import color
from skimage.morphology import extrema
from skimage import exposure
from PIL import Image
from skimage.feature import peak_local_max
# %%
# Load the training image from a hard-coded local path.
img = Image.open('C:\\Users\\Dell\\Desktop\\od_zacatku\\train_img\\train4.tiff')
imarray = np.array(img)
# h-maxima threshold and the crop window used for display below.
h = 0.3
x_0 = 70
y_0 = 354
width = 256
height = 256
# Stretch intensities to the full range before maxima detection.
imarray = exposure.rescale_intensity(imarray)
# All local maxima (no height criterion), labelled for overlay colouring.
local_maxima = extrema.local_maxima(imarray)
label_maxima = label(local_maxima)
print(label_maxima.shape)
print(imarray.shape)
print(label_maxima.dtype)
print(imarray.dtype)
overlay = color.label2rgb(label_maxima, imarray, alpha=0.3, bg_label=0,
                          bg_color=None, colors=[(1, 0, 0)])
# h-maxima: only maxima that stand at least `h` above their surroundings.
h_maxima = extrema.h_maxima(imarray, h)
label_h_maxima = label(h_maxima)
overlay_h = color.label2rgb(label_h_maxima, imarray, alpha=0.3, bg_label=0,
                            bg_color=None, colors=[(1, 0, 0)])
# %%
# Show the cropped original next to the two maxima overlays.
fig, ax = plt.subplots(1, 3, figsize=(15, 5))
ax[0].imshow(imarray[y_0:(y_0 + height), x_0:(x_0 + width)], cmap='gray',
             interpolation='none')
ax[0].set_title('Original image')
ax[0].axis('off')
ax[1].imshow(overlay[y_0:(y_0 + height), x_0:(x_0 + width)],
             interpolation='none')
ax[1].set_title('Local Maxima')
ax[1].axis('off')
ax[2].imshow(overlay_h[y_0:(y_0 + height), x_0:(x_0 + width)],
             interpolation='none')
ax[2].set_title('h maxima for h = %.2f' % h)
ax[2].axis('off')
plt.show()
# %%
|
|
# Author: Vincent Zhang
# Mail: zhyx12@gmail.com
# ----------------------------------------------
import torch
from collections.abc import Sequence
from mmcv.runner import get_dist_info
from mmcv.parallel import MMDistributedDataParallel
import numpy as np
import random
import torch.distributed as dist
from mmcv.utils import build_from_cfg
from ..hooks import HOOKS
def move_data_to_gpu(cpu_data, gpu_id):
    """Recursively move tensors nested in lists/tuples/dicts to ``gpu_id``.

    Lists and dicts are updated in place (and returned), matching the
    original behaviour; any other value is returned unchanged.

    Fixes: the old ``Sequence`` branch assigned by index, which crashed on
    tuples (immutable) and recursed endlessly on strings — tuples are now
    rebuilt as new tuples (plain tuples only, not namedtuples) and strings
    are passed through untouched.
    """
    if isinstance(cpu_data, torch.Tensor):
        # Only relocate tensors that still live on the CPU.
        if cpu_data.device == torch.device('cpu'):
            return cpu_data.to(gpu_id)
        return cpu_data
    if isinstance(cpu_data, dict):
        for key, item in cpu_data.items():
            cpu_data[key] = move_data_to_gpu(item, gpu_id)
        return cpu_data
    if isinstance(cpu_data, str):
        # Strings are Sequences but must not be recursed into.
        return cpu_data
    if isinstance(cpu_data, Sequence):
        if isinstance(cpu_data, tuple):
            return tuple(move_data_to_gpu(item, gpu_id) for item in cpu_data)
        # Mutable sequences (lists) are updated in place, as before.
        for ind, item in enumerate(cpu_data):
            cpu_data[ind] = move_data_to_gpu(item, gpu_id)
        return cpu_data
    return cpu_data
def move_models_to_gpu(model, device, max_card=0, find_unused_parameters=False, broadcast_buffers=False):
    """Move ``model`` onto its assigned GPU and wrap it for distributed training.

    :param model: the model to relocate
    :param device: local device index within this rank's card allotment
    :param max_card: number of cards per rank (0 keeps the raw device index)
    :param find_unused_parameters: forwarded to MMDistributedDataParallel
    :param broadcast_buffers: set default value to False, which is also adopted
        in mmcls/mmseg/mmdet, the real control lies in models builder.py
    :return: the DDP-wrapped model
    """
    rank, _ = get_dist_info()
    # Global GPU index for this rank's `device`-th card.
    target_gpu = rank * max_card + device
    model = model.to('cuda:{}'.format(target_gpu))
    return MMDistributedDataParallel(
        model,
        device_ids=[target_gpu],
        output_device=target_gpu,
        find_unused_parameters=find_unused_parameters,
        broadcast_buffers=broadcast_buffers)
def deal_with_val_interval(val_interval, max_iters, trained_iteration=0):
    """Expand a validation-interval spec into a list of iteration checkpoints.

    ``val_interval`` is either a single number (validate every N iterations)
    or a dict ``{milestone: interval}`` giving a different interval per
    segment. ``max_iters`` is always included as the final checkpoint, and
    checkpoints at or before ``trained_iteration`` are dropped (resume case).
    """
    checkpoints = []

    def drop_already_trained(points):
        # Keep only checkpoints strictly after trained_iteration; once a
        # checkpoint reaches the threshold, everything after it is kept.
        kept = []
        passed = False
        for point in points:
            if passed:
                kept.append(point)
            elif point >= trained_iteration:
                if point > trained_iteration:
                    kept.append(point)
                passed = True
        return kept

    if isinstance(val_interval, (int, float)):
        step = int(val_interval)
        for k in range(1, int(max_iters / val_interval) + 1):
            checkpoints.append(k * step)
        if checkpoints[-1] != max_iters:
            checkpoints.append(max_iters)
        return drop_already_trained(checkpoints)
    if isinstance(val_interval, dict):
        milestones = sorted(val_interval.keys())
        assert milestones[0] > 0 and milestones[-1] <= max_iters, 'check val interval keys'
        # If the last milestone is not max_iters, extend using the last interval.
        if milestones[-1] != max_iters:
            val_interval[max_iters] = val_interval[milestones[-1]]
            milestones.append(max_iters)
        cursor = 0
        previous = 0
        for milestone in milestones:
            interval = val_interval[milestone]
            for _ in range(int((milestone - previous) / interval)):
                checkpoints.append(cursor + int(interval))
                cursor += int(interval)
            if checkpoints[-1] != milestone:
                checkpoints.append(milestone)
                cursor = milestone
            previous = cursor
        return drop_already_trained(checkpoints)
    raise RuntimeError('only single value or dict is acceptable for val interval')
def init_random_seed(seed=None, device='cuda'):
    """Initialize random seed.
    If the seed is not set, the seed will be automatically randomized,
    and then broadcast to all processes to prevent some potential bugs.
    Args:
        seed (int, Optional): The seed. Default to None.
        device (str): The device where the seed will be put on.
            Default to 'cuda'.
    Returns:
        int: Seed to be used.
    """
    if seed is not None:
        return seed
    # Make sure all ranks share the same random seed to prevent
    # some potential bugs. Please refer to
    # https://github.com/open-mmlab/mmdetection/issues/6339
    rank, world_size = get_dist_info()
    seed = np.random.randint(2 ** 31)
    if world_size == 1:
        return seed
    # Rank 0 picks the seed; everyone else receives it via broadcast.
    payload = seed if rank == 0 else 0
    seed_tensor = torch.tensor(payload, dtype=torch.int32, device=device)
    dist.broadcast(seed_tensor, src=0)
    return seed_tensor.item()
def set_random_seed(seed, deterministic=False):
    """Set random seed.

    Seeds Python's ``random``, NumPy and PyTorch (all CUDA devices).

    Args:
        seed (int): Seed to be used.
        deterministic (bool): Whether to set the deterministic option for
            CUDNN backend, i.e., set `torch.backends.cudnn.deterministic`
            to True and `torch.backends.cudnn.benchmark` to False.
            Default: False.
    """
    for seeder in (random.seed, np.random.seed, torch.manual_seed, torch.cuda.manual_seed_all):
        seeder(seed)
    if deterministic:
        torch.backends.cudnn.deterministic = True
        torch.backends.cudnn.benchmark = False
def build_custom_hooks(hooks_args, runner):
    """Instantiate hook configs and register them on ``runner``.

    ``hooks_args`` may be None (no-op) or a list of config dicts; each dict
    is copied, given a back-reference to the runner, and built through the
    HOOKS registry. 'priority' defaults to 'NORMAL'.
    """
    if hooks_args is None:
        return
    assert isinstance(hooks_args, list), \
        f'custom_hooks expect list type, but got {type(hooks_args)}'
    for cfg in hooks_args:
        assert isinstance(cfg, dict), \
            'Each item in custom_hooks expects dict type, but got ' \
            f'{type(cfg)}'
        cfg = cfg.copy()  # don't mutate the caller's config
        cfg['runner'] = runner
        hook_priority = cfg.pop('priority', 'NORMAL')
        runner.register_hook(build_from_cfg(cfg, HOOKS), priority=hook_priority)
# utils
@torch.no_grad()
def concat_all_gather(tensor):
    """
    Performs all_gather operation on the provided tensors.
    *** Warning ***: torch.distributed.all_gather has no gradient.
    """
    world_size = torch.distributed.get_world_size()
    gathered = [torch.ones_like(tensor) for _ in range(world_size)]
    torch.distributed.all_gather(gathered, tensor, async_op=False)
    return torch.cat(gathered, dim=0)
|
|
import importlib
import os
import sys
import exputils
import imageio
import numpy as np
import torch
import autodisc as ad
from goalrepresent.datasets.image.imagedataset import LENIADataset
def collect_recon_loss_test_datasets(explorer):
    """Compute per-image reconstruction losses on the global ``test_datasets``.

    For every image of every test dataset the reconstruction loss is recorded
    both for the tree root node (path '0') and for the leaf node that the
    image reaches; entries remain -1 when a node produced no loss.

    NOTE(review): relies on the module-global ``test_datasets`` created by
    ``load_explorer`` — confirm it is initialised before this runs.
    """
    statistic = dict()
    test_dataset_idx = 0
    for test_dataset in test_datasets:
        statistic["data_{:03d}".format(test_dataset_idx)] = dict()
        # -1 marks "no loss recorded" for an image
        reconstructions_root = np.ones(test_dataset.n_images) * -1
        reconstructions_leaf = np.ones(test_dataset.n_images) * -1
        model = explorer.visual_representation.model
        model.eval()
        with torch.no_grad():
            for test_data_idx in range(test_dataset.n_images):
                test_data = test_dataset.__getitem__(test_data_idx)["obs"].unsqueeze(0)
                all_nodes_outputs = model.network.depth_first_forward_whole_branch_preorder(test_data)
                for node_idx in range(len(all_nodes_outputs)):
                    cur_node_path = all_nodes_outputs[node_idx][0][0]
                    if cur_node_path == '0':
                        # root-node reconstruction loss
                        node_outputs = all_nodes_outputs[node_idx][2]
                        loss_inputs = {key: node_outputs[key] for key in model.loss_f.input_keys_list}
                        node_losses = model.loss_f(loss_inputs)
                        recon_loss = node_losses["recon"].item()
                        reconstructions_root[test_data_idx] = recon_loss
                    elif cur_node_path in model.network.get_leaf_pathes():
                        # leaf-node reconstruction loss
                        node_outputs = all_nodes_outputs[node_idx][2]
                        loss_inputs = {key: node_outputs[key] for key in model.loss_f.input_keys_list}
                        node_losses = model.loss_f(loss_inputs)
                        recon_loss = node_losses["recon"].item()
                        reconstructions_leaf[test_data_idx] = recon_loss
                    else:
                        # intermediate node: not recorded
                        pass
        statistic["data_{:03d}".format(test_dataset_idx)]['recon_loss_root'] = reconstructions_root
        statistic["data_{:03d}".format(test_dataset_idx)]['recon_loss_leaf'] = reconstructions_leaf
        test_dataset_idx += 1
    return statistic
# def collect_goal_space_representations_test_datasets(explorer):
#
# statistic = dict()
#
# test_dataset_idx = 0
# for test_dataset in test_datasets:
# statistic["data_{:03d}".format(test_dataset_idx)] = dict()
#
# data = {}
#
#
# model = explorer.visual_representation.model
# for path in model.network.get_node_pathes():
# if "gs_{}".format(path) not in statistic["data_{:03d}".format(test_dataset_idx)]:
# data["gs_{}".format(path)] = None
#
# model.eval()
# with torch.no_grad():
# for path in model.network.get_node_pathes():
# cur_representation = []
#
# for test_data_idx in range(test_dataset.n_images):
# test_data = test_dataset.__getitem__(test_data_idx)["obs"].unsqueeze(0)
# representation = model.calc_embedding(test_data, path)
# cur_representation.append(representation.squeeze().cpu().detach().numpy())
#
# data["gs_{}".format(path)] = np.stack(cur_representation)
#
# for k,v in data.items():
#
# if len(np.shape(v)) == 1:
# v = np.array([v])
# else:
# v = np.array(v)
#
# statistic["data_{:03d}".format(test_dataset_idx)][k] = v
#
#
# test_dataset_idx += 1
#
# return statistic
def collect_final_observation(explorer):
    """Return a dict mapping '<run_id>.png' to the final state of each run,
    encoded as PNG bytes. Runs whose observations were not stored are
    re-simulated via ``explorer.system.run``.
    """
    data = dict()
    for run_data in explorer.data:
        obs = run_data.observations
        if obs is None or len(obs.states) == 0:
            # observation missing: regenerate it by re-running the system
            [obs, statistics] = explorer.system.run(run_parameters=run_data.run_parameters,
                                                    stop_conditions=explorer.config.stop_conditions)
        # rescale values from [0 1] to [0 255] and convert to uint8 for saving as bw image
        frame = (obs.states[-1] * 255).astype(np.uint8)
        data['{:06d}.png'.format(run_data.id)] = imageio.imwrite(
            imageio.RETURN_BYTES,
            frame,
            format='PNG-PIL')
    return data
def collect_representation(explorer):
    """Collect the goal-space latent vector of every exploration run.

    For a ProgressiveTree model one stacked array per tree node is returned
    (keys "gs_<path>"); for any other model a single "gs_0" array is
    returned. Runs whose observations are missing are re-simulated.
    """
    data = dict()
    model = explorer.visual_representation.model
    if hasattr(model, "eval"):
        model.eval()
    if "ProgressiveTree" in model.__class__.__name__:
        all_nodes = model.network.get_node_pathes()
        n_latents = model.config.network.parameters.n_latents  # NOTE(review): unused here
        for node_path in all_nodes:
            data["gs_{}".format(node_path)] = []
        with torch.no_grad():
            for run_data in explorer.data:
                obs = run_data.observations
                if obs is None:
                    # observation was not kept in memory: re-run the system
                    [obs, statistics] = explorer.system.run(run_parameters=run_data.run_parameters,
                                                            stop_conditions=explorer.config.stop_conditions)
                # last state as a 1x1xHxW float tensor
                x = torch.from_numpy(obs["states"][-1]).float().unsqueeze(0).unsqueeze(0)
                x = model.push_variable_to_device(x)
                all_nodes_outputs = model.network.depth_first_forward_whole_branch_preorder(x)
                for node_idx in range(len(all_nodes_outputs)):
                    cur_node_path = all_nodes_outputs[node_idx][0][0]
                    cur_node_outputs = all_nodes_outputs[node_idx][2]
                    cur_gs_representation = cur_node_outputs["z"].squeeze().detach().cpu().numpy()
                    data["gs_{}".format(cur_node_path)].append(cur_gs_representation)
        for node_path in all_nodes:
            data["gs_{}".format(node_path)] = np.stack(data["gs_{}".format(node_path)], axis=0)
    else:
        n_latents = model.config.network.parameters.n_latents  # NOTE(review): unused here
        data["gs_0"] = []
        with torch.no_grad():
            for run_data in explorer.data:
                obs = run_data.observations
                if obs is None:
                    [obs, statistics] = explorer.system.run(run_parameters=run_data.run_parameters,
                                                            stop_conditions=explorer.config.stop_conditions)
                x = torch.from_numpy(obs["states"][-1]).float().unsqueeze(0).unsqueeze(0)
                if hasattr(model, "push_variable_to_device"):
                    x = model.push_variable_to_device(x)
                z = model.calc_embedding(x).squeeze().detach().cpu().numpy()
                data["gs_0"].append(z)
        data["gs_0"] = np.stack(data["gs_0"], axis=0)
    return data
def collect_ids_per_node(explorer):
    """For each tree node, collect the ids of runs whose embedding at that
    node is valid (contains no NaN)."""
    data = dict()
    model = explorer.visual_representation.model
    model.eval()
    with torch.no_grad():
        for path in model.network.get_node_pathes():
            cur_gs_ids = []
            for run_data in explorer.data:
                obs = run_data.observations
                if obs is None:
                    # observation not kept in memory: re-simulate the run
                    [obs, statistics] = explorer.system.run(run_parameters=run_data.run_parameters,
                                                            stop_conditions=explorer.config.stop_conditions)
                x = torch.from_numpy(obs["states"][-1]).float().unsqueeze(0).unsqueeze(0)
                cur_representation = model.calc_embedding(x, path).squeeze()
                # a NaN embedding means the run does not belong to this node
                if not torch.isnan(cur_representation.sum()):
                    cur_gs_ids.append(run_data.id)
            # NOTE(review): np.stack raises on an empty list — this assumes
            # every node matches at least one run; confirm upstream.
            data["gs_{}".format(path)] = np.stack(cur_gs_ids)
    return data
def load_explorer(experiment_directory):
    """Load a GoalSpaceExplorer from *experiment_directory* and prepare test data.

    Side effects: appends the experiment directory to ``sys.path`` and
    (re)initialises the module-global ``test_datasets`` list used by the
    statistic collectors in this module.
    """
    # load the full explorer without observations and add its config
    sys.path.append(experiment_directory)
    explorer = ad.explorers.GoalSpaceExplorer.load_explorer(os.path.join(experiment_directory, 'results'), run_ids=[], map_location='cpu', load_observations=False, verbose=False)
    explorer.data.config.load_observations = True
    explorer.data.config.memory_size_observations = 1
    # re-import the experiment's config module to restore the explorer config
    spec = importlib.util.spec_from_file_location('experiment_config', os.path.join(experiment_directory, 'experiment_config.py'))
    experiment_config_module = importlib.util.module_from_spec(spec)
    spec.loader.exec_module(experiment_config_module)
    explorer.config = experiment_config_module.get_explorer_config()
    # load test datasets
    global test_datasets
    test_datasets = []
    test_dataset_config = ad.Config()
    # NOTE(review): cluster-specific absolute path — adjust per environment
    test_dataset_config.data_root = "/gpfswork/rech/zaj/ucf28eq/data/lenia_datasets/data_005/"
    test_dataset_config.split = "train"
    test_dataset_autodisc = LENIADataset(config = test_dataset_config)
    test_datasets.append(test_dataset_autodisc)
    return explorer
if __name__ == '__main__':
    # Compute the enabled statistics for every experiment found under the
    # current directory; the commented entries are optional collectors.
    experiments = '.'
    statistics = [#('final_observation', collect_final_observation, 'zip'),
                  #('representations', collect_representation),
                  #('ids_per_node', collect_ids_per_node),
                  ('recon_loss_test_datasets', collect_recon_loss_test_datasets),
                  #('goal_space_representations_test_datasets', collect_goal_space_representations_test_datasets),
                  ]
    exputils.calc_experiment_statistics(statistics, load_explorer, experiments, recalculate_statistics=False, verbose=True)
|
|
import numpy as np
import pandas as pd
from ira.analysis import column_vector
from sklearn.base import BaseEstimator
from ira.analysis.timeseries import adx, atr
from ira.analysis.tools import ohlc_resample, rolling_sum
from qlearn import signal_generator
@signal_generator
class AdxFilter(BaseEstimator):
    """
    ADX based trend filter: the signal is True wherever the lagged ADX
    value exceeds ``threshold``.
    """

    def __init__(self, timeframe, period, threshold, smoother='ema'):
        self.timeframe = timeframe
        self.period = period
        self.threshold = threshold
        self.smoother = smoother

    def fit(self, x, y, **kwargs):
        # Stateless filter: nothing to learn.
        return self

    def predict(self, x):
        resampled = ohlc_resample(x, self.timeframe)
        # shift(1) so only the previous completed bar drives the signal
        indicator = adx(resampled, self.period, smoother=self.smoother, as_frame=True).shift(1)
        return indicator.ADX > self.threshold
@signal_generator
class AcorrFilter(BaseEstimator):
    """
    Autocorrelation filter on the resampled close-to-close returns series.
    With ``above=True`` (default) the signal is True where the rolling
    autocorrelation exceeds ``threshold``; with ``above=False`` it is True
    where the autocorrelation is below ``threshold``.
    """

    def __init__(self, timeframe, lag, period, threshold, above=True):
        self.lag = lag
        self.period = period
        self.threshold = threshold
        self.timeframe = timeframe
        self.above = above

    def fit(self, x, y, **kwargs):
        # Stateless filter: nothing to learn.
        return self

    def rolling_autocorrelation(self, x, lag, period):
        """
        Timeseries rolling autocorrelation indicator
        :param period: rolling window
        :param lag: lagged shift used for finding correlation coefficient
        """
        return x.rolling(period).corr(x.shift(lag))

    def predict(self, x):
        bars = ohlc_resample(x[['open', 'high', 'low', 'close']], self.timeframe)
        rets = bars.close.pct_change()
        # shift(1) avoids look-ahead: today's filter uses yesterday's value
        indicator = self.rolling_autocorrelation(rets, self.lag, self.period).shift(1)
        if self.above:
            return indicator > self.threshold
        return indicator < self.threshold
@signal_generator
class VolatilityFilter(BaseEstimator):
    """
    Volatility regime detector:
    False -> flat market, True -> volatile market (instantaneous ATR
    above ``factor`` times the typical ATR).
    """

    def __init__(self, timeframe, instant_period, typical_period, factor=1):
        self.instant_period = instant_period
        self.typical_period = typical_period
        self.factor = factor
        self.timeframe = timeframe

    def fit(self, x, y, **kwargs):
        # Stateless filter: nothing to learn.
        return self

    def predict(self, x):
        bars = ohlc_resample(x, self.timeframe)
        # shift(1) so only completed bars influence the signal
        fast_vol = atr(bars, self.instant_period).shift(1)
        slow_vol = atr(bars, self.typical_period).shift(1)
        return fast_vol > slow_vol * self.factor
@signal_generator
class AtrFilter(BaseEstimator):
    """
    Raw ATR filter: True where the lagged ATR value exceeds ``threshold``.
    """

    def __init__(self, timeframe, period, threshold, tz='UTC'):
        self.timeframe = timeframe
        self.period = period          # ATR lookback period
        self.threshold = threshold    # absolute ATR level to exceed
        self.tz = tz                  # timezone used when resampling

    def fit(self, x, y, **kwargs):
        # Stateless filter: nothing to learn.
        return self

    def get_filter(self, x):
        # shift(1) avoids look-ahead bias: only the previous bar's ATR is used
        a = atr(ohlc_resample(x, self.timeframe, resample_tz=self.tz), self.period).shift(1)
        return a > self.threshold

    def predict(self, x):
        # Consistency fix: every other filter in this module exposes
        # ``predict``; keep ``get_filter`` for backward compatibility and
        # delegate to it here.
        return self.get_filter(x)
@signal_generator
class ChoppinessFilter(BaseEstimator):
    """
    Volatile market leads to false breakouts, and not respecting support/resistance levels (being choppy),
    We cannot know whether we are in a trend or in a range.
    Values above 61.8% indicate a choppy market that is bound to breakout. We should be ready for some directional.
    Values below 38.2% indicate a strong trending market that is bound to stabilize.
    """
    def __init__(self, timeframe, period, upper=61.8, lower=38.2, tz='UTC', atr_smoother='sma'):
        # period: lookback window for the ATR sum and the high/low range
        # upper/lower: choppiness-index bands (defaults are Fibonacci levels)
        self.period = period
        self.upper = upper
        self.lower = lower
        self.timeframe = timeframe
        self.tz = tz
        self.atr_smoother = atr_smoother
    def fit(self, x, y, **kwargs):
        # Stateless filter: nothing to learn.
        return self
    def predict(self, x):
        xr = ohlc_resample(x[['open', 'high', 'low', 'close']], self.timeframe, resample_tz=self.tz)
        a = atr(xr, self.period, self.atr_smoother)
        # total high-low range over the window
        rng = xr.high.rolling(window=self.period, min_periods=self.period).max() \
              - xr.low.rolling(window=self.period, min_periods=self.period).min()
        # rolling sum of ATR over the same window
        rs = pd.Series(rolling_sum(column_vector(a.copy()), self.period).flatten(), a.index)
        # Choppiness Index: 100 * log(sum(ATR)/range) / log(period)
        ci = 100 * np.log(rs / rng) * (1 / np.log(self.period))
        # True above `upper` (choppy), False below `lower` (trending);
        # values in between keep the previous state via ffill
        f0 = pd.Series(np.nan, ci.index)
        f0[ci >= self.upper] = True
        f0[ci <= self.lower] = False
        return f0.ffill().fillna(False)
|
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import unittest
import functools
import math
from fate_arch.session import computing_session as session
from federatedml.ensemble.basic_algorithms.decision_tree.tree_core.g_h_optim import PackedGHCompressor, GHPacker, fix_point_precision
from federatedml.secureprotol.encrypt import IterativeAffineEncrypt, PaillierEncrypt, FakeEncrypt
from federatedml.ensemble.basic_algorithms.decision_tree.tree_core.splitter import SplitInfo
from federatedml.secureprotol.encrypt_mode import EncryptModeCalculator
from federatedml.util import consts
import numpy as np
# Fixed seed so the randomized packing tests below are reproducible.
np.random.seed(114514)
def generate_bin_gh(num):
    """Classification-style gradients/hessians: g uniform in (-1, 1),
    h uniform in (0, 1)."""
    g = np.random.random(num) * 2 - 1
    h = np.random.random(num)
    return g, h
def generate_reg_gh(num, lower, upper):
    """Regression-style gradients/hessians: g uniform in (lower, upper),
    h constant 2 (as for squared-error loss)."""
    g = np.random.random(num) * (upper - lower) + lower
    h = np.full(num, 2.0)
    return g, h
def cmp(a, b):
    """Comparator ordering pairs ascending by their first element.

    Note: ties compare as "less than" (returns -1), exactly like the
    original comparator — stable sorts keep tied elements in input order.
    """
    return 1 if a[0] > b[0] else -1
def en_gh_list(g, h, en):
    """Encrypt every gradient and hessian value with encrypter *en*.

    Returns two lists of ciphertexts in the same order as the inputs.
    """
    encrypt = en.encrypt
    return [encrypt(v) for v in g], [encrypt(v) for v in h]
def truncate(f, n=consts.TREE_DECIMAL_ROUND):
    # Truncate (floor) f to n decimal places; used to compare float sums
    # that may differ in the last digits due to fixed-point packing.
    return math.floor(f * 10 ** n) / 10 ** n
def make_random_sum(collected_gh, g, h, en_g_l, en_h_l, max_sample_num):
    # Pick a random de-duplicated subset of sample indices and accumulate
    # their g/h sums three ways: plaintext, packed ciphertext and
    # per-value ciphertext. Returns all three plus the subset size.
    selected_sample_num = np.random.randint(max_sample_num) + 1  # at least 1 sample
    idx = np.random.random(selected_sample_num)
    idx = np.unique((idx * max_sample_num).astype(int))  # unique indices in [0, max_sample_num)
    print('randomly select {} samples'.format(len(idx)))
    selected_g = g[idx]
    selected_h = h[idx]
    g_sum = selected_g.sum()
    h_sum = selected_h.sum()
    # sort the collected (id, (packed_gh, ...)) pairs by id so that list
    # positions line up with the sample indices in `idx`
    g_h_list = sorted(collected_gh, key=functools.cmp_to_key(cmp))
    sum_gh = 0
    en_g_sum = 0
    en_h_sum = 0
    for i in idx:
        gh = g_h_list[i][1][0]
        sum_gh += gh          # homomorphic sum of packed ciphertexts
        en_g_sum += en_g_l[i]  # homomorphic sum of individual g ciphertexts
        en_h_sum += en_h_l[i]  # homomorphic sum of individual h ciphertexts
    return g_sum, h_sum, sum_gh, en_g_sum, en_h_sum, len(idx)
class TestFeatureHistogram(unittest.TestCase):
    """End-to-end tests for GH packing, accumulation and cipher compression."""
    @staticmethod
    def prepare_testing_data(g, h, en, max_sample_num, sample_id, task_type, g_min=None, g_max=None):
        # Pack & encrypt (g, h) pairs via GHPacker and also encrypt each
        # value individually to serve as a reference for later checks.
        calculator = EncryptModeCalculator(encrypter=en)
        packer = GHPacker(max_sample_num, en_calculator=calculator, sync_para=False, task_type=task_type,
                          g_min=g_min, g_max=g_max)
        en_g_l, en_h_l = en_gh_list(g, h, en)
        data_list = [(id_, (g_, h_)) for id_, g_, h_ in zip(sample_id, g, h)]
        data_table = session.parallelize(data_list, 4, include_key=True)
        en_table = packer.pack_and_encrypt(data_table)
        collected_gh = list(en_table.collect())
        return packer, en_g_l, en_h_l, en_table, collected_gh
    @classmethod
    def setUpClass(cls):
        # Build classification and regression fixtures once for all tests.
        session.init("test_gh_packing")
        cls.max_sample_num = 1000
        cls.test_num = 10
        cls.split_info_test_num = 200
        key_length = 1024
        sample_id = [i for i in range(cls.max_sample_num)]
        # classification data
        cls.g, cls.h = generate_bin_gh(cls.max_sample_num)
        cls.iter_en = IterativeAffineEncrypt()
        cls.iter_en.generate_key(key_length)
        cls.iter_packer, cls.iter_en_g_l, cls.iter_en_h_l, cls.iter_en_table, cls.iter_collected_gh = \
            cls.prepare_testing_data(cls.g, cls.h, cls.iter_en, cls.max_sample_num, sample_id, consts.CLASSIFICATION)
        cls.p_en = PaillierEncrypt()
        cls.p_en.generate_key(key_length)
        cls.p_packer, cls.p_en_g_l, cls.p_en_h_l, cls.p_en_table, cls.p_collected_gh = \
            cls.prepare_testing_data(cls.g, cls.h, cls.p_en, cls.max_sample_num, sample_id, consts.CLASSIFICATION)
        # compressor capacity is derived from the Paillier packer parameters
        cls.compressor = PackedGHCompressor(sync_para=False)
        cls.compressor.compressor._padding_length, cls.compressor.compressor._capacity = \
            cls.p_packer.packer.cipher_compress_suggest()
        print('paillier compress para {}'.format(cls.p_packer.packer.cipher_compress_suggest()))
        # regression data
        cls.g_reg, cls.h_reg = generate_reg_gh(cls.max_sample_num, -1000, 1000)
        cls.reg_p_packer, cls.reg_p_en_g_l, cls.reg_p_en_h_l, cls.reg_p_en_table, cls.reg_p_collected_gh = \
            cls.prepare_testing_data(cls.g_reg, cls.h_reg, cls.p_en, cls.max_sample_num, sample_id, consts.REGRESSION,
                                     g_min=-1000, g_max=1000)
        cls.reg_compressor = PackedGHCompressor(sync_para=False)
        cls.reg_compressor.compressor._padding_length, cls.reg_compressor.compressor._capacity = \
            cls.reg_p_packer.packer.cipher_compress_suggest()
        print('paillier compress para {}'.format(cls.p_packer.packer.cipher_compress_suggest()))
        print('initialization done')
    def run_gh_accumulate_test(self, test_num, collected_gh, en_g_l, en_h_l, packer, en, g, h, check=True):
        # Accumulate random subsets three ways (plaintext, packed cipher,
        # per-value cipher) and verify the unpacked sums agree.
        print('{} test to run'.format(test_num))
        for i in range(test_num):
            print('executing test {}'.format(i))
            g_sum, h_sum, en_sum, en_g_sum, en_h_sum, sample_num = make_random_sum(collected_gh, g, h,
                                                                                   en_g_l,
                                                                                   en_h_l,
                                                                                   self.max_sample_num)
            de_num = en.raw_decrypt(en_sum)
            unpack_num = packer.packer._unpack_an_int(de_num, packer.packer._bit_assignment[0])
            # undo the fixed-point scaling and the per-sample g offset
            g_sum_ = unpack_num[0] / fix_point_precision - sample_num * packer.g_offset
            h_sum_ = unpack_num[1] / fix_point_precision
            g_sum_2 = en.decrypt(en_g_sum)
            h_sum_2 = en.decrypt(en_h_sum)
            print(g_sum, h_sum)
            print(g_sum_2, h_sum_2)
            print(g_sum_, h_sum_)
            # truncate to a fixed number of decimals before comparing floats
            g_sum, h_sum = truncate(g_sum), truncate(h_sum)
            g_sum_, h_sum_ = truncate(g_sum_), truncate(h_sum_)
            g_sum_2, h_sum_2 = truncate(g_sum_2), truncate(h_sum_2)
            print(g_sum, h_sum)
            print(g_sum_2, h_sum_2)
            print(g_sum_, h_sum_)
            if check:
                # make sure packing result close to plaintext sum
                self.assertTrue(g_sum_ == g_sum)
                self.assertTrue(h_sum_ == h_sum)
        print('passed')
    def test_pack_gh_accumulate(self):
        # test the correctness of gh packing (in comparison to plaintext)
        # Iterative Affine
        self.run_gh_accumulate_test(self.test_num, self.iter_collected_gh, self.iter_en_g_l, self.iter_en_h_l, self.iter_packer,
                                    self.iter_en, self.g, self.h)
        print('*'*30)
        print('test iter done')
        print('*'*30)
        # Paillier
        self.run_gh_accumulate_test(self.test_num, self.p_collected_gh, self.p_en_g_l, self.p_en_h_l, self.p_packer,
                                    self.p_en, self.g, self.h)
        print('*'*30)
        print('test paillier done')
        print('*'*30)
    def test_split_info_cipher_compress(self):
        # test the correctness of cipher compressing (classification data)
        print('testing binary')
        collected_gh = self.p_collected_gh
        en_g_l = self.p_en_g_l
        en_h_l = self.p_en_h_l
        packer = self.p_packer
        en = self.p_en
        sp_list = []
        g_sum_list, h_sum_list = [], []
        pack_en_list = []
        for i in range(self.split_info_test_num):
            g_sum, h_sum, en_sum, en_g_sum, en_h_sum, sample_num = make_random_sum(collected_gh, self.g, self.h,
                                                                                   en_g_l,
                                                                                   en_h_l,
                                                                                   self.max_sample_num)
            # the packed sum travels in sum_grad; sum_hess is unused here (0)
            sp = SplitInfo(sum_grad=en_sum, sum_hess=0, sample_count=sample_num)
            sp_list.append(sp)
            g_sum_list.append(g_sum)
            h_sum_list.append(h_sum)
            pack_en_list.append(en_sum)
        print('generating split-info done')
        packages = self.compressor.compress_split_info(sp_list[:-1], sp_list[-1])
        print('package length is {}'.format(len(packages)))
        unpack_rs = packer.decompress_and_unpack(packages)
        case_id = 0
        for s, g, h, en_gh in zip(unpack_rs, g_sum_list, h_sum_list, pack_en_list):
            print('*'*10)
            print(case_id)
            case_id += 1
            # reference: decrypt & unpack the uncompressed ciphertext
            de_num = en.raw_decrypt(en_gh)
            unpack_num = packer.packer._unpack_an_int(de_num, packer.packer._bit_assignment[0])
            g_sum_ = unpack_num[0] / fix_point_precision - s.sample_count * packer.g_offset
            h_sum_ = unpack_num[1] / fix_point_precision
            print(s.sample_count)
            print(s.sum_grad, g_sum_, g)
            print(s.sum_hess, h_sum_, h)
            # make sure cipher compress is correct
            self.assertTrue(truncate(s.sum_grad) == truncate(g_sum_))
            self.assertTrue(truncate(s.sum_hess) == truncate(h_sum_))
        print('check passed')
    def test_regression_cipher_compress(self):
        # test the correctness of cipher compressing (regression data)
        print('testing regression')
        collected_gh = self.reg_p_collected_gh
        en_g_l = self.reg_p_en_g_l
        en_h_l = self.reg_p_en_h_l
        packer = self.reg_p_packer
        en = self.p_en
        sp_list = []
        g_sum_list, h_sum_list = [], []
        pack_en_list = []
        for i in range(self.split_info_test_num):
            g_sum, h_sum, en_sum, en_g_sum, en_h_sum, sample_num = make_random_sum(collected_gh, self.g_reg, self.h_reg,
                                                                                   en_g_l,
                                                                                   en_h_l,
                                                                                   self.max_sample_num)
            sp = SplitInfo(sum_grad=en_sum, sum_hess=0, sample_count=sample_num)
            sp_list.append(sp)
            g_sum_list.append(g_sum)
            h_sum_list.append(h_sum)
            pack_en_list.append(en_sum)
        print('generating split-info done')
        packages = self.reg_compressor.compress_split_info(sp_list[:-1], sp_list[-1])
        print('package length is {}'.format(len(packages)))
        unpack_rs = packer.decompress_and_unpack(packages)
        case_id = 0
        for s, g, h, en_gh in zip(unpack_rs, g_sum_list, h_sum_list, pack_en_list):
            print('*' * 10)
            print(case_id)
            case_id += 1
            de_num = en.raw_decrypt(en_gh)  # make sure packing result close to plaintext sum
            unpack_num = packer.packer._unpack_an_int(de_num, packer.packer._bit_assignment[0])
            g_sum_ = unpack_num[0] / fix_point_precision - s.sample_count * packer.g_offset
            h_sum_ = unpack_num[1] / fix_point_precision
            print(s.sample_count)
            print(s.sum_grad, g_sum_, g)
            print(s.sum_hess, h_sum_, h)
            # make sure cipher compress is correct
            self.assertTrue(truncate(s.sum_grad) == truncate(g_sum_))
            self.assertTrue(truncate(s.sum_hess) == truncate(h_sum_))
        print('check passed')
    def test_regression_gh_packing(self):
        # Paillier only; no equality check because regression float error
        # is not controllable at this precision
        self.run_gh_accumulate_test(self.test_num, self.reg_p_collected_gh, self.reg_p_en_g_l, self.reg_p_en_h_l, self.reg_p_packer,
                                    self.p_en, self.g_reg, self.h_reg, check=False)  # float error in regression is not controllable
    @classmethod
    def tearDownClass(self):
        # NOTE(review): the parameter is conventionally named `cls` for a classmethod.
        session.stop()
if __name__ == '__main__':
    # Run the whole test suite when executed directly.
    unittest.main()
|
|
"""Module otsun.outputs
Helper functions to format data for output
"""
import numpy as np
def spectrum_to_constant_step(file_in, wavelength_step, wavelength_min, wavelength_max):
    """Resample a two-column (wavelength, intensity) file onto a uniform grid.

    The grid runs from ``wavelength_min`` to ``wavelength_max`` (inclusive)
    in steps of ``wavelength_step``; intensities are linearly interpolated.
    Returns an ``N x 2`` array of (wavelength, intensity) rows.
    """
    raw = np.loadtxt(file_in, usecols=(0, 1))
    grid = np.arange(wavelength_min, wavelength_max + wavelength_step / 2.0, wavelength_step)
    interpolated = np.interp(grid, raw[:, 0], raw[:, 1])
    return np.column_stack((grid, interpolated))
def make_histogram_from_experiment_results(results_wavelength, results_energy, step_wavelength, aperture_collector,
                                           aperture_source):
    """Bin ray energies by wavelength and return (bin_start, mean_energy) rows.

    Energies are rescaled by the collector/source aperture ratio before
    averaging within each ``step_wavelength``-wide bin.
    """
    wavelengths = np.array(np.concatenate(results_wavelength))
    energies = np.array(np.concatenate(results_energy))
    energies = (energies / aperture_collector) / (1.0 / aperture_source)
    low = int(np.amin(wavelengths))
    high = np.amax(wavelengths)
    # 1.1 * step guarantees the last (possibly partial) bin is included
    edges = np.arange(low, high + step_wavelength * 1.1, step_wavelength)
    weighted, _ = np.histogram(wavelengths, bins=edges, weights=energies)
    counts, _ = np.histogram(wavelengths, bins=edges)
    mean_energy = np.divide(weighted, counts)
    bin_starts = np.arange(low, high + step_wavelength, step_wavelength)
    return np.column_stack((bin_starts, mean_energy))
def twoD_array_to_constant_step(twoD_array, step, wavelength_min, wavelength_max):
    """Resample an ``N x 2`` (wavelength, intensity) array onto a uniform grid
    from ``wavelength_min`` to ``wavelength_max`` (inclusive) with linear
    interpolation. Returns an ``M x 2`` array."""
    grid = np.arange(wavelength_min, wavelength_max + step / 2.0, step)
    values = np.interp(grid, twoD_array[:, 0], twoD_array[:, 1])
    return np.column_stack((grid, values))
def spectral_response(optical_absorption_wavelength, iqe):
    """Compute the spectral response SR(wl) = IQE(wl) * wl * A(wl) * q / (hc) * 1e-9.

    ``iqe`` may be a plain real number (constant internal quantum efficiency)
    or a path to a two-column (wavelength, IQE) file to be interpolated.
    Returns an ``N x 2`` array of (wavelength, SR) rows.
    """
    q_e = 1.60217662E-19   # elementary charge [C]
    h = 6.62607E-34        # Planck constant [J s]
    c = 299792458.0        # speed of light [m/s]
    hc = h * c
    rows = optical_absorption_wavelength
    if np.isreal(iqe):
        sr = [[row[0], iqe * row[0] * row[1] * q_e * 1E-9 / hc, ] for row in rows]
    else:
        table = np.loadtxt(iqe, usecols=(0, 1))
        wl_tab, iqe_tab = table[:, 0], table[:, 1]
        sr = [[row[0], np.interp(row[0], wl_tab, iqe_tab) * row[0] * row[1] * q_e * 1E-9 / hc, ] for row in rows]
    return np.array(sr)
def photo_current(spectral_response, source_spectrum):
    """Integrate spectral response times source spectrum over wavelength
    (trapezoidal rule). Both inputs are ``N x 2`` (wavelength, value) arrays
    on the same wavelength grid."""
    wavelengths = source_spectrum[:, 0]
    integrand = spectral_response[:, 1] * source_spectrum[:, 1]
    return np.trapz(integrand, x=wavelengths)
# ---
# Helper functions for outputs in Total Analysis
# ---
def integral_from_data_file(file_in):
    """Trapezoidal integral of a two-column (x, y) data file."""
    data = np.loadtxt(file_in, usecols=(0, 1))
    return np.trapz(data[:, 1], x=data[:, 0])
|
|
import tensorflow as tf
import matplotlib.pyplot as plt
import numpy as np
import time
import os
import argparse
from copy import deepcopy
from kgcnn.utils.data import save_json_file, load_json_file
from kgcnn.utils.learning import LinearLearningRateScheduler
from sklearn.model_selection import KFold
from kgcnn.data.datasets.mutagenicity import MutagenicityDataset
from kgcnn.io.loader import NumpyTensorList
from kgcnn.utils.models import ModelSelection
from kgcnn.hyper.datasets import DatasetHyperSelection
# Input arguments from command line.
# A hyper-parameter file can be specified to be loaded containing a python dict for hyper.
parser = argparse.ArgumentParser(description='Train a graph network on Mutagenicity dataset.')
parser.add_argument("--model", required=False, help="Graph model to train.", default="GraphSAGE")
parser.add_argument("--hyper", required=False, help="Filepath to hyper-parameter config.", default=None)
args = vars(parser.parse_args())
print("Input of argparse:", args)
# Model identification: resolve the model factory by name.
model_name = args["model"]
ms = ModelSelection()
make_model = ms.make_model(model_name)
# Hyper-parameter identification.
if args["hyper"] is None:
    # Default hyper-parameter for model if available.
    hs = DatasetHyperSelection()
    hyper = hs.get_hyper("Mutagenicity", model_name)
else:
    hyper = load_json_file(args["hyper"])
# Loading Mutagenicity Dataset
hyper_data = hyper['data']
dataset = MutagenicityDataset()
data_name = dataset.dataset_name
data_length = dataset.length
# Data-set split: 5-fold cross-validation over graph indices.
kf = KFold(n_splits=5, random_state=None, shuffle=True)
split_indices = kf.split(X=np.arange(data_length)[:, None])
# Gather the tensors listed in the model's input spec; labels get a trailing axis.
dataloader = NumpyTensorList(*[getattr(dataset, x['name']) for x in hyper['model']['inputs']])
labels = np.expand_dims(dataset.graph_labels, axis=-1)
# Set learning rate and epochs
hyper_train = hyper['training']
epo = hyper_train['fit']['epochs']
epostep = hyper_train['fit']['validation_freq']
batch_size = hyper_train['fit']['batch_size']
# Accumulators collected across the 5 folds.
train_loss = []
test_loss = []
acc_5fold = []
all_test_index = []
model = None
# Cross-validation: train one fresh model per fold.
for train_index, test_index in split_indices:
    # Make model.
    model = make_model(**hyper['model'])
    # Select train and test data.
    is_ragged = [x['ragged'] for x in hyper['model']['inputs']]
    xtrain, ytrain = dataloader[train_index].tensor(ragged=is_ragged), labels[train_index]
    xtest, ytest = dataloader[test_index].tensor(ragged=is_ragged), labels[test_index]
    # Compile model with optimizer and loss.
    optimizer = tf.keras.optimizers.get(deepcopy(hyper_train['optimizer']))
    cbks = [tf.keras.utils.deserialize_keras_object(x) for x in hyper_train['callbacks']]
    model.compile(loss='binary_crossentropy',
                  optimizer=optimizer,
                  weighted_metrics=['accuracy'])
    # summary() already prints; wrapping it in print() emitted a stray "None".
    model.summary()
    # Start and time training.
    start = time.process_time()
    hist = model.fit(xtrain, ytrain,
                     validation_data=(xtest, ytest),
                     # BUG FIX: `cbks` is already a list of callbacks;
                     # `callbacks=[cbks]` nested it into a list of lists,
                     # which Keras does not accept.
                     callbacks=cbks,
                     **hyper_train['fit']
                     )
    stop = time.process_time()
    print("Print Time for training: ", stop - start)
    # Get accuracy curves from the fit history.
    train_loss.append(np.array(hist.history['accuracy']))
    val_acc = np.array(hist.history['val_accuracy'])
    test_loss.append(val_acc)
    acc_valid = np.mean(val_acc[-5:])  # mean of the last 5 validation points
    acc_5fold.append(acc_valid)
    all_test_index.append([train_index, test_index])
# Make output directories (one folder per dataset/model combination).
os.makedirs(data_name, exist_ok=True)
filepath = os.path.join(data_name, hyper['model']['name'])
os.makedirs(filepath, exist_ok=True)
# Plot training- and test-loss vs epochs for all splits.
plt.figure()
for x in train_loss:
    plt.plot(np.arange(x.shape[0]), x, c='red', alpha=0.85)
for y in test_loss:
    # validation was only run every `epostep` epochs, so stretch the x axis
    plt.plot((np.arange(len(y)) + 1) * epostep, y, c='blue', alpha=0.85)
plt.scatter([train_loss[-1].shape[0]], [np.mean(acc_5fold)],
            label=r"Test: {0:0.4f} $\pm$ {1:0.4f}".format(np.mean(acc_5fold), np.std(acc_5fold)), c='blue')
plt.xlabel('Epochs')
plt.ylabel('Accuracy')
plt.title('Mutagenicity Loss')
plt.legend(loc='upper right', fontsize='large')
plt.savefig(os.path.join(filepath, 'acc_mutagenicity.png'))
plt.show()
# Save keras-model (last fold) to output-folder.
model.save(os.path.join(filepath, "model"))
# Save original data indices of the splits.
np.savez(os.path.join(filepath, "kfold_splits.npz"), all_test_index)
# Save hyper-parameter again, which were used for this fit.
save_json_file(hyper, os.path.join(filepath, "hyper.json"))
|
|
# Multiple linear Regression
# <---------------- Importing data ------------------->
# Importing the libraries
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
# Importing the dataset
dataset = pd.read_csv('50_Startups.csv')  # ----> Set the correct path
X = dataset.iloc[:, :-1].values  # ----> independent variables (all but last column)
y = dataset.iloc[:, -1].values   # ----> dependent variable (last column)
# <------------- Taking care of missing data -------------------->
from sklearn.impute import SimpleImputer
# BUG FIX: the deprecated `verbose` argument was removed in scikit-learn >= 1.3.
missingvalues = SimpleImputer(missing_values=np.nan, strategy='mean')
missingvalues = missingvalues.fit(X[:, 0:3])
X[:, 0:3] = missingvalues.transform(X[:, 0:3])
# <-------------- Encoding categorical data ------------------------->
from sklearn.preprocessing import OneHotEncoder
from sklearn.compose import ColumnTransformer
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
# Encoding the Independent Variable
ct = ColumnTransformer([('encoder', OneHotEncoder(), [3])], remainder='passthrough')  # ---> correct columns
# BUG FIX: np.float was removed in NumPy 1.24; use the builtin float instead.
X = np.array(ct.fit_transform(X), dtype=float)
X = X[:, 1:]  # drop one dummy column to avoid the dummy-variable trap
# Encoding the Dependent Variable
# labelencoder_y = LabelEncoder()
# y = labelencoder_y.fit_transform(y)  # ---> do this only when y is categorical
# <----------------------- Splitting to test and train --------------->
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)
# <------------------------- Feature Scaling ------------------------->
from sklearn.preprocessing import StandardScaler
sc_X = StandardScaler()
X_train = sc_X.fit_transform(X_train)
X_test = sc_X.transform(X_test)
# < ------------------- Multiple linear regression ------------------------>
# Fitting Multiple Linear Regression to the Training set
from sklearn.linear_model import LinearRegression
regressor = LinearRegression()
regressor.fit(X_train, y_train)
# Making predictions on the held-out test set
y_pred = regressor.predict(X_test)
plt.scatter(y_test, y_pred)
plt.xlabel("y_test")
plt.ylabel("y_pred")
plt.title("y_pred vs y_test")  # typo in the displayed title fixed
# Overlay the least-squares line through (y_test, y_pred)
m, b = np.polyfit(y_test, y_pred, deg=1)
plt.plot(y_test, m * y_test + b)
|
|
__copyright__ = "Copyright (c) 2020 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
from typing import Dict
import numpy as np
from jina.executors.rankers import Chunk2DocRanker
class TfIdfRanker(Chunk2DocRanker):
"""
:class:`TfIdfRanker` calculates the weighted score from the matched chunks. The weights of each chunk is based on
the tf-idf algorithm. Each query chunk is considered as a ``term``, and the frequency of the query chunk in a
specific matched document is considered as the naive ``term-frequency``. All the matched results as a whole is
considered as the corpus, and therefore the frequency of the query chunk in all the matched docs is considered
as the naive ``document-frequency``. Please refer to the functions for the details of calculating ``tf`` and
``idf``.
"""
required_keys = {'length', 'id'}
    def __init__(self, threshold: float = 0.1, *args, **kwargs):
        """
        :param threshold: the threshold of matching scores. Only the matched chunks with a score that is higher or equal
            to the ``threshold`` are counted as matched.
        """
        # remaining args/kwargs are forwarded to the Chunk2DocRanker base class
        super().__init__(*args, **kwargs)
        self.threshold = threshold
    def score(self, match_idx: 'np.ndarray', query_chunk_meta: Dict, match_chunk_meta: Dict) -> 'np.ndarray':
        """
        :param match_idx: an `ndarray` of the size ``N x 4``. ``N`` is the batch size of the matched chunks for the
            query doc. The columns correspond to the ``doc_id`` of the matched chunk, ``chunk_id`` of the matched chunk,
            ``chunk_id`` of the query chunk, and ``score`` of the matched chunk.
        :param query_chunk_meta: a dict of meta info for the query chunks with **ONLY** the ``required_keys`` are kept.
        :param match_chunk_meta: a dict of meta info for the matched chunks with **ONLY** the ``required_keys`` are
            kept.
        :return: an `ndarray` of the size ``M x 2``. ``M`` is the number of matched docs. The columns correspond to the
            ``doc_id`` and ``score``.
        .. note::
            In both `query_chunk_meta` and `match_chunk_meta`, ONLY the fields from the ``required_keys`` are kept.
        """
        # group matched chunks by the document they belong to
        _groups = self.group_by_doc_id(match_idx)
        r = []
        # idf is computed once over the whole match set (the "corpus")
        _q_idf = self.get_idf(match_idx)
        for _g in _groups:
            _doc_id, _doc_score = self._get_score(_g, query_chunk_meta, match_chunk_meta, _q_idf)
            r.append((_doc_id, _doc_score))
        # descending by score, as an M x 2 (doc_id, score) array
        return self.sort_doc_by_score(r)
def get_idf(self, match_idx):
    """Build the idf lookup for the query chunks appearing in ``match_idx``.

    :param match_idx: an ``N x 4`` array; columns are the ``doc_id`` of the
        matched chunk, ``chunk_id`` of the matched chunk, ``chunk_id`` of the
        query chunk, and ``score`` of the matched chunk.
    :return: a dict mapping query chunk id -> idf value

    .. note::
        The 10-based logarithm is used: ``idf = log10(total / df + 1e-10)``,
        where ``df`` is the frequency of the query chunk among all matches and
        ``total`` is the total number of matched chunks; the ``1e-10`` term
        keeps the argument strictly positive.
    """
    df_arr, chunk_ids = self._get_df(match_idx)
    total = np.sum(df_arr)
    idf_map = {}
    for chunk_id, df in zip(chunk_ids, df_arr):
        idf_map[chunk_id] = np.log10(total / df + 1e-10)
    return idf_map
def get_tf(self, match_idx, match_chunk_meta):
    """Build the tf lookup for query chunks that matched a given doc.

    :param match_idx: an ``N x 4`` array; ``N`` is the number of chunks in a
        given doc that matched with the query doc.
    :param match_chunk_meta: meta info of the matched chunks with **ONLY** the
        ``required_keys`` kept; ``'length'`` is read here.
    :return: a dict mapping query chunk id -> normalized term frequency

    .. note::
        Only matches scoring at or above ``self.threshold`` are counted
        (see :meth:`_get_tf`). The raw count is normalized by the matched
        doc's chunk count: ``tf = n / n_doc``, damping the effect of long docs.
    """
    tf_counts, query_ids, match_chunk_ids = self._get_tf(match_idx)
    tf_map = {}
    for m_id, q_id, count in zip(match_chunk_ids, query_ids, tf_counts):
        tf_map[q_id] = count / match_chunk_meta[m_id]['length']
    return tf_map
def _get_df(self, match_idx):
"""Get the naive document frequency
:param match_idx: an `ndarray` of the size ``N x 4``. ``N`` is the number of chunks in a given doc that matched
with the query doc.
:return: a tuple of two `np.ndarray` in the size of ``M``, i.e. the document frequency array and the chunk id
array. ``M`` is the number of query chunks.
"""
a = match_idx[match_idx[:, self.col_query_chunk_id].argsort()]
q_id, q_df = np.unique(a[:, self.col_query_chunk_id], return_counts=True)
return q_df, q_id
def _get_tf(self, match_idx):
"""Get the naive term frequency of the query chunks
:param match_idx: an `ndarray` of the size ``N x 4``. ``N`` is the number of chunks in a given doc that matched
with the query doc.
:return: a tuple of three `np.ndarray` in the size of ``M``, i.e. the term frequency array, the query chunk id
array, and the matched chunk id array. ``M`` is the number of query chunks.
.. note::
The query chunks with matching scores that is lower than the threshold are dropped.
"""
_m = match_idx[match_idx[:, self.col_score] >= self.threshold]
_sorted_m = _m[_m[:, self.col_query_chunk_id].argsort()]
q_id_list, q_tf_list = np.unique(_sorted_m[:, self.col_query_chunk_id], return_counts=True)
row_id = np.cumsum(q_tf_list) - 1
c_id_list = _sorted_m[row_id, self.col_chunk_id]
return q_tf_list, q_id_list, c_id_list
def _get_score(self, match_idx, query_chunk_meta, match_chunk_meta, idf, *args, **kwargs):
"""Get the doc score based on the weighted sum of matching scores. The weights are calculated from the tf-idf of
the query chunks.
:param match_idx: an `ndarray` of the size ``N x 4``. ``N`` is the number of chunks in a given doc that matched
with the query doc.
:param tf: a dictionary with the query chunk id as key and the tf as value.
:param idf: a dictionary with the query chunk id as key and the idf as value.
:return: a scalar value of the weighted score.
"""
tf = self.get_tf(match_idx, match_chunk_meta)
_weights = match_idx[:, self.col_score]
_q_tfidf = np.vectorize(tf.get)(match_idx[:, self.col_query_chunk_id], 0) * \
np.vectorize(idf.get)(match_idx[:, self.col_query_chunk_id], 0)
_sum = np.sum(_q_tfidf)
_doc_id = self.get_doc_id(match_idx)
_score = 0. if _sum == 0 else np.sum(_weights * _q_tfidf) * 1.0 / _sum
return _doc_id, _score
|
|
'''
Example of a spike generator (only outputs spikes)
In this example spikes are generated and sent through UDP packages. At the end of the simulation a raster plot of the
spikes is created.
'''
import brian_no_units # Speeds up Brian by ignoring the units
from brian import *
import numpy
from brian_multiprocess_udp import BrianConnectUDP
# Size of the output spike group, and how many of those neurons fire in
# each 20 ms wave (see nextspike below).
number_of_neurons_total = 7
number_of_neurons_spiking = 3


def main_NeuronGroup(input_Neuron_Group, simulation_clock):
    # Build the spike-source group for BrianConnectUDP.
    # NOTE: Python 2 / Brian 1 code (print statement, brian_no_units).
    # `input_Neuron_Group` is unused: this process only *generates* spikes.
    print "main_NeuronGroup!" #DEBUG!
    simclock = simulation_clock
    delta_t=5  # NOTE(review): never read below — looks like leftover config; confirm before removing

    def nextspike():
        # Generator yielding (neuron_index, time) pairs: every 20 ms a random
        # subset of `number_of_neurons_spiking` neurons fires.
        # nexttime = numpy.random.uniform(50E-3,100E-3)
        nexttime = 0
        random_list=range(number_of_neurons_total)
        while True:
            numpy.random.shuffle(random_list)
            # for i in random_list[0:numpy.random.randint(1,number_of_neurons_total+1)]:
            for i in random_list[0:number_of_neurons_spiking]:
                yield (i,nexttime)
            nexttime = nexttime + 20E-3

    SpikesOut = SpikeGeneratorGroup(number_of_neurons_total, nextspike, clock=simclock) # the maximum clock of the input spikes is limited here (period)
    # BrianConnectUDP expects (output_groups, monitored_groups, input_groups).
    return ([SpikesOut],[],[])


if __name__=="__main__":
    # Send the generated spikes over UDP to localhost:18181.
    my_simulation = BrianConnectUDP(main_NeuronGroup, NumOfNeuronsOutput=number_of_neurons_total,
        output_addresses=[("127.0.0.1", 18181)], simclock_dt=5, TotalSimulationTime=120000, brian_address=0)
|
|
'''
Integrates trajectory for many cycles
- tries to load previously computed cycles; starts from the last point available
- saved result in a separate file (e.g. phi_0_pt1.npy)
- finishes early if a trajectory almost converged to a fixed point
- IF integrated for many thousands of cycles - may want to uncomment `phi0 = phases_to_interval(phi0)`
after finishing each cycle
args: irun, ncycle, tol, save_every, sim_name
'''
# import packages needed below
import carpet
from carpet.various import phases_to_interval
from sim_physics import N, solve_cycle
import sys, os
import numpy as np
import logging
carpet.setup_logging('worker.log')
def solve_cycles_many(phi0, tol, ncycle, save_every, conv_eps):
    '''
    Integrate the phase dynamics cycle by cycle, starting from `phi0`.

    Returns trajectory: phis: [phi0, phi1, phi2,..]
    Terminates early when the distance travelled in one cycle is less than
    `conv_eps` (close to a fixed point), or when `ncycle` is reached.

    :param phi0: initial phase vector (length N)
    :param tol: tolerance, passed through to solve_cycle
    :param ncycle: maximum number of cycles to integrate
    :param save_every: keep every `save_every`-th cycle in the output;
        must be a positive integer (1 => every cycle saved; 2 => every 2nd)
    :param conv_eps: convergence threshold on the per-cycle displacement norm
    :return: tuple of three np.ndarrays ``(phis, ts, dphis_norm)`` — saved
        phase vectors, their times, and the per-cycle displacement norms
    '''
    # save_every must be a positive integer; save_every = 1 => every cycle saved; save_every=2 => every 2nd saved
    if save_every == 0:
        raise NotImplementedError
    phis = [phi0]
    dphis_norm = []
    ts = [0]
    t = 0
    save_counter = 0
    for icycle in range(ncycle):
        sol = solve_cycle(phi0, tol, ncycle=1)
        # subtract 2*pi: a full cycle advances every phase by one period
        phi1 = sol.y.T[-1] - 2 * np.pi
        t += sol.t[-1]
        # Save data once every `save_every` cycles
        save_counter += 1
        if save_counter == 1:  # Add dphi for the first point & all points which got saved recently
            dphi = (phi1 - phi0)
            dphi_norm = np.sqrt(1 / N) * np.linalg.norm(dphi)
            dphis_norm.append(dphi_norm)
            # END if change in cycle is too small => (therefore close to a fixed point)
            if dphi_norm < conv_eps:
                return np.array(phis), np.array(ts), np.array(dphis_norm)
            # For small dphi; with zero mean phase; the norm above is equivalent to
            # `np.sqrt(1 - carpet.order_parameter(dphi) ** 2)`
        if save_counter == save_every:
            phis.append(phi1)
            ts.append(t)
            save_counter = 0  # reset save counter
        phi0 = phi1.copy()  # set initial condition for the next cycle
        # phi0 = phases_to_interval(phi0)
    return np.array(phis), np.array(ts), np.array(dphis_norm)
def get_traj_filename(irun, ipart, path):
    """Return the file path of part `ipart` of trajectory `irun`.

    Part 0 has no suffix (``phi_<irun>.npy``); later parts get ``_pt<ipart>``.
    """
    suffix = '' if ipart == 0 else f'_pt{ipart}'
    return os.path.join(path, f'phi_{irun}{suffix}.npy')
def get_ts_filename(irun, ipart, path):
    """Return the file path of part `ipart` of the time array of run `irun`.

    Naming mirrors :func:`get_traj_filename` with a ``ts_`` prefix.
    """
    suffix = '' if ipart == 0 else f'_pt{ipart}'
    return os.path.join(path, f'ts_{irun}{suffix}.npy')
def load_phis(irun, path):
    '''
    Load all previously saved parts of trajectory `irun` found under `path`
    and glue them back into a single array. Stops at the first missing part
    (parts are numbered consecutively from 0; at most 64 are probed).
    '''
    parts = []
    ipart = 0
    while ipart < 64:
        part_file = get_traj_filename(irun, ipart, path)
        if not os.path.isfile(part_file):
            break
        parts.append(np.load(part_file))
        ipart += 1
    return np.concatenate(parts)  # trajectory glued back from parts
## Prepare input
# Command-line args: irun, ncycle, tol, save_every, sim_name
irun, ncycle_total, tol, save_every, sim_name = int(sys.argv[1]), int(sys.argv[2]), float(sys.argv[3]), \
                                                int(sys.argv[4]), str(sys.argv[5])
# Folder names
objfolder = f'obj/{sim_name}/'
outfolder = f'out/{sim_name}/'
conv_eps = 0.99e-4  # cycle-to-cycle displacement below this => treated as a fixed point

# Find the last existing part of the trajectory
ipart_last = None
for i in range(64):  # maximum number of parts
    filename = get_traj_filename(irun, i, outfolder)
    if os.path.isfile(filename):
        ipart_last = i
    else:
        break

# If trajectory exists -> load, get initial condition and number of cycles done so far
if ipart_last is not None:
    phis_old = load_phis(irun, outfolder)
    ncycle_old = (len(phis_old) - 1) * save_every  # assume that input save_every is the same as used in prev. sims!
    phi0 = phis_old[-1]
    ipart = ipart_last + 1
    del phis_old  # free up memory
else:
    ipart = 0
    ncycle_old = 0
    # Load input
    input_filename = objfolder + f'phi0_{irun}.npy'
    phi0 = np.load(input_filename)

## Run simulation
ncycle_extra = ncycle_total - ncycle_old
if ncycle_extra > 0:
    # BUGFIX: solve_cycles_many returns THREE arrays (phis, ts, dphis_norm);
    # the original two-name unpacking raised ValueError at runtime.
    phis, ts, dphis_norm = solve_cycles_many(phi0, tol, ncycle_extra, save_every, conv_eps)
    if ipart > 0:  # remove the first point because it's the same as the last point of the previous part
        phis = phis[1:]
        ts = ts[1:]
    ## Save output
    # BUGFIX: saving is nested under `ncycle_extra > 0`; previously `phis`
    # was referenced even when no simulation ran (NameError).
    if len(phis) > 1:  # length = 1 if immediately finished simulation AND part > 0
        os.makedirs(outfolder, exist_ok=True)
        # Time points
        filename = get_ts_filename(irun, ipart, outfolder)
        np.save(filename, ts)
        # Phases - saved the last to make sure that everything else is saved as well
        filename = get_traj_filename(irun, ipart, outfolder)
        np.save(filename, phis)
|
|
import numpy as np
##A script for creating tables for each cancer, with the data sorted
def compare(first, second):
    """cmp-style comparator: order rows by the float in the second-to-last field.

    Returns 1, -1 or 0 for greater / smaller / equal, as Python 2's
    ``sorted(..., cmp=...)`` expects.
    """
    a = float(first[-2])
    b = float(second[-2])
    return (a > b) - (a < b)
import os

# Repository root: three directories above this file.
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
##create necessary dictionaries
##need to get the gene ids from a RNA-SEQV2 file, any file will work
f=open(os.path.join(BASE_DIR,'tcga_data','GBM','mrna','unc.edu.0cbec58e-f95e-4c60-a85d-210dc56bdf3c.1545137.rsem.genes.normalized_results'))
f.readline()
# TCGA numeric gene id -> gene symbol, parsed from "SYMBOL|ID" tokens.
TCGA_id_to_gene={}
data=[i.split()[0] for i in f]
for i in data:
    TCGA_id_to_gene[i.split('|')[1]]=i.split('|')[0]
##this gene_result file is from http://www.ncbi.nlm.nih.gov/gene, downloaded Jan. 2016
f=open(os.path.join(BASE_DIR,'tables','gene_result.txt'))
f.readline()
# current NCBI gene id (tab column 2) -> current gene symbol (tab column 5).
current_id_to_gene={}
for i in f:
    x=i.split('\t')
    current_id_to_gene[x[2]]=x[5]
##I manually curated ids that got changed and created this file
new_ids={}
f=open(os.path.join(BASE_DIR,'tables','new_ids_annotated.txt'))
data=[i.strip().split() for i in f]
for i in data:
    if i[2]!='None':
        new_ids[i[0]]=[i[2],i[3]]
    else:
        new_ids[i[0]]='None'
# One output table per cancer type, rows sorted via the cmp comparator above.
# NOTE: Python 2 script — sorted(..., cmp=...) below does not exist in Python 3.
for cancer in ['BLCA','BRCA','CESC','COAD','ESCA','GBM','HNSC','KIRC','KIRP','LAML','LGG','LIHC','LUAD','LUSC','OV','PAAD',\
'READ','SARC','SKCM','STAD','UCEC']:
    f=open(os.path.join(BASE_DIR,'mrna','cox',cancer,'coeffs_pvalues_adjusted.txt'))
    data=[i.strip().split() for i in f]
    ids,coeffs,pvalues,adjusted=zip(*data)
    # rows: (TCGA id, gene symbol, cox coefficient, p-value, adjusted p-value)
    cox_results=zip(ids,[TCGA_id_to_gene[i] for i in ids],coeffs,pvalues,adjusted)
    f=open(os.path.join(BASE_DIR,'mrna','cox',cancer,'final_genes.txt'))
    expression={}
    # each line of final_genes.txt is a Python literal; eval() is acceptable
    # only because the file is generated locally — never run on untrusted input
    data=[eval(i.strip()) for i in f]
    for i in data:
        for j in i:
            expression[j[0]]=expression.get(j[0],[])+[j[1]]
    f=open(os.path.join(BASE_DIR,'tables','S1',cancer+'.txt'),'w')
    for i in sorted(cox_results,cmp=compare):
        f.write(i[0])
        f.write('\t')
        f.write(i[1])
        f.write('\t')
        if i[0] in current_id_to_gene:
            f.write(i[0])
            f.write('\t')
            f.write(current_id_to_gene[i[0]])
            f.write('\t')
        elif i[0] in new_ids:
            if new_ids[i[0]]=='None':
                f.write('None')
                f.write('\t')
                f.write('None')
                f.write('\t')
            else:
                f.write(new_ids[i[0]][0])
                f.write('\t')
                f.write(new_ids[i[0]][1])
                f.write('\t')
        else:
            # gene id missing from both lookup tables is a data error;
            # a bare `raise` outside an except block just aborts the script
            raise
        f.write(i[2])
        f.write('\t')
        f.write(i[3])
        f.write('\t')
        f.write(i[4])
        f.write('\t')
        f.write(str(np.median(expression[i[0]])))
        f.write('\t')
        f.write(str(np.mean(expression[i[0]])))
        f.write('\n')
    f.close()
    del expression
|
|
from typing import List
import numpy as np
# A "tensor" here is just a flat list of floats.
Tensor = List[float]


def single_output(xdata: List[Tensor], ydata: List[Tensor]) -> List[Tensor]:
    # Convert both inputs to numpy arrays for vectorized processing.
    # NOTE(review): the visible body never returns anything, so the function
    # yields None despite the List[Tensor] annotation — this looks like an
    # unfinished stub; confirm the intended return value before relying on it.
    xdata = np.asarray(xdata)
    ydata = np.asarray(ydata)
|
|
# -*- coding: utf-8 -*-
import logging
import six
from six.moves import zip, map
import numpy as np
import vtool as vt
import utool as ut
from wbia.control import controller_inject
print, rrr, profile = ut.inject2(__name__)
logger = logging.getLogger('wbia')
# Create dectorator to inject functions in this module into the IBEISController
CLASS_INJECT_KEY, register_ibs_method = controller_inject.make_ibs_register_decorator(
__name__
)
# TODO : make a annot_tags file

# Tags backed by dedicated boolean columns in the annotmatch table
# (currently none active; candidates kept for reference).
ANNOTMATCH_PROPS_STANDARD = [
    # 'SceneryMatch',
    # 'Photobomb',
    # 'Hard',
    # 'NonDistinct',
]

# Free-form case tags stored in the annotmatch tag-text column.
ANNOTMATCH_PROPS_OTHER = [
    'SceneryMatch',
    'Photobomb',
    'Hard',
    'NonDistinct',
    'Occlusion',
    'Viewpoint',
    'MildViewpoint',
    'Pose',
    'Lighting',
    'Quality',  # quality causes failure
    'Orientation',  # orientation caused failure
    'EdgeMatch',  # descriptors on the edge of the naimal produce strong matches
    'Interesting',  # flag a case as interesting
    'JoinCase',  # case should actually be marked as correct
    'SplitCase',  # case should actually be marked as correct
    'random',  # gf case has random matches, the gt is to blame
    'BadShoulder',  # gf is a bad shoulder match
    'BadTail',  # gf is a bad tail match
    'TimeDeltaError',
    # These annots have almost the same information
    'NearDuplicate',
    'CorrectPhotobomb',  # FIXME: this is a terrible name
]

# Deprecated tag names that may still appear in older databases.
OLD_ANNOTMATCH_PROPS = [
    'TooLargeMatches',  # really big nondistinct matches
    'TooSmallMatches',  # really big nondistinct matches
    'ScoringIssue',  # matches should be scored differently
    'BadCoverage',  # matches were not in good places (missing matches)
    'ViewpointOMG',  # gf is a bad tail match
    'ViewpointCanDo',  # gf is a bad tail match
    'shouldhavemore',
    'Shadowing',  # shadow causes failure
    'success',  # A good success case
    'GoodCoverage',  # matches were spread out correctly (scoring may be off though)
]

# Changes to prop names: old name -> new name (None means the tag is retired).
PROP_MAPPING = {
    'ViewpointCanDo': 'Correctable',
    'ViewpointOMG': 'Uncorrectable',
    'Shadowing': 'Lighting',
    'success': None,
    'GoodCoverage': None,
    # 'Hard' : 'NeedsWork',
    'shouldhavemore': 'NeedsWork',
    'BadCoverage': 'NeedsWork',
    'ScoringIssue': 'NeedsWork',
    'TooSmallMatches': 'FeatureScale',
    'TooLargeMatches': 'FeatureScale',
    # 'BadShoulder' : 'BadShoulder',
    # 'GoodCoverage': None,
}

# Apply PROP_MAPPING to ANNOTMATCH_PROPS_OTHER: drop any renamed key and make
# sure every non-None target name appears exactly once.
for key, val in PROP_MAPPING.items():
    if key in ANNOTMATCH_PROPS_OTHER:
        ANNOTMATCH_PROPS_OTHER.remove(key)
    if val is not None and val not in ANNOTMATCH_PROPS_OTHER:
        ANNOTMATCH_PROPS_OTHER.append(val)

# Lower-cased lookup sets for case-insensitive membership tests.
ANNOTMATCH_PROPS_OTHER_SET = set([_.lower() for _ in ANNOTMATCH_PROPS_OTHER])
ANNOTMATCH_PROPS_OLD_SET = set([_.lower() for _ in OLD_ANNOTMATCH_PROPS])
# ANNOTMATCH_PROPS_STANDARD_SET = set([_.lower() for _ in ANNOTMATCH_PROPS_STANDARD])
def consolodate_annotmatch_tags(old_tags):
    """Normalize raw annotmatch tag lists into a smaller canonical tag set.

    For each tag list: drop administrative tags, map synonyms onto a canonical
    name, deduplicate (first occurrence wins), and capitalize the first letter.

    Args:
        old_tags (list of list of str): raw tags, one list per annotmatch

    Returns:
        list of list of str: consolidated tags, same outer length as the input
    """
    # tags that carry no case information — dropped entirely
    dropped = {
        'hard',
        'needswork',
        'correctable',
        'uncorrectable',
        'interesting',
        'splitcase',
        'joincase',
        # 'orientation',
        'random',
        # 'badtail', 'badshoulder', 'splitcase', 'joincase', 'goodcoverage', 'interesting', 'hard'
    }
    # lowercase synonym -> canonical tag name
    canonical = {
        # 'quality': 'Quality',
        # 'scoringissue': 'ScoringIssue',
        'orientation': 'Viewpoint',
        'pose': 'NonDistinct',
        # 'lighting': 'Lighting',
        # 'occlusion': 'Occlusion',
        'nondistinct': 'NonDistinct',
        'featurescale': 'NonDistinct',
        'edgematch': 'SimilarPose',
        'badtail': 'NonDistinct',
        'badshoulder': 'NonDistinct',
        'mildviewpoint': 'Viewpoint',
        # 'toolargematches': 'CoarseFeatures',
        # 'badcoverage': 'LowCoverage',
        # 'viewpoint': 'Viewpoint',
    }
    new_tags = []
    for tags in old_tags:
        kept = [t for t in tags if t.lower() not in dropped]
        mapped = [canonical.get(t.lower(), t) for t in kept]
        deduped = list(dict.fromkeys(mapped))  # order-preserving unique
        new_tags.append([t[0].upper() + t[1:] for t in deduped])
    return new_tags
def rename_and_reduce_tags(ibs, annotmatch_rowids):
    """
    Script to update tags to newest values

    For every rowid: any tag found in PROP_MAPPING is cleared (set to False)
    and, when the mapping has a non-None target, the new tag is set to True.

    Args:
        ibs (IBEISController): wbia controller object
        annotmatch_rowids (list): annotmatch rowids whose tags are rewritten in place

    CommandLine:
        python -m wbia.tag_funcs --exec-rename_and_reduce_tags --db PZ_Master1

    Ignore:
        >>> from wbia.tag_funcs import *  # NOQA
        >>> import wbia
        >>> #ibs = wbia.opendb(defaultdb='PZ_Master1')
        >>> ibs = wbia.opendb(defaultdb='testdb1')
        >>> annotmatch_rowids = filter_annotmatch_by_tags(ibs, min_num=1)
        >>> rename_and_reduce_tags(ibs, annotmatch_rowids)
    """
    tags_list_ = get_annotmatch_case_tags(ibs, annotmatch_rowids)

    def fix_tags(tags):
        # lower-case everything so the comparison with PROP_MAPPING is case-insensitive
        return {six.text_type(t.lower()) for t in tags}

    tags_list = list(map(fix_tags, tags_list_))
    prop_mapping = {
        six.text_type(key.lower()): val for key, val in six.iteritems(PROP_MAPPING)
    }
    bad_tags = fix_tags(prop_mapping.keys())
    for rowid, tags in zip(annotmatch_rowids, tags_list):
        old_tags = tags.intersection(bad_tags)
        # clear old tags first, then set the renamed ones (order matters for the DB state)
        for tag in old_tags:
            ibs.set_annotmatch_prop(tag, [rowid], [False])
        new_tags = ut.dict_take(prop_mapping, old_tags)
        for tag in new_tags:
            if tag is not None:  # None means the tag was retired, not renamed
                ibs.set_annotmatch_prop(tag, [rowid], [True])
def get_cate_categories():
    """Return the two annotmatch tag category lists as ``(standard, other)``."""
    return ANNOTMATCH_PROPS_STANDARD, ANNOTMATCH_PROPS_OTHER
def export_tagged_chips(ibs, aid_list, dpath='.'):
    """
    DEPRICATE

    Archive the chip images of the given annotations into a zip file under
    `dpath`; the archive name embeds the database name and a hash of the
    annots' visual uuids so different selections get different archives.

    Args:
        ibs (IBEISController): wbia controller object
        aid_list (list): annotation rowids to export
        dpath (str): destination directory for the zip archive (default '.')

    CommandLine:
        python -m wbia.tag_funcs --exec-export_tagged_chips --tags Hard interesting needswork --db PZ_Master1
        python -m wbia.tag_funcs --exec-export_tagged_chips --logic=or --any_startswith quality occlusion --has_any lighting needswork interesting hard --db GZ_Master1 --dpath=/media/raid
        python -m wbia.tag_funcs --exec-export_tagged_chips --db GZ_Master1 --min_num=1 --dpath /media/raid

    Example:
        >>> # SCRIPT
        >>> from wbia.tag_funcs import *  # NOQA
        >>> import wbia
        >>> ibs = wbia.opendb(defaultdb='testdb1')
        >>> kwargs = ut.argparse_dict(ut.get_kwdefaults2(filterflags_general_tags), type_hint=ut.ddict(list, logic=str))
        >>> ut.print_dict(kwargs, 'filter args')
        >>> aid_list = ibs.filter_annots_by_tags(**kwargs)
        >>> print('len(aid_list) = %r' % (len(aid_list),))
        >>> dpath = ut.get_argval('--dpath', default='')
        >>> all_tags = ut.flatten(ibs.get_annot_all_tags(aid_list))
        >>> filtered_tag_hist = ut.dict_hist(all_tags)
        >>> ut.print_dict(filtered_tag_hist, key_order_metric='val')
        >>> export_tagged_chips(ibs, aid_list, dpath)
    """
    # hash of the visual uuids makes the archive name unique per annot selection
    visual_uuid_hashid = ibs.get_annot_hashid_visual_uuid(aid_list)
    zip_fpath = ut.unixjoin(
        dpath, 'exported_chips2_' + ibs.get_dbname() + visual_uuid_hashid + '.zip'
    )
    chip_fpath = ibs.get_annot_chip_fpath(aid_list)
    ut.archive_files(zip_fpath, chip_fpath, common_prefix=True)
@register_ibs_method
def filter_annots_by_tags(ibs, aid_list=None, **kwargs):
    """
    Filter / Find / Search for annotations with particular tags.

    Defaults to all valid aids when `aid_list` is None; the keyword arguments
    are forwarded to :func:`filterflags_general_tags`.

    CommandLine:
        python -m wbia.tag_funcs --exec-filter_annots_by_tags --helpx
        python -m wbia.tag_funcs --exec-filter_annots_by_tags --db GZ_Master1
        python -m wbia.tag_funcs --exec-filter_annots_by_tags --db GZ_Master1 --min_num=1
        python -m wbia.tag_funcs --exec-filter_annots_by_tags --db GZ_Master1 --has_any=lighting --has_all=lighting:underexposed --show

    SeeAlso:
        python -m wbia.init.filter_annots --exec-filter_annots_general

    Example:
        >>> # ENABLE_DOCTEST
        >>> from wbia.tag_funcs import *  # NOQA
        >>> import wbia
        >>> ibs = wbia.opendb(defaultdb='testdb1')
        >>> aid_list = ibs.get_valid_aids()
        >>> kwargs = ut.argparse_dict(ut.get_kwdefaults2(filterflags_general_tags), type_hint=ut.ddict(list, logic=str))
        >>> ut.print_dict(kwargs, 'filter args')
        >>> aid_list = ibs.filter_annots_by_tags(aid_list, **kwargs)
        >>> print('len(aid_list) = %r' % (len(aid_list),))
        >>> # print results
        >>> all_tags = ut.flatten(ibs.get_annot_all_tags(aid_list))
        >>> filtered_tag_hist = ut.dict_hist(all_tags)
        >>> ut.print_dict(filtered_tag_hist, key_order_metric='val')
        >>> print('len(aid_list) = %r' % (len(aid_list),))
        >>> print('sum(tags) = %r' % (sum(filtered_tag_hist.values()),))
        >>> ut.quit_if_noshow()
        >>> import wbia.viz.interact
        >>> wbia.viz.interact.interact_chip.interact_multichips(ibs, aid_list)
        >>> ut.show_if_requested()
    """
    if aid_list is None:
        aid_list = ibs.get_valid_aids()
    tag_lists = ibs.get_annot_all_tags(aid_list)
    keep_flags = filterflags_general_tags(tag_lists, **kwargs)
    return ut.compress(aid_list, keep_flags)
@register_ibs_method
def filterflags_annot_tags(ibs, aid_list, **kwargs):
    """
    Compute one boolean flag per annotation indicating whether its tags
    satisfy the filters understood by :func:`filterflags_general_tags`.
    """
    all_tags = ibs.get_annot_all_tags(aid_list)
    return filterflags_general_tags(all_tags, **kwargs)
@register_ibs_method
def get_aidpair_tags(ibs, aid1_list, aid2_list, directed=True):
    r"""
    Look up the annotmatch case tags for each annotation pair.

    Args:
        ibs (IBEISController): wbia controller object
        aid1_list (list): first annotation of each pair
        aid2_list (list): second annotation of each pair
        directed (bool): when False the undirected superkey is used, so the
            pairs ``(a, b)`` and ``(b, a)`` resolve to the same row (default = True)

    Returns:
        list: tags_list - one list of tag strings per input pair

    CommandLine:
        python -m wbia.tag_funcs --exec-get_aidpair_tags --db PZ_Master1 --tags Hard interesting

    Example:
        >>> # DISABLE_DOCTEST
        >>> from wbia.tag_funcs import *  # NOQA
        >>> import wbia
        >>> ibs = wbia.opendb(defaultdb='testdb1')
        >>> has_any = ut.get_argval('--tags', type_=list, default=None)
        >>> min_num = ut.get_argval('--min_num', type_=int, default=1)
        >>> aid_pairs = filter_aidpairs_by_tags(ibs, has_any=has_any, min_num=1)
        >>> aid1_list = aid_pairs.T[0]
        >>> aid2_list = aid_pairs.T[1]
        >>> undirected_tags = get_aidpair_tags(ibs, aid1_list, aid2_list, directed=False)
        >>> tagged_pairs = list(zip(aid_pairs.tolist(), undirected_tags))
        >>> print(ut.repr2(tagged_pairs))
        >>> tag_dict = ut.groupby_tags(tagged_pairs, undirected_tags)
        >>> print(ut.repr2(tag_dict, nl=2))
        >>> print(ut.repr2(ut.map_dict_vals(len, tag_dict)))
    """
    aid_pairs = np.vstack([aid1_list, aid2_list]).T
    # the two branches differ only in which superkey lookup is used;
    # NOTE: an unreachable `if False:` experiment block (manual undirected
    # tag merging via vtool edge ids) was removed here.
    if directed:
        annotmatch_rowid = ibs.get_annotmatch_rowid_from_superkey(
            aid_pairs.T[0], aid_pairs.T[1]
        )
    else:
        annotmatch_rowid = ibs.get_annotmatch_rowid_from_undirected_superkey(
            aid_pairs.T[0], aid_pairs.T[1]
        )
    tags_list = ibs.get_annotmatch_case_tags(annotmatch_rowid)
    return tags_list
@register_ibs_method
def filter_aidpairs_by_tags(
    ibs, has_any=None, has_all=None, min_num=None, max_num=None, am_rowids=None
):
    """
    Find annotation pairs whose annotmatch tags satisfy the given filters.

    Args:
        ibs (IBEISController): wbia controller object
        has_any / has_all / min_num / max_num: forwarded to
            :func:`filter_annotmatch_by_tags`
        am_rowids (list): optional annotmatch rowids to restrict the search

    Returns:
        np.ndarray: ``K x 2`` array of (aid1, aid2) pairs, one representative
        per undirected edge.
    """
    filtered_annotmatch_rowids = filter_annotmatch_by_tags(
        ibs,
        am_rowids,
        has_any=has_any,
        has_all=has_all,
        min_num=min_num,
        max_num=max_num,
    )
    aid1_list = np.array(ibs.get_annotmatch_aid1(filtered_annotmatch_rowids))
    aid2_list = np.array(ibs.get_annotmatch_aid2(filtered_annotmatch_rowids))
    aid_pairs = np.vstack([aid1_list, aid2_list]).T
    # Don't double count: keep one representative per undirected edge.
    # (A no-op call to vt.get_undirected_edge_ids whose result was discarded
    # has been removed.)
    xs = vt.find_best_undirected_edge_indexes(aid_pairs)
    aid1_list = aid1_list.take(xs)
    aid2_list = aid2_list.take(xs)
    aid_pairs = np.vstack([aid1_list, aid2_list]).T
    return aid_pairs
def filter_annotmatch_by_tags(ibs, annotmatch_rowids=None, **kwargs):
    r"""
    Select the annotmatch rowids whose case tags satisfy the given filters
    (case-insensitive). Defaults to all annotmatch rowids when
    `annotmatch_rowids` is None; keyword arguments are forwarded to
    :func:`filterflags_general_tags`.

    Args:
        ibs (IBEISController): wbia controller object
        annotmatch_rowids (list): optional annotmatch rowids to restrict to

    Returns:
        list: annotmatch rowids that pass the tag filters

    CommandLine:
        python -m wbia.tag_funcs --exec-filter_annotmatch_by_tags --show
        python -m wbia.tag_funcs --exec-filter_annotmatch_by_tags --show --db PZ_Master1 --min-num=1
        python -m wbia.tag_funcs --exec-filter_annotmatch_by_tags --show --db PZ_Master1 --tags JoinCase
        python -m wbia.tag_funcs --exec-filter_annotmatch_by_tags --show --db PZ_Master1 --tags SplitCase
        python -m wbia.tag_funcs --exec-filter_annotmatch_by_tags --show --db PZ_Master1 --tags occlusion
        python -m wbia.tag_funcs --exec-filter_annotmatch_by_tags --show --db PZ_Master1 --tags viewpoint
        python -m wbia.tag_funcs --exec-filter_annotmatch_by_tags --show --db PZ_Master1 --tags SceneryMatch
        python -m wbia.tag_funcs --exec-filter_annotmatch_by_tags --show --db PZ_Master1 --tags Photobomb
        python -m wbia.tag_funcs --exec-filter_annotmatch_by_tags --show --db GZ_Master1 --tags needswork

    Example:
        >>> # DISABLE_DOCTEST
        >>> from wbia.tag_funcs import *  # NOQA
        >>> import wbia
        >>> #ibs = wbia.opendb(defaultdb='testdb1')
        >>> ibs = wbia.opendb(defaultdb='PZ_Master1')
        >>> #tags = ['Photobomb', 'SceneryMatch']
        >>> has_any = ut.get_argval('--tags', type_=list, default=['SceneryMatch', 'Photobomb'])
        >>> min_num = ut.get_argval('--min_num', type_=int, default=1)
        >>> prop = has_any[0]
        >>> filtered_annotmatch_rowids = filter_annotmatch_by_tags(ibs, None, has_any=has_any, min_num=min_num)
        >>> aid1_list = np.array(ibs.get_annotmatch_aid1(filtered_annotmatch_rowids))
        >>> aid2_list = np.array(ibs.get_annotmatch_aid2(filtered_annotmatch_rowids))
        >>> aid_pairs = np.vstack([aid1_list, aid2_list]).T
        >>> # Dont double count
        >>> xs = vt.find_best_undirected_edge_indexes(aid_pairs)
        >>> aid1_list = aid1_list.take(xs)
        >>> aid2_list = aid2_list.take(xs)
        >>> valid_tags_list = ibs.get_annotmatch_case_tags(filtered_annotmatch_rowids)
        >>> print('valid_tags_list = %s' % (ut.repr2(valid_tags_list, nl=1),))
        >>> #
        >>> print('Aid pairs with has_any=%s' % (has_any,))
        >>> print('Aid pairs with min_num=%s' % (min_num,))
        >>> print('aid_pairs = ' + ut.repr2(list(zip(aid1_list, aid2_list))))
        >>> # Show timedelta info
        >>> ut.quit_if_noshow()
        >>> timedelta_list = ibs.get_annot_pair_timedelta(aid1_list, aid2_list)
        >>> import wbia.plottool as pt
        >>> pt.draw_timedelta_pie(timedelta_list, label='timestamp of tags=%r' % (has_any,))
        >>> ut.show_if_requested()
    """
    if annotmatch_rowids is None:
        annotmatch_rowids = ibs._get_all_annotmatch_rowids()
    case_tags = ibs.get_annotmatch_case_tags(annotmatch_rowids)
    keep_flags = filterflags_general_tags(case_tags, **kwargs)
    return ut.compress(annotmatch_rowids, keep_flags)
# TODO: ut.filterflags_general_tags
def filterflags_general_tags(
    tags_list,
    has_any=None,
    has_all=None,
    has_none=None,
    min_num=None,
    max_num=None,
    any_startswith=None,
    any_endswith=None,
    any_match=None,
    none_match=None,
    logic='and',
):
    r"""
    Compute one boolean flag per tag list, combining the given predicates
    with `logic` ('and' or 'or'). All comparisons are case-insensitive.

    maybe integrate into utool? Seems pretty general

    Args:
        tags_list (list): list of tag lists to filter
        has_any (None): keep if any of these tags is present (default = None)
        has_all (None): keep if all of these tags are present (default = None)
        has_none (None): keep if none of these tags is present (default = None)
        min_num (None): keep if at least this many tags (default = None)
        max_num (None): keep if at most this many tags (default = None)
        any_startswith / any_endswith / any_match: keep if some tag matches
        none_match: keep if no tag matches
        logic (str): 'and' (default) or 'or' combination of the predicates

    Returns:
        np.ndarray: boolean flags, same length as tags_list

    CommandLine:
        python -m wbia.tag_funcs --exec-filterflags_general_tags
        python -m wbia.tag_funcs --exec-filterflags_general_tags:0  --helpx
        python -m wbia.tag_funcs --exec-filterflags_general_tags:0
        python -m wbia.tag_funcs --exec-filterflags_general_tags:0  --none_match n
        python -m wbia.tag_funcs --exec-filterflags_general_tags:0  --has_none=n,o
        python -m wbia.tag_funcs --exec-filterflags_general_tags:1
        python -m wbia.tag_funcs --exec-filterflags_general_tags:2

    Example0:
        >>> # DISABLE_DOCTEST
        >>> from wbia.tag_funcs import *  # NOQA
        >>> tags_list = [['v'], [], ['P'], ['P', 'o'], ['n', 'o',], [], ['n', 'N'], ['e', 'i', 'p', 'b', 'n'], ['q', 'v'], ['n'], ['n'], ['N']]
        >>> kwargs = ut.argparse_dict(ut.get_kwdefaults2(filterflags_general_tags), type_hint=list)
        >>> print('kwargs = %r' % (kwargs,))
        >>> flags = filterflags_general_tags(tags_list, **kwargs)
        >>> print(flags)
        >>> result = ut.compress(tags_list, flags)
        >>> print('result = %r' % (result,))

    Example1:
        >>> # ENABLE_DOCTEST
        >>> from wbia.tag_funcs import *  # NOQA
        >>> tags_list = [['v'], [], ['P'], ['P'], ['n', 'o',], [], ['n', 'N'], ['e', 'i', 'p', 'b', 'n'], ['n'], ['n'], ['N']]
        >>> has_all = 'n'
        >>> min_num = 1
        >>> flags = filterflags_general_tags(tags_list, has_all=has_all, min_num=min_num)
        >>> result = ut.compress(tags_list, flags)
        >>> print('result = %r' % (result,))

    Example2:
        >>> # ENABLE_DOCTEST
        >>> from wbia.tag_funcs import *  # NOQA
        >>> tags_list = [['vn'], ['vn', 'no'], ['P'], ['P'], ['n', 'o',], [], ['n', 'N'], ['e', 'i', 'p', 'b', 'n'], ['n'], ['n', 'nP'], ['NP']]
        >>> kwargs = {
        >>>     'any_endswith': 'n',
        >>>     'any_match': None,
        >>>     'any_startswith': 'n',
        >>>     'has_all': None,
        >>>     'has_any': None,
        >>>     'has_none': None,
        >>>     'max_num': 3,
        >>>     'min_num': 1,
        >>>     'none_match': ['P'],
        >>> }
        >>> flags = filterflags_general_tags(tags_list, **kwargs)
        >>> filtered = ut.compress(tags_list, flags)
        >>> result = ('result = %s' % (ut.repr2(filtered),))
        result = [['vn', 'no'], ['n', 'o'], ['n', 'N'], ['n'], ['n', 'nP']]
    """
    import re
    import operator

    def fix_tags(tags):
        # normalize for case-insensitive comparisons
        return {six.text_type(t.lower()) for t in tags}

    if logic is None:
        logic = 'and'
    logic_func = {'and': np.logical_and, 'or': np.logical_or}[logic]
    default_func = {'and': np.ones, 'or': np.zeros}[logic]
    tags_list_ = [fix_tags(tags_) for tags_ in tags_list]
    # FIX: np.bool is a removed alias (NumPy >= 1.24); the builtin bool is the
    # supported dtype and is what np.bool always aliased.
    flags = default_func(len(tags_list_), dtype=bool)
    if min_num is not None:
        flags_ = [len(tags_) >= min_num for tags_ in tags_list_]
        logic_func(flags, flags_, out=flags)
    if max_num is not None:
        flags_ = [len(tags_) <= max_num for tags_ in tags_list_]
        logic_func(flags, flags_, out=flags)
    if has_any is not None:
        has_any = fix_tags(set(ut.ensure_iterable(has_any)))
        flags_ = [len(has_any.intersection(tags_)) > 0 for tags_ in tags_list_]
        logic_func(flags, flags_, out=flags)
    if has_none is not None:
        has_none = fix_tags(set(ut.ensure_iterable(has_none)))
        flags_ = [len(has_none.intersection(tags_)) == 0 for tags_ in tags_list_]
        logic_func(flags, flags_, out=flags)
    if has_all is not None:
        has_all = fix_tags(set(ut.ensure_iterable(has_all)))
        flags_ = [
            len(has_all.intersection(tags_)) == len(has_all) for tags_ in tags_list_
        ]
        logic_func(flags, flags_, out=flags)

    def check_item(tags_, fields, op, compare):
        # True when the number of tags matching any field satisfies `op` vs 0
        t_flags = [any([compare(t, f) for f in fields]) for t in tags_]
        num_passed = sum(t_flags)
        flag = op(num_passed, 0)
        return flag

    def flag_tags(tags_list, fields, op, compare):
        # FIX: use the passed argument instead of silently closing over
        # tags_list_; call sites pass the normalized list, so behavior is
        # unchanged but the signature is now honest.
        flags = [check_item(tags_, fields, op, compare) for tags_ in tags_list]
        return flags

    def execute_filter(flags, tags_list, fields, op, compare):
        if fields is not None:
            fields = ut.ensure_iterable(fields)
            flags_ = flag_tags(tags_list, fields, op, compare)
            logic_func(flags, flags_, out=flags)
        return flags

    # string predicates operate on the normalized (lower-cased) tags
    flags = execute_filter(
        flags, tags_list_, any_startswith, operator.gt, six.text_type.startswith
    )
    flags = execute_filter(
        flags, tags_list_, any_endswith, operator.gt, six.text_type.endswith
    )
    flags = execute_filter(
        flags, tags_list_, any_match, operator.gt, lambda t, f: re.match(f, t)
    )
    flags = execute_filter(
        flags, tags_list_, none_match, operator.eq, lambda t, f: re.match(f, t)
    )
    return flags
@register_ibs_method
@profile
def get_annotmatch_case_tags(ibs, annotmatch_rowids):
    r"""
    Parse the free-text tag column of the annotmatch table into tag lists.

    Args:
        ibs (IBEISController): wbia controller object
        annotmatch_rowids (list): annotmatch table rowids

    Returns:
        list: tags_list - one list of tag strings per rowid

    CommandLine:
        python -m wbia.tag_funcs --exec-get_annotmatch_case_tags

    Example:
        >>> # DISABLE_DOCTEST
        >>> from wbia.tag_funcs import *  # NOQA
        >>> import wbia
        >>> ibs = wbia.opendb(defaultdb='PZ_Master1')
        >>> annotmatch_rowids = ibs._get_all_annotmatch_rowids()
        >>> tags_list = get_annotmatch_case_tags(ibs, annotmatch_rowids)
        >>> result = ('tags_list = %s' % (str(tags_list),))
        >>> print(result)
        tags_list = [[u'occlusion', u'pose', 'Hard', 'NonDistinct'], [], ['Hard']]
    """
    # `standard`/`other` are currently unused here; kept for the disabled
    # standard-column merging below.
    standard, other = get_cate_categories()
    annotmatch_tag_texts_list = ibs.get_annotmatch_tag_text(annotmatch_rowids)
    tags_list = [
        [] if note is None else _parse_tags(note) for note in annotmatch_tag_texts_list
    ]
    # NEW = False
    # if NEW:
    #     # hack for faster tag parsing
    #     from wbia.control import _autogen_annotmatch_funcs as _aaf
    #     import itertools
    #     colnames = (_aaf.ANNOTMATCH_IS_HARD, _aaf.ANNOTMATCH_IS_SCENERYMATCH,
    #                 _aaf.ANNOTMATCH_IS_PHOTOBOMB, _aaf.ANNOTMATCH_IS_NONDISTINCT)
    #     id_iter = annotmatch_rowids
    #     annotmatch_is_col = ibs.db.get(
    #         ibs.const.ANNOTMATCH_TABLE, colnames, id_iter, id_colname='rowid',
    #         eager=True, nInput=None, unpack_scalars=True)
    #     annotmatch_is_col = [col if col is not None else [None] * len(colnames)
    #                          for col in annotmatch_is_col]
    #     standardtags = [x[len('annotmatch_is_'):] for x in colnames]
    #     standard_tags_list = ut.list_zipcompress(itertools.repeat(standardtags), annotmatch_is_col)
    #     tags_list = [tags1 + tags2 for tags1, tags2 in zip(tags_list, standard_tags_list)]
    # else:
    #     for case in standard:
    #         flag_list = ibs.get_annotmatch_prop(case, annotmatch_rowids)
    #         for tags in ut.compress(tags_list, flag_list):
    #             tags.append(case)
    # coerce every tag to a text type for downstream case-insensitive handling
    tags_list = [[six.text_type(t) for t in tags] for tags in tags_list]
    # if ut.get_argval('--consol') or True:
    #     tags_list = consolodate_annotmatch_tags(tags_list)
    return tags_list
@profile
def get_annotmatch_standard_prop(ibs, prop, annotmatch_rowids):
    """Look up a standard annotmatch property via its dedicated column getter."""
    getter_name = 'get_annotmatch_is_' + prop.lower()
    return getattr(ibs, getter_name)(annotmatch_rowids)
@register_ibs_method
@profile
def get_annotmatch_prop(ibs, prop, annotmatch_rowids):
    r"""
    Hacky getter for dynamic properties of annotmatches using the notes table.

    Args:
        prop (str or list): property name(s); must be known "other" or "old" props
        annotmatch_rowids (list): annotmatch row ids

    Returns:
        list: flag list (or list of flag lists when ``prop`` is iterable)

    Raises:
        NotImplementedError: if any requested prop is unknown.

    CommandLine:
        python -m wbia.tag_funcs --exec-get_annotmatch_prop

    Example:
        >>> # DISABLE_DOCTEST
        >>> # Test setting and getting standard keys
        >>> from wbia.tag_funcs import *  # NOQA
        >>> import wbia
        >>> ibs = wbia.opendb(defaultdb='testdb1')
        >>> prop = 'hard'
        >>> annotmatch_rowids = ibs._get_all_annotmatch_rowids()
        >>> flag_list = get_annotmatch_prop(ibs, prop, annotmatch_rowids)
        >>> subset_rowids = annotmatch_rowids[::2]
        >>> set_annotmatch_prop(ibs, prop, subset_rowids, [True] * len(subset_rowids))
        >>> flag_list2 = get_annotmatch_prop(ibs, prop, annotmatch_rowids)
        >>> print('flag_list2 = %r' % (flag_list2,))

    Example:
        >>> # DISABLE_DOCTEST
        >>> # Test setting and getting non-standard keys
        >>> from wbia.tag_funcs import *  # NOQA
        >>> import wbia
        >>> ibs = wbia.opendb(defaultdb='testdb1')
        >>> prop = 'occlusion'
        >>> annotmatch_rowids = ibs._get_all_annotmatch_rowids()
        >>> flag_list = get_annotmatch_prop(ibs, prop, annotmatch_rowids)
        >>> subset_rowids1 = annotmatch_rowids[::2]
        >>> set_annotmatch_prop(ibs, prop, subset_rowids1, [True] * len(subset_rowids1))
        >>> set_annotmatch_prop(ibs, 'pose', subset_rowids1, [True] * len(subset_rowids1))
        >>> flag_list2 = get_annotmatch_prop(ibs, prop, annotmatch_rowids)
        >>> print('flag_list2 = %r' % (flag_list2,))
    """
    # Validate every requested prop before touching the notes column.
    for prop_ in ut.ensure_iterable(prop):
        lowered = prop_.lower()
        known = lowered in ANNOTMATCH_PROPS_OTHER_SET or lowered in ANNOTMATCH_PROPS_OLD_SET
        if not known:
            raise NotImplementedError('Unknown prop_=%r' % (prop_,))
    return get_annotmatch_other_prop(ibs, prop, annotmatch_rowids)
@register_ibs_method
def set_annotmatch_prop(ibs, prop, annotmatch_rowids, flags):
    """
    Hacky setter for dynamic properties of annotmatches using the notes table.

    Raises:
        NotImplementedError: if ``prop`` is not a known "other" or "old" prop.
    """
    logger.info(
        '[ibs] set_annotmatch_prop prop=%s for %d pairs' % (prop, len(annotmatch_rowids))
    )
    lowered = prop.lower()
    known = lowered in ANNOTMATCH_PROPS_OTHER_SET or lowered in ANNOTMATCH_PROPS_OLD_SET
    if not known:
        raise NotImplementedError(
            'Unknown prop=%r not in %r' % (prop, ANNOTMATCH_PROPS_OTHER_SET)
        )
    return set_annotmatch_other_prop(ibs, prop, annotmatch_rowids, flags)
def _parse_tags(note):
""" convert a note into tags """
return [tag.strip() for tag in note.split(';') if len(tag) > 0]
def _remove_tag(tags, prop):
""" convert a note into tags """
try:
tags.remove(prop)
except ValueError:
pass
return tags
@profile
def get_annotmatch_other_prop(ibs, prop, annotmatch_rowids):
    """Read nonstandard annotmatch properties out of the tag-text column."""
    note_list = ibs.get_annotmatch_tag_text(annotmatch_rowids)
    return get_textformat_tag_flags(prop, note_list)
def set_annotmatch_other_prop(ibs, prop, annotmatch_rowids, flags):
    """
    Sets nonstandard properties by rewriting the notes (tag text) column.
    """
    old_notes = ibs.get_annotmatch_tag_text(annotmatch_rowids)
    new_notes = set_textformat_tag_flags(prop, old_notes, flags)
    ibs.set_annotmatch_tag_text(annotmatch_rowids, new_notes)
@profile
def get_textformat_tag_flags(prop, text_list):
    """General text-tag getter hack.

    Returns 0/1 membership flags of ``prop`` within each note string
    (``None`` notes yield ``None`` flags). When ``prop`` is iterable,
    returns one flag list per prop.
    """
    tags_list = [_parse_tags(note) if note is not None else None for note in text_list]

    def _flags_for(needle):
        # One 0/1 flag per note; None notes stay None.
        return [None if tags is None else int(needle in tags) for tags in tags_list]

    if ut.isiterable(prop):
        return [_flags_for(p.lower()) for p in prop]
    return _flags_for(prop.lower())
def set_textformat_tag_flags(prop, text_list, flags):
    """General text-tag setter hack.

    Sets membership of ``prop`` in each semicolon-delimited note string
    according to ``flags`` and returns the rewritten note strings.
    """
    prop = prop.lower()
    tags_list = [_parse_tags(note if note is not None else '') for note in text_list]
    # Drop the tag everywhere first, then re-add it only where flagged.
    stripped_list = [_remove_tag(tags, prop) for tags in tags_list]
    for tags, flag in zip(stripped_list, flags):
        if flag:
            tags.append(prop)
    return [';'.join(tags) for tags in stripped_list]
# Canonical vocabulary of unary (single-annotation) case tags. Entries are
# either a bare category name or a ``category:detail`` refinement; this list
# is exposed to callers through get_available_annot_tags().
ANNOT_TAGS = [
    'occlusion',
    'lighting',
    'quality',
    'pose',
    'error',
    'interesting',
    'error:viewpoint',
    'error:quality',
    'occlusion:large',
    'occlusion:medium',
    'occlusion:small',
    'lighting:shadowed',
    'lighting:overexposed',
    'lighting:underexposed',
    'quality:washedout',
    'quality:blury',
    'pose:novel',
    'pose:common',
    'error:bbox',
    'error:mask',
    'error:other',
]
def get_available_annot_tags():
    """Return the canonical list of annotation case tags."""
    return ANNOT_TAGS
def get_annot_prop(ibs, prop, aid_list):
    """
    Annotation tags: membership flags of ``prop`` for each annotation.
    """
    note_list = ibs.get_annot_tag_text(aid_list)
    return get_textformat_tag_flags(prop, note_list)
@register_ibs_method
def set_annot_prop(ibs, prop, aid_list, flags):
    """
    Sets nonstandard annotation properties by rewriting the notes column.
    """
    old_notes = ibs.get_annot_tag_text(aid_list)
    new_notes = set_textformat_tag_flags(prop, old_notes, flags)
    ibs.set_annot_tag_text(aid_list, new_notes)
@register_ibs_method
def append_annot_case_tags(ibs, aid_list, tag_list):
    """
    Appends tags to annotations, set-style: duplicates are not re-added.
    Careful not to introduce too many random tags. Maybe we should just let
    that happen and introduce tag-aliases.

    TODO: remove
    """
    if isinstance(tag_list, six.string_types):
        # A single tag string applies to every annotation.
        tag_list = [tag_list] * len(aid_list)
    added_tags = [ut.ensure_iterable(tag) for tag in tag_list]
    note_list = ibs.get_annot_tag_text(aid_list)
    existing_tags = [_parse_tags(note) if note is not None else [] for note in note_list]
    # New tags come first; ut.unique keeps first occurrence.
    merged = [ut.unique(new + old) for new, old in zip(added_tags, existing_tags)]
    ibs.set_annot_case_tags(aid_list, merged)
@register_ibs_method
def set_annot_case_tags(ibs, aid_list, new_tags_list):
    """
    Completely overwrite the case tags of each annotation.
    """
    for tags in new_tags_list:
        assert isinstance(tags, list), 'each set of tags must be a list of strs'
    joined = [';'.join(tags) for tags in new_tags_list]
    ibs.set_annot_tag_text(aid_list, joined)
@register_ibs_method
def remove_annot_case_tags(ibs, aid_list, tag_list):
    """Remove the given tags from each annotation's case-tag set."""
    if isinstance(tag_list, six.string_types):
        # A single tag string applies to every annotation.
        tag_list = [tag_list] * len(aid_list)
    removal_sets = [ut.ensure_iterable(tag) for tag in tag_list]
    note_list = ibs.get_annot_tag_text(aid_list)
    current_tags = [_parse_tags(note) if note is not None else [] for note in note_list]
    kept = [ut.setdiff(tags, gone) for gone, tags in zip(removal_sets, current_tags)]
    ibs.set_annot_tag_text(aid_list, [';'.join(tags) for tags in kept])
@register_ibs_method
def overwrite_annot_case_tags(ibs, aid_list, tag_list):
    """
    Completely replaces annotation tags.
    BE VERY CAREFUL WITH THIS FUNCTION
    """
    assert all(ut.isiterable(tag) for tag in tag_list)
    joined = [';'.join(tags) for tags in tag_list]
    ibs.set_annot_tag_text(aid_list, joined)
@register_ibs_method
def remove_all_annot_case_tags(ibs, aid_list):
    """Clear every case tag on the given annotations."""
    blank_notes = [''] * len(aid_list)
    ibs.set_annot_tag_text(aid_list, blank_notes)
@register_ibs_method
def get_annot_case_tags(ibs, aid_list):
    r"""
    Returns each annotation's list of case tags. Use instead of
    get_annot_tag_text.

    TODO:
        rename to get_annot_unary_tags
        FIXME: incorporate old tag notes (see repo history for the
        exploratory cleanup snippets).

    Args:
        ibs (IBEISController): wbia controller object
        aid_list (list): list of annotation rowids

    Returns:
        list: tags_list

    CommandLine:
        python -m wbia.tag_funcs --exec-get_annot_case_tags

    Example:
        >>> # ENABLE_DOCTEST
        >>> from wbia.tag_funcs import *  # NOQA
        >>> from wbia.tag_funcs import _parse_tags  # NOQA
        >>> import wbia
        >>> ibs = wbia.opendb(defaultdb='testdb1')
        >>> aid_list = ibs.get_valid_aids()
        >>> tags_list = get_annot_case_tags(ibs, aid_list)
        >>> result = ('tags_list = %s' % (str(tags_list),))
        >>> print(result)
    """
    note_list = ibs.get_annot_tag_text(aid_list)
    return [_parse_tags(note) if note is not None else [] for note in note_list]
@register_ibs_method
@profile
def get_annot_annotmatch_tags(ibs, aid_list):
    r"""
    Returns the union of annotmatch case tags touching each annotation.

    Args:
        ibs (IBEISController): wbia controller object
        aid_list (list): list of annotation rowids

    Returns:
        list: annotmatch_tags_list

    CommandLine:
        python -m wbia.tag_funcs --exec-get_annot_annotmatch_tags --db GZ_Master1

    Example:
        >>> # ENABLE_DOCTEST
        >>> from wbia.tag_funcs import *  # NOQA
        >>> import wbia
        >>> ibs = wbia.opendb(defaultdb='testdb1')
        >>> aid_list = ibs.get_valid_aids()
        >>> all_tags = ut.flatten(get_annot_annotmatch_tags(ibs, aid_list))
        >>> tag_hist = ut.dict_hist(all_tags)
        >>> ut.print_dict(tag_hist)
    """
    annotmatch_rowids = ibs.get_annotmatch_rowids_from_aid(aid_list)
    unflat_tags = ibs.unflat_map(ibs.get_annotmatch_case_tags, annotmatch_rowids)
    # Deduplicate tags across all matches touching each annotation.
    return [list(set(ut.flatten(tag_groups))) for tag_groups in unflat_tags]
@register_ibs_method
@profile
def get_annot_all_tags(ibs, aid_list=None):
    """
    Combined unary case tags and pairwise annotmatch tags per annotation.

    CommandLine:
        python -m wbia.tag_funcs --exec-get_annot_all_tags --db GZ_Master1

    Example:
        >>> # ENABLE_DOCTEST
        >>> from wbia.tag_funcs import *  # NOQA
        >>> import wbia
        >>> ibs = wbia.opendb(defaultdb='testdb1')
        >>> aid_list = ibs.get_valid_aids()
        >>> all_tags = ut.flatten(ibs.get_annot_all_tags(aid_list))
        >>> tag_hist = ut.dict_hist(all_tags)
        >>> ut.print_dict(tag_hist)
    """
    if aid_list is None:
        aid_list = ibs.get_valid_aids()
    match_tags_list = ibs.get_annot_annotmatch_tags(aid_list)
    case_tags_list = ibs.get_annot_case_tags(aid_list)
    # Case tags first, then match tags; keep first occurrence of each tag.
    combined = [
        ut.unique_ordered(ut.flatten(pair))
        for pair in zip(case_tags_list, match_tags_list)
    ]
    return combined
|
|
import math
import numpy as np
def confidence(prediction):
    """
    Metric to evaluate the confidence of a model's prediction of an image's class.

    :param prediction: List[float] per-class probability of an image to belong to the class
    :return: Difference between guessed class probability and the mean of other probabilities
    """
    top = np.max(prediction)
    others_mean = (np.sum(prediction) - top) / (len(prediction) - 1)
    return top - others_mean
def prediction_rating(prediction, true_class):
    """
    Metric to evaluate the quality of a model's prediction of an image's class.

    :param prediction: List[float] per-class probability of an image to belong to the class
    :param true_class: The class the image actually belongs to.
    :return: rating in (0, 1]; 1 when all wrong-class probabilities are ~zero
    """
    p_true = prediction[true_class]
    others = np.delete(prediction, true_class)
    p_max = np.max(others)
    p_min = np.min(others)
    if p_max == p_min:
        # Degenerate spread is only expected when wrong classes are ~zero.
        assert p_max < 0.01
        return 1
    ratio = (1 + p_true - p_max) / (p_max - p_min)
    return math.atan(ratio) * 2 / math.pi
def prediction_ratings(predictions, true_classes):
    """Return prediction_rating() for each (prediction, true_class) pair.

    :param predictions: sequence of per-class probability vectors
    :param true_classes: sequence of ground-truth class indices
    :return: list of per-sample prediction ratings
    """
    # BUG FIX: ``xrange`` is Python-2 only (NameError on Python 3);
    # ``range`` behaves identically here on both versions.
    return [prediction_rating(predictions[i], true_classes[i])
            for i in range(len(predictions))]
def confidences(predictions):
    """Compute the confidence metric for every prediction."""
    return list(map(confidence, predictions))
def accuracy(predicted_classes, true_classes):
    """
    Computes accuracy of a model based on the predictions /predicted_classes/.

    :param predicted_classes: List[int] Classes guessed by the model
    :param true_classes: List[int] Ground-truth
    :return: float Accuracy of the input predictions
    """
    total = len(true_classes)
    # A nonzero difference marks a misclassified sample.
    num_wrong = np.count_nonzero(np.subtract(predicted_classes, true_classes))
    return float(total - num_wrong) / total
# ====== # Sorting # ====== #
def sort_by_correctness(predictions, true_classes, orig_images):
    """
    Separates a test dataset into correctly guessed images and incorrectly guessed images.

    :param predictions: List[int] predicted class per image
    :param true_classes: List[int] ground-truth class per image
    :param orig_images: images aligned with the two lists above
    :return: (correct_images, incorrect_images)
    """
    correct_images = []
    incorrect_images = []
    # BUG FIX: ``xrange`` is Python-2 only (NameError on Python 3);
    # ``range`` behaves identically here on both versions.
    for i in range(len(predictions)):
        if predictions[i] == true_classes[i]:
            correct_images.append(orig_images[i])
        else:
            incorrect_images.append(orig_images[i])
    return correct_images, incorrect_images
def sort_by_confidence(confidences, number_elements=None):
    """
    Splits off the indexes of the most and least confident predictions.

    :param confidences: List of confidences
    :param number_elements: How many elements per group (capped at half the list)
    :return: Two index arrays: (high_confidence, low_confidence)
    """
    half = len(confidences) // 2
    if number_elements is None or number_elements > half:
        number_elements = half
    order = np.argsort(confidences)
    high_confidence = order[-number_elements:]
    low_confidence = order[:number_elements]
    return high_confidence, low_confidence
|
|
'''Cell-cell variation measurements'''
import numpy as np
import pandas as pd
import scanpy.api as sc
import anndata
from typing import Union, Callable, Iterable
import matplotlib.pyplot as plt
def median_filter(x: np.ndarray,
                  k: int,
                  pad_ends: bool = True,) -> np.ndarray:
    '''Computes a running median of signal `x` with a centered
    window of size `k`.

    Parameters
    ----------
    x : np.ndarray
        [T,] length signal.
    k : int
        size of the kernel window. must be odd.
    pad_ends : bool
        pad the output back to length T by repeating the first and
        last valid filtered values.

    Returns
    -------
    y : np.ndarray
        median filtered output; [T,] when `pad_ends`, else [T - k + 1,].

    Notes
    -----
    size of an output from a convolution with no zero-padding
    and variable strides is:

    .. math::
        O = (I - k)/s + 1

    where O is the output size, I is the input size, k is the kernel,
    and s is the stride. So here O = (T - k)/1 + 1.

    References
    ----------
    https://arxiv.org/abs/1603.07285
    '''
    if k % 2 != 1:
        raise ValueError('k must be odd, you passed %d' % k)
    T = x.shape[0]
    half = k // 2
    sidx = half
    eidx = T - half
    O = np.zeros((T - k) + 1)
    # BUG FIX: the window must be CENTERED on idx (x[idx-half : idx+half+1]).
    # The previous code used x[idx : idx+k], which shifted every window by
    # k//2 and let the last windows run off the array end, where numpy
    # silently truncated them to fewer than k samples.
    for i, idx in enumerate(range(sidx, eidx)):
        O[i] = np.median(x[(idx - half):(idx + half + 1)])
    if pad_ends:
        # hold values of filtered signal in H and remake O as padded output
        H = O.copy()
        O = np.zeros(x.shape[0])
        O[sidx:eidx] = H
        O[:sidx] = H[0]
        O[eidx:] = H[-1]
    return O
def diff_from_median(X: np.ndarray,
                     gene_names: np.ndarray,
                     min_mean: float = 0.1,
                     max_mean: float = 5.,
                     kernel_size: int = 49,
                     plot: bool = True,
                     logged: bool = True,) -> pd.DataFrame:
    '''Implements the difference-from-the-median
    method of estimating overdispersion
    Parameters
    ----------
    X : np.ndarray
        [Cells, Genes] expression matrix.
    gene_names : np.ndarary
        [Genes,] gene name strings.
    min_mean : float
        minimum mean expression value to be included
        in the median variation calculation.
    max_mean : float
        maximum mean expression value to be included in the
        median variation calculation.
    kernel_size : int
        size of the kernel for median filtering
        coefficients of variation.
    plot : bool
        plot the rolling median
    logged : bool
        values in `X` are log counts.
    Returns
    -------
    overdispersion : pd.DataFrame
        [Genes,] overdispersion estimate. indexed by gene name.
    References
    ----------
    Kolodziejczyk, A. A., et. al. (2015). Cell stem cell, 17(4), 471-85.
    '''
    # Per-gene mean expression across cells; assumes X is a dense
    # [Cells, Genes] array — TODO confirm for sparse inputs.
    gene_means = X.mean(axis=0)
    # Restrict to genes within the (min_mean, max_mean) expression window.
    expressed_bidx = np.logical_and(
        gene_means > min_mean,
        gene_means < max_mean,)
    expr_gene_means = gene_means[expressed_bidx]
    expr_gene_names = gene_names[expressed_bidx]
    sdevs = X.std(axis=0)
    expr_sdevs = sdevs[expressed_bidx]
    # Squared coefficient of variation per retained gene.
    cvs = expr_sdevs / expr_gene_means
    cvs2 = cvs**2
    # If values are not already logged, move to log10 space so the rolling
    # median is taken on a log-log mean/CV^2 relationship.
    if not logged:
        log_expr_means = np.log10(expr_gene_means)
        log_cv2 = np.log10(cvs2)
    else:
        log_expr_means = expr_gene_means
        log_cv2 = cvs2
    # Order genes by mean expression so the median filter runs along the
    # mean-expression axis.
    log_cv2_sorted = log_cv2[np.argsort(log_expr_means)]
    sorted_gene_names = expr_gene_names[np.argsort(log_expr_means)]
    # use a median filter to calculate the rolling median
    rolling_median = median_filter(log_cv2_sorted, k=kernel_size)
    # compute "difference from the median"
    DM = log_cv2_sorted - rolling_median
    # Scatter DM (and the names) back into the original gene order so the
    # result aligns with expr_gene_means below.
    ordered_DM = np.zeros_like(DM)
    ordered_DM[np.argsort(log_expr_means)] = DM
    ordered_gene_names = np.zeros_like(sorted_gene_names)
    ordered_gene_names[np.argsort(log_expr_means)] = sorted_gene_names
    df = pd.DataFrame({'DM': ordered_DM,
                       'Mean': expr_gene_means},
                      index=ordered_gene_names)
    if plot:
        # NOTE(review): low_dm_bidx indexes DM in mean-sorted order, matching
        # the sorted arrays used for plotting below.
        low_dm_bidx = DM < np.percentile(ordered_DM, 95)
        plt.figure()
        plt.scatter(np.sort(log_expr_means)[low_dm_bidx],
                    log_cv2_sorted[low_dm_bidx],
                    alpha=0.5, s=1.,
                    c='gray')
        plt.scatter(np.sort(log_expr_means)[~low_dm_bidx],
                    log_cv2_sorted[~low_dm_bidx],
                    alpha=0.5, s=1.,
                    c='black')
        plt.plot(np.sort(log_expr_means), rolling_median,
                 c='blue', label='Median')
        plt.legend(frameon=False)
        plt.xlabel(r'$\mu$')
        plt.ylabel(r'CV^2')
    return df
def diff_from_centroid(adata: anndata.AnnData,
                       groupby: str = 'cell_type',
                       embedding: str = 'counts') -> pd.DataFrame:
    '''
    Computes the distance of each sample from the centroid
    of its group.

    Parameters
    ----------
    adata : anndata.AnnData
        [Cells, Genes]
    groupby : str
        column in `adata.obs` defining groups.
    embedding : str
        space in which to compute distances
        ["counts", "pca"].

    Returns
    -------
    df : pd.DataFrame
        [Cells, (DistanceToMean, Group)]
        rows ordered by group, indexed by cell name.

    Raises
    ------
    ValueError
        if `embedding` is not one of ["counts", "pca"].
    '''
    if embedding == 'counts':
        X = adata.X.toarray()
    elif embedding == 'pca':
        X = adata.obsm['X_pca']
    else:
        raise ValueError('embedding must be "counts" or "pca", got %s' % embedding)
    groups = np.unique(adata.obs[groupby])
    distances = []
    for g in groups:
        group_bidx = np.array(adata.obs[groupby] == g)
        group_X = X[group_bidx, :]
        # Euclidean distance of each cell to its group centroid; broadcasting
        # subtracts the [1, Genes] centroid from every row of group_X.
        group_center = group_X.mean(0).reshape(1, -1)
        D = np.sqrt(np.sum((group_X - group_center)**2, axis=1))
        dist_df = pd.DataFrame(
            {'DistanceToMean': D,
             'Group': g,
             },
            index=np.array(adata.obs_names)[group_bidx],
        )
        distances.append(dist_df)
    # BUG FIX: `axis` is keyword-only in pandas >= 2.0; the previous
    # positional call pd.concat(distances, 0) raises TypeError there.
    return pd.concat(distances, axis=0)
|
|
"""
@author: Timothy Brathwaite
@name: Bootstrap Sampler
@summary: This module provides functions that will perform the stratified
resampling needed for the bootstrapping procedure.
"""
from collections import OrderedDict
import numpy as np
import pandas as pd
def relate_obs_ids_to_chosen_alts(obs_id_array,
                                  alt_id_array,
                                  choice_array):
    """
    Maps each unique alternative id to the sorted, unique observation ids of
    the observational units that chose that alternative.

    Parameters
    ----------
    obs_id_array : 1D ndarray of ints.
        Long-format array of observation ids, one per row of the data.
    alt_id_array : 1D ndarray of ints.
        Long-format array of alternative ids, one per row of the data.
    choice_array : 1D ndarray of ints.
        Ones and zeros marking whether the row's alternative was chosen.

    Returns
    -------
    chosen_alts_to_obs_ids : dict.
        Each key is a unique value from `alt_id_array`; each value is a 1D
        ndarray of the sorted, unique observation ids that chose it.
    """
    chosen_alts_to_obs_ids = {}
    unique_alt_ids = np.sort(np.unique(alt_id_array))
    for alt_id in unique_alt_ids:
        # Rows where this alternative appears AND was the chosen one.
        row_mask = np.where((alt_id_array == alt_id) & (choice_array == 1))
        choosers = obs_id_array[row_mask]
        chosen_alts_to_obs_ids[alt_id] = np.sort(np.unique(choosers))
    return chosen_alts_to_obs_ids
def get_num_obs_choosing_each_alternative(obs_per_alt_dict):
    """
    Counts the observational units that chose each alternative and totals
    the unique observations in the dataset.

    Parameters
    ----------
    obs_per_alt_dict : dict.
        Each key is a unique alternative id; each value is a 1D ndarray of
        the sorted, unique observation ids that chose that alternative.

    Returns
    -------
    num_obs_per_group : OrderedDict.
        Keys are the alternative ids from `obs_per_alt_dict`; values are
        `len(obs_per_alt_dict[alt_id])`.
    tot_num_obs : int.
        Total number of unique observation ids in the dataset.
    """
    num_obs_per_group = OrderedDict(
        (alt_id, len(members)) for alt_id, members in obs_per_alt_dict.items()
    )
    tot_num_obs = sum(num_obs_per_group.values())
    return num_obs_per_group, tot_num_obs
def create_cross_sectional_bootstrap_samples(obs_id_array,
                                             alt_id_array,
                                             choice_array,
                                             num_samples,
                                             seed=None):
    """
    Determines the unique observations that will be present in each bootstrap
    sample, stratified by chosen alternative. This function DOES NOT create
    the new design matrices or a new long-format dataframe for each bootstrap
    sample. These are correct bootstrap samples for cross-sectional datasets
    only; the function will not work correctly for panel datasets.

    Parameters
    ----------
    obs_id_array : 1D ndarray of ints.
        Unique observation id per row of the long-format data.
    alt_id_array : 1D ndarray of ints.
        Unique alternative id per row of the long-format data.
    choice_array : 1D ndarray of ints.
        Ones and zeros marking whether the row's alternative was chosen.
    num_samples : int.
        Number of bootstrap samples to draw.
    seed : non-negative int or None, optional.
        Random seed for reproducibility. Default is None (not reproducible).

    Returns
    -------
    ids_per_sample : 2D ndarray.
        One row per bootstrap sample; each element is the observation id of
        a resampled observational unit.
    """
    # Which observational units chose each alternative.
    obs_per_alt =\
        relate_obs_ids_to_chosen_alts(obs_id_array, alt_id_array, choice_array)
    # Per-alternative counts and the overall number of observations.
    group_sizes, total_obs =\
        get_num_obs_choosing_each_alternative(obs_per_alt)
    if seed is not None:
        if not isinstance(seed, int):
            raise ValueError("`boot_seed` MUST be an int.")
        # Seed numpy so the samples are reproducible.
        np.random.seed(seed)
    # NOTE(review): ids are stored as floats even though observation ids are
    # ints — presumably harmless downstream, but confirm before changing.
    ids_per_sample = np.empty((num_samples, total_obs), dtype=float)
    col_start = 0
    for alt_id, resample_size in group_sizes.items():
        pool = obs_per_alt[alt_id]
        # Resample, with replacement, units that chose this alternative.
        draws = np.random.choice(pool,
                                 size=resample_size * num_samples,
                                 replace=True)
        ids_per_sample[:, col_start:(col_start + resample_size)] =\
            draws.reshape((num_samples, resample_size))
        col_start += resample_size
    return ids_per_sample
def create_bootstrap_id_array(obs_id_per_sample):
    """
    Builds the 'bootstrap ids' (1..n_cols, repeated per row) for each
    replication of each unit of observation in the bootstrap samples.

    Parameters
    ----------
    obs_id_per_sample : 2D ndarray of ints.
        One row per bootstrap sample, one column per resampled unit.

    Returns
    -------
    bootstrap_id_array : 2D ndarray of ints.
        Same shape as `obs_id_per_sample`; each element is the fake
        observational id in the new bootstrap dataset.
    """
    n_rows, n_cols = obs_id_per_sample.shape
    one_row = np.arange(1, n_cols + 1)
    bootstrap_id_array = np.tile(one_row, n_rows).reshape((n_rows, n_cols))
    return bootstrap_id_array
def create_deepcopied_groupby_dict(orig_df, obs_id_col):
    """
    Builds a dict mapping each unique value of `orig_df[obs_id_col]` to a
    deep copy of the rows of `orig_df` having that value.

    Parameters
    ----------
    orig_df : pandas DataFrame.
        Long-format dataframe of the data used to estimate the choice model.
    obs_id_col : str.
        Column name in `orig_df` holding the original observation ids.

    Returns
    -------
    groupby_dict : dict.
        Keys are unique observation ids; values are deep-copied row subsets.
    """
    obs_id_vals = orig_df[obs_id_col].values
    groupby_dict = {}
    for obs_id in np.unique(obs_id_vals):
        row_mask = obs_id_vals == obs_id
        # Deep copy so later mutation of one group can't alias another.
        groupby_dict[obs_id] = orig_df.loc[row_mask].copy(deep=True)
    return groupby_dict
def check_column_existence(col_name, df, presence=True):
    """
    Verifies the presence (or absence) of `col_name` in `df.columns` and
    raises a helpful ValueError when the condition is not met.

    Parameters
    ----------
    col_name : str.
        Column whose presence in `df` is to be checked.
    df : pandas DataFrame.
        Dataframe to check.
    presence : bool, optional.
        If True, require `col_name` to be present; if False, require it to
        be absent. Default == True.

    Returns
    -------
    None.
    """
    col_in_df = col_name in df.columns
    if presence and not col_in_df:
        raise ValueError("Ensure that `{}` is in `df.columns`.".format(col_name))
    if not presence and col_in_df:
        raise ValueError("Ensure that `{}` is not in `df.columns`.".format(col_name))
    return None
def ensure_resampled_obs_ids_in_df(resampled_obs_ids, orig_obs_id_array):
    """
    Verifies that every id in `resampled_obs_ids` appears in
    `orig_obs_id_array`, raising a helpful ValueError otherwise.

    Parameters
    ----------
    resampled_obs_ids : 1D ndarray of ints.
        Observation ids of the units in the current bootstrap sample.
    orig_obs_id_array : 1D ndarray of ints.
        Observation ids of the units in the original dataframe.

    Returns
    -------
    None.
    """
    all_present = np.in1d(resampled_obs_ids, orig_obs_id_array).all()
    if not all_present:
        raise ValueError(
            "All values in `resampled_obs_ids` MUST be in `orig_obs_id_array`.")
    return None
def create_bootstrap_dataframe(orig_df,
                               obs_id_col,
                               resampled_obs_ids_1d,
                               groupby_dict,
                               boot_id_col="bootstrap_id"):
    """
    Builds the long-format dataframe needed to estimate a choice model on the
    observational units that belong to the current bootstrap sample.

    Parameters
    ----------
    orig_df : pandas DataFrame.
        Should be long-format dataframe containing the data used to estimate
        the desired choice model.
    obs_id_col : str.
        Should be a column name within `orig_df`. Should denote the original
        observation id column.
    resampled_obs_ids_1d : 1D ndarray of ints.
        Each value should represent the alternative id of a given bootstrap
        replicate.
    groupby_dict : dict.
        Each key will be a unique value in `orig_df[obs_id_col]` and each value
        will be the rows of `orig_df` where `orig_df[obs_id_col] == key`.
    boot_id_col : str, optional.
        Denotes the new column that will be created to specify the bootstrap
        observation ids for choice model estimation.

    Returns
    -------
    bootstrap_df : pandas Dataframe.
        Will contain all the same columns as `orig_df` as well as the
        additional `boot_id_col`. For each value in `resampled_obs_ids_1d`,
        `bootstrap_df` will contain the long format rows from `orig_df` that
        have the given observation id.
    """
    # Validate the arguments: the original id column must exist and the new
    # bootstrap id column must not clash with an existing one.
    check_column_existence(obs_id_col, orig_df, presence=True)
    check_column_existence(boot_id_col, orig_df, presence=False)
    # All resampled ids must come from the original dataframe.
    ensure_resampled_obs_ids_in_df(resampled_obs_ids_1d,
                                   orig_df[obs_id_col].values)
    # Collect one copied sub-frame per resampled observation, tagging each
    # with a 1-based bootstrap id so repeated draws stay distinguishable.
    pieces = []
    for bootstrap_num, obs_id in enumerate(resampled_obs_ids_1d, start=1):
        piece = groupby_dict[obs_id].copy()
        piece[boot_id_col] = bootstrap_num
        pieces.append(piece)
    # Stack the pieces into a single long-format dataframe.
    return pd.concat(pieces, axis=0, ignore_index=True)
|
|
#Libraries to include; you can add more libraries to extend beyond the
# functionality in the tutorial
import numpy as np
import geneMLLib as ml #our custom library
from sklearn import metrics
from sklearn.cluster import KMeans
def main():
    """Cluster yeast gene-expression profiles with K-means and print the
    silhouette score plus the gene names assigned to each cluster."""
    data_dir = "../data/sample-yeast/"
    # Expression matrix (rows = genes) and matching gene names.
    expr = ml.loadGeneExpression(data_dir + 'expression.csv')
    gene_names = ml.loadGeneNames(data_dir + 'names.txt')
    n_clusters = 5
    km = KMeans(n_clusters=n_clusters)
    km.fit(expr)
    cluster_labels = km.predict(expr)
    # Overall clustering quality.
    print(metrics.silhouette_score(expr, cluster_labels))
    # List each cluster's member genes.
    for cluster in range(n_clusters):
        print("Cluster %d" % cluster)
        for gene in gene_names[cluster_labels == cluster]:
            print("\t" + gene)
        print()
main()
|
|
from __future__ import annotations
from typing import Any, Dict, Type, cast
import numpy as np
from tiro_fhir import CodeableConcept
import pandas as pd
from pandas._typing import DtypeObj
from pandas.core.dtypes.dtypes import PandasExtensionDtype, Ordered, Dtype
class CodeableConceptDtypeDtype(type):
    # NOTE(review): this metaclass is never referenced anywhere in this
    # module — presumably vestigial; confirm before removing.
    pass
@pd.api.extensions.register_extension_dtype
class CodeableConceptDtype(pd.api.extensions.ExtensionDtype):
    """Pandas extension dtype for columns of FHIR ``CodeableConcept`` scalars."""

    # Scalar type held by the associated array.
    type = CodeableConcept
    name = "CodeableConcept"
    na_value = None

    @classmethod
    def construct_array_type(cls) -> Type[CodeableConceptArray]:
        """Return the array class associated with this dtype.

        BUG FIX: the annotation previously claimed ``Type[CodeableConcept]``
        (the scalar type) even though the method returns the array class.
        """
        return CodeableConceptArray

    @classmethod
    def construct_from_string(cls, string: str) -> CodeableConceptDtype:
        """Construct this dtype from its registered name string."""
        if not isinstance(string, str):
            raise TypeError(
                f"'construct_from_string' expects a string, got {type(string)}"
            )
        if string != cls.name:
            raise TypeError(f"Cannot construct a '{cls.name}' from '{string}'")
        return cls()

    @property
    def _is_boolean(self) -> bool:
        return False

    @property
    def _is_numeric(self) -> bool:
        return False

    def _get_common_dtype(self, dtypes: list[DtypeObj]) -> DtypeObj | None:
        # If every participating dtype is a CodeableConceptDtype, the common
        # dtype is this dtype itself.
        # BUG FIX: previously returned the scalar class `CodeableConcept`;
        # pandas expects a dtype *instance* (or None) here.
        if all(isinstance(x, CodeableConceptDtype) for x in dtypes):
            return self
        return None
class CodeableConceptArray(pd.api.extensions.ExtensionArray):
    """1-D pandas extension array of ``CodeableConcept`` scalars.

    BUG FIX: this class previously subclassed ``ExtensionDtype`` even though
    it implements the array protocol (and ``CodeableConceptDtype
    .construct_array_type`` returns it); it must derive from
    ``ExtensionArray``.
    """

    def __init__(self, values, dtype=None, copy=False):
        """Instantiate the array, validating every element.

        ``dtype`` and ``copy`` are accepted for interface compatibility but
        are not used.

        Raises
        ------
        TypeError
            If any element of ``values`` is not a ``CodeableConcept``.
        """
        validated = []
        for value in values:
            if not isinstance(value, CodeableConcept):
                raise TypeError(
                    f"Expected value of type CodeableConcept but received {value}"
                )
            validated.append(value)
        # BUG FIX: store the validated list. Previously the raw `values`
        # iterable was passed to np.asarray, which yields an empty array when
        # `values` is a one-shot iterator already consumed by validation.
        self._data = np.asarray(validated, dtype=object)
        self._dtype = CodeableConceptDtype()

    @classmethod
    def _from_sequence(cls, scalars, dtype=None, copy=False):
        """Construct a new ExtensionArray from a sequence of scalars."""
        return cls(scalars, dtype=dtype)

    @property
    def ndim(self):
        # Extension arrays are always one-dimensional.
        return 1

    @classmethod
    def _from_factorized(cls, values, original):
        """Reconstruct an ExtensionArray after factorization."""
        return cls(values)

    def __getitem__(self, item):
        """Select a subset of self."""
        return self._data[item]

    def __len__(self) -> int:
        """Length of this array."""
        return len(self._data)

    def _formatter(self, boxed: bool = False):
        # Render elements with plain str().
        return str

    @property
    def nbytes(self):
        """The byte size of the data."""
        # BUG FIX: previously computed `self._itemsize * len(self)` but no
        # `_itemsize` attribute was ever defined (AttributeError at runtime).
        return self._data.nbytes

    @property
    def dtype(self):
        """An instance of 'ExtensionDtype'."""
        return self._dtype

    def isna(self):
        """A 1-D boolean array indicating which values are missing (None)."""
        return np.array([x is None for x in self._data], dtype=bool)

    def __arrow_array__(self, type=None):
        # Convert the underlying values to a pyarrow string Array, using each
        # concept's display text.
        import pyarrow

        return pyarrow.array([x.text for x in self._data], type="string")

    def isin(self, values):
        # Membership test per element against `values`.
        return np.array([x in values for x in self])

    def astype(self, *args, **kwargs):
        # NOTE(review): the requested dtype/copy args are ignored and the raw
        # object ndarray is returned — confirm this is intentional.
        return self._data

    def take(self, indexer, allow_fill=False, fill_value=None):
        """Take elements from an array.

        Relies on the take method defined in pandas:
        https://github.com/pandas-dev/pandas/blob/e246c3b05924ac1fe083565a765ce847fcad3d91/pandas/core/algorithms.py#L1483
        """
        from pandas.api.extensions import take

        data = self._data
        if allow_fill and fill_value is None:
            fill_value = self.dtype.na_value
        result = take(data, indexer, fill_value=fill_value, allow_fill=allow_fill)
        return self._from_sequence(result)

    def copy(self):
        """Return a copy of the array."""
        return type(self)(self._data.copy())

    @classmethod
    def _concat_same_type(cls, to_concat):
        """Concatenate multiple arrays of this type."""
        return cls(np.concatenate([x._data for x in to_concat]))
|
|
import numpy as np
import matplotlib.pyplot as plt
from scipy.optimize import linear_sum_assignment
from scipy.spatial.distance import pdist, squareform
import seaborn as sns
from factored_reps.scripts.seriation import compute_serial_matrix
def shuffle_vars(A, seed=None):
    """Return a copy of ``A`` with its rows randomly permuted.

    Seeds NumPy's global RNG with ``seed`` before shuffling, so a fixed seed
    yields a reproducible row order.
    """
    row_order = np.arange(len(A))
    np.random.seed(seed)
    np.random.shuffle(row_order)
    return A[row_order, :]
# Demo: compare Hungarian-assignment and hierarchical-clustering orderings of
# a random correlation matrix, shown side by side on one figure.
np.random.seed(0)
size = (20, 20)
A = np.random.randn(*size)
A = np.abs(np.corrcoef(A, rowvar=True))**(2 / 3)
A = shuffle_vars(A)
fig = plt.figure(figsize=(8, 8))
ax = fig.add_subplot(321)
im1 = ax.imshow(A, cmap='magma', vmin=0, vmax=1)
ax.set_yticks(range(size[0])), ax.set_xticks(range(size[1]))
ax.set_title('A correlation matrix')
# Reorder columns via the Hungarian algorithm (maximize matched correlation).
row_ind, col_ind = linear_sum_assignment(-A)
B = A[row_ind, :][:, col_ind]
ax = fig.add_subplot(322)
im2 = ax.imshow(B, cmap='magma', vmin=0, vmax=1)
ax.set_yticks(range(size[0])), ax.set_xticks(range(size[1]))
ax.set_xticklabels(col_ind), ax.set_yticklabels(row_ind)
ax.set_title('Hungarian algorithm')
# Reorder via hierarchical-clustering seriation with several linkage methods.
dist_mat = squareform(pdist(A))
for method, sp in zip(['ward', 'single', 'average', 'complete'], [323, 324, 325, 326]):
    _, res_order, res_linkage = compute_serial_matrix(dist_mat, method)
    C = A[res_order, :][:, res_order]
    ax = fig.add_subplot(sp)
    im2 = ax.imshow(C, cmap='magma', vmin=0, vmax=1)
    ax.set_yticks(range(size[0])), ax.set_xticks(range(size[1]))
    ax.set_xticklabels(res_order), ax.set_yticklabels(res_order)
    ax.set_title('Hier. Clustering (' + method + ')')
plt.tight_layout()
# BUG FIX: plt.show(ax) passed an Axes positionally; pyplot.show takes no
# positional Axes argument (removed in matplotlib >= 3.3) — call it bare.
plt.show()
#%%
ax = sns.clustermap(A, metric='correlation')
|
|
# -*- coding: utf-8 -*-
"""
Created on Wed Jun 29 19:18:23 2016
@author: Pedro Leal
"""
# =============================================================================
# Standard Python modules
# =============================================================================
import os, sys, time
from scipy.optimize import minimize
# =============================================================================
# Extension modules
# =============================================================================
from pyOpt import Optimization
from pyOpt import NSGA2
# =============================================================================
# Objective function
# =============================================================================
def objfunc_1(x):
    """Objective 1: sum of the two design variables.

    Returns the pyOpt-style triple (objective value, constraint list,
    fail flag).
    """
    return x[0] + x[1], [], 0
def objfunc_2(x):
    """Objective 2: difference of the two design variables.

    Returns the pyOpt-style triple (objective value, constraint list,
    fail flag).
    """
    return x[0] - x[1], [], 0
def objfunc_3(x):
    """Bi-objective variant: returns both (x0 - x1) and (x0 + x1) at once,
    with per-objective constraint lists and fail flags."""
    diff = x[0] - x[1]
    total = x[0] + x[1]
    g = []
    return (diff, total), (g, g), (0, 0)
# =============================================================================
#
# =============================================================================
# Problem constants (NOTE(review): chord/x_hinge/safety are defined but never
# used in this snippet — presumably leftovers from a larger airfoil script).
chord = 1.
x_hinge = 0.75
safety = 0.005*chord
# Build a two-objective pyOpt problem from objfunc_1 and objfunc_2.
opt_prob = Optimization('main', (objfunc_1, objfunc_2))
opt_prob.addObj("f1")
opt_prob.addObj("f2")
#xs_n
opt_prob.addVar('x1', 'c', lower = -1 , upper = 1, value = 6.817445e-001)
#ys_n
opt_prob.addVar('x2', 'c', lower = -1, upper = 1, value = -5.216475e-001)
#opt_prob.addObj('2', objfunc_2)
# Python 2 print statement: show the problem definition.
print opt_prob
# Global Optimization: run NSGA-II with a small population / generation count.
nsga2 = NSGA2()
nsga2.setOption('PopSize', 10)
nsga2.setOption('maxGen', 10)
nsga2(opt_prob)
# Report the first stored solution.
print opt_prob.solution(0)
|
|
"""
@ Author: ryanreadbooks
@ Time: 9/7/2020, 19:18
@ File name: geometry_utils.py
@ File description: define a bunch of helper functions that are related to the object model and geometry
"""
import numpy as np
import cv2
from configs.configuration import regular_config
def get_model_corners(model_pts: np.ndarray) -> np.ndarray:
    """
    Return the 8 axis-aligned bounding-box corners of a model point cloud.
    :param model_pts: model point cloud, shape of (~, 3), ~ means the number of points in model point cloud
    :return: the model corners, array of shape (8, 3)
    """
    min_x, min_y, min_z = np.min(model_pts, axis=0)
    max_x, max_y, max_z = np.max(model_pts, axis=0)
    # Corner ordering: x varies slowest, then y descending, then z descending.
    return np.array([
        [min_x, max_y, max_z],
        [min_x, max_y, min_z],
        [min_x, min_y, max_z],
        [min_x, min_y, min_z],
        [max_x, max_y, max_z],
        [max_x, max_y, min_z],
        [max_x, min_y, max_z],
        [max_x, min_y, min_z]])
def non_homo_to_homo(pts) -> np.ndarray:
    """
    Convert non-homogeneous coordinates to homogeneous coordinates by
    appending a trailing coordinate of 1 to every point.
    :param pts: point coordinates array of shape (~, m), m is usually 2 or 3, representing 2d coordinates and 3d coordinates
    :return: the homogeneous coordinates of the input points
    """
    n_pts, n_dims = pts.shape
    homo = np.ones((n_pts, n_dims + 1))
    homo[:, :-1] = pts
    return homo
def project_3d_2d(pts_3d: np.ndarray, camera_intrinsic: np.ndarray, transformation: np.ndarray) -> np.ndarray:
    """
    Project 3d points onto the 2d image plane with a pinhole camera model.
    :param pts_3d: 3d points to be projected, shape of (n, 3)
    :param camera_intrinsic: camera intrinsics, shape of (3, 3)
    :param transformation: the transformation matrix, shape (3, 4), [R|t]
    :return: array of projected points, shape of (n, 2)
    """
    # Homogenize the input points: (n, 3) -> (n, 4).
    pts_homo = non_homo_to_homo(pts_3d)
    # Single (3, 4) projection matrix K[R|t], applied row-wise.
    projection = camera_intrinsic @ transformation
    projected = (projection @ pts_homo.T).T  # (n, 3)
    # Dehomogenize by dividing through the last coordinate.
    projected = projected / projected[:, 2].reshape((-1, 1))
    return projected[:, :2]
def generate_camera_intrinsics(fx: float, fy: float, cx: float, cy: float) -> np.ndarray:
    """
    Assemble a pinhole camera intrinsics matrix from its four parameters.
    :param fx: fx
    :param fy: fy
    :param cx: cx
    :param cy: cy
    :return: the camera intrinsics matrix of shape (3, 3)
    """
    return np.array([[fx, 0., cx],
                     [0., fy, cy],
                     [0., 0., 1.]])
def transform_pts(points: np.ndarray, pose: np.ndarray) -> np.ndarray:
    """
    Apply a rigid transform [R|t] to a set of model points.
    :param points: model points, array with shape (n, 3)
    :param pose: pose array with shape (3, 4), [R|t]
    :return: the transformed points, array with shape (n, 3)
    """
    assert (points.shape[1] == 3)
    rotation = pose[:, :3]   # (3, 3) rotation
    translation = pose[:, 3]  # (3,) translation, broadcast over rows
    return points @ rotation.T + translation
def calculate_object_diameter(object_pts: np.ndarray, max_pts: int = 500) -> float:
    """
    Calculate the diameter (largest pairwise distance) of the input object.
    :param object_pts: 3d points of the object, array with shape (n, 3)
    :param max_pts: maximum number of points allowed; the pairwise distance
                    matrix is O(n^2) in memory, so larger clouds are rejected.
                    Default 500 preserves the original hard-coded guard.
    :return: the diameter
    :raises MemoryError: if the cloud has more than `max_pts` points
    """
    if object_pts.shape[0] > max_pts:
        raise MemoryError('array may be too large, which will crush the computer...!!!')
    from scipy.spatial.distance import cdist
    # distance: shape (n, n) matrix of all pairwise euclidean distances
    distance: np.ndarray = cdist(object_pts, object_pts, 'euclidean')
    return np.max(distance)
def depth_to_point_cloud(camera_k, depth: np.ndarray) -> np.ndarray:
    """
    Back-project a depth image into a 3d point cloud (nonzero pixels only).
    :param camera_k: the camera intrinsics, array with shape (3,3)
    :param depth: the depth image, array with shape (h, w)
    :return: point cloud, array with shape (n, 3)
    """
    rows, cols = depth.nonzero()
    z = depth[rows, cols]
    # Invert the pinhole projection: pixel -> metric camera coordinates.
    x = ((cols - camera_k[0, 2]) * z) / float(camera_k[0, 0])
    y = ((rows - camera_k[1, 2]) * z) / float(camera_k[1, 1])
    return np.stack([x, y, z], axis=1)
def solve_pnp(object_pts: np.ndarray, image_pts: np.ndarray, camera_k: np.ndarray, method=cv2.SOLVEPNP_ITERATIVE):
    """
    Solve the PnP problem
    :param object_pts: the points in object coordinate, shape (n, 3)
    :param image_pts: the corresponding points in the image, shape (n, 2)
    :param camera_k: the camera intrinsics matrix, shape (3, 3)
    :param method: the method used to solve PnP problem, default=cv2.SOLVEPNP_ITERATIVE
    :return: the calculated transformation matrix, shape (3, 4)
    """
    assert object_pts.shape[0] == image_pts.shape[0], 'number of points do not match.'
    # No lens distortion is modeled (distCoeffs=None).
    # dist_coef = np.zeros(shape=[8, 1], dtype='float64')
    # NOTE(review): useExtrinsicGuess=True with no rvec/tvec supplied relies
    # on OpenCV's zero-initialized guess — confirm this is intended.
    _, r_vec, t_vec = cv2.solvePnP(objectPoints=object_pts.astype(np.float64),
                                   imagePoints=image_pts.astype(np.float64),
                                   cameraMatrix=camera_k.astype(np.float64),
                                   distCoeffs=None,
                                   useExtrinsicGuess=True,
                                   flags=method)
    r_mat, _ = cv2.Rodrigues(r_vec)  # from rotation vector to rotation matrix (3, 3)
    # Stack into a single (3, 4) [R|t] matrix.
    transformation = np.hstack([r_mat, t_vec.reshape((-1, 1))])
    return transformation
def compute_translation_by_center_and_depth(center: np.ndarray, tz: float) -> np.ndarray:
    """
    Recover the full translation vector from the projected object center and
    its depth, using the configured camera intrinsics.
    @param center: center keypoint, shape (2, 1)
    @param tz: depth of the corresponding keypoint, singular
    @return: the computed translation vector, shape (3, 1)
    """
    cam = regular_config.camera
    # Invert the pinhole projection for the x/y components at depth tz.
    tx = (center[0] - cam[0, 2]) * tz / cam[0, 0]
    ty = (center[1] - cam[1, 2]) * tz / cam[1, 1]
    return np.array([tx, ty, tz]).reshape((-1, 1))
|
|
import numpy as np
from PIL import Image
import cv2
from os.path import dirname as ospdn
from .file import may_make_dir
def make_im_grid(ims, n_rows, n_cols, space, pad_val):
    """Make a grid of images with space in between.
    Args:
        ims: a list of [3, im_h, im_w] images
        n_rows: num of rows (auto-computed with n_cols when both are None)
        n_cols: num of columns
        space: the num of pixels between two images
        pad_val: scalar, or numpy array with shape [3]; the color of the space
    Returns:
        ret_im: a numpy array with shape [3, H, W]
    """
    assert (ims[0].ndim == 3) and (ims[0].shape[0] == 3)
    if (n_rows is None) and (n_cols is None):
        # Choose a near-square layout.
        n_cols = int(np.ceil(np.sqrt(len(ims))))
        n_rows = int(np.ceil(1. * len(ims) / n_cols))
    else:
        assert len(ims) <= n_rows * n_cols
    h, w = ims[0].shape[1:]
    grid_h = h * n_rows + space * (n_rows - 1)
    grid_w = w * n_cols + space * (n_cols - 1)
    if isinstance(pad_val, np.ndarray):
        # reshape to [3, 1, 1] so it broadcasts across the canvas
        pad_val = pad_val.flatten()[:, np.newaxis, np.newaxis]
    canvas = (np.ones([3, grid_h, grid_w]) * pad_val).astype(ims[0].dtype)
    # Paste images row-major into their cells.
    for idx, im in enumerate(ims):
        top = (idx // n_cols) * (h + space)
        left = (idx % n_cols) * (w + space)
        canvas[:, top:top + h, left:left + w] = im
    return canvas
def read_im(im_path, convert_rgb=True, resize_h_w=(128, 64), transpose=True):
    """Load an image from disk, optionally converting to RGB, resizing to
    (h, w), and transposing to channel-first [3, H, W] layout."""
    img = Image.open(im_path)
    if convert_rgb:
        # shape [H, W, 3]
        img = img.convert("RGB")
    arr = np.asarray(img)
    # Resize only when a target is given and the image differs from it.
    if resize_h_w is not None and (arr.shape[0], arr.shape[1]) != resize_h_w:
        # cv2.resize expects (w, h), hence the reversal.
        arr = cv2.resize(arr, resize_h_w[::-1], interpolation=cv2.INTER_LINEAR)
    if transpose:
        # shape [3, H, W]
        arr = arr.transpose(2, 0, 1)
    return arr
def save_im(im, save_path, transpose=False, check_bound=False):
    """
    Write an image array to disk (creating the parent directory if needed).
    im: (1) shape [3, H, W], transpose should be True
        (2) shape [H, W, 3], transpose should be False
        (3) shape [H, W], transpose should be False
    """
    may_make_dir(ospdn(save_path))
    if transpose:
        im = im.transpose(1, 2, 0)
    if check_bound:
        # Clamp into the valid uint8 range before the cast truncates.
        im = im.clip(0, 255)
    im = im.astype(np.uint8)
    # Grayscale for 2-D input, RGB otherwise.
    pil_mode = 'L' if im.ndim == 2 else 'RGB'
    Image.fromarray(im, mode=pil_mode).save(save_path)
def heatmap_to_color_im(
        hmap,
        normalize=False, min_max_val=None,
        resize=False, resize_w_h=None,
        transpose=False
):
    """
    Convert a single-channel heatmap into a JET-colored uint8 image.
    Args:
        hmap: a numpy array with shape [h, w]
        normalize: whether to normalize the value to range [0, 1]. If `False`,
            make sure that `hmap` has been in range [0, 1]
        min_max_val: optional (min, max) pair used for normalization
        resize: whether to resize to `resize_w_h` (given as (w, h)) first
        transpose: return [3, h, w] instead of [h, w, 3]
    Return:
        hmap: shape [h, w, 3] if transpose=False, shape [3, h, w] if
        transpose=True, with value in range [0, 255], uint8
    """
    if resize:
        hmap = cv2.resize(hmap, tuple(resize_w_h), interpolation=cv2.INTER_LINEAR)
    # normalize to interval [0, 1]
    if normalize:
        if min_max_val is None:
            lo, hi = np.min(hmap), np.max(hmap)
        else:
            lo, hi = min_max_val
        hmap = (hmap - lo) / (float(hi - lo) + 1e-8)
    # cv2.applyColorMap with COLORMAP_JET maps 0 to RED and 1 to BLUE, the
    # reverse of the usual convention, so invert the values first.
    hmap = 1 - hmap
    hmap = (hmap * 255).clip(0, 255).astype(np.uint8)
    hmap = cv2.applyColorMap(hmap, cv2.COLORMAP_JET)
    if transpose:
        hmap = hmap.transpose(2, 0, 1)
    return hmap
def restore_im(im, std, mean, transpose=False, resize_w_h=None):
    """Invert the normalization process.
    Args:
        im: normalized im with shape [3, h, w]
    Returns:
        im: shape [h, w, 3] if transpose=True, shape [3, h, w] if
        transpose=False, with value in range [0, 255], uint8
    """
    # Undo (x - mean) / std, broadcasting per channel.
    std_arr = np.array(std)[:, np.newaxis, np.newaxis]
    mean_arr = np.array(mean)[:, np.newaxis, np.newaxis]
    im = im * std_arr + mean_arr
    im = (im * 255).clip(0, 255).astype(np.uint8)
    if resize_w_h is not None:
        # cv2 wants HWC layout and a (w, h) size tuple.
        im = cv2.resize(im.transpose(1, 2, 0), tuple(resize_w_h),
                        interpolation=cv2.INTER_LINEAR).transpose(2, 0, 1)
    if transpose:
        im = np.transpose(im, [1, 2, 0])
    return im
|
|
from Q50_config import *
import sys, os
from GPSReader import *
from GPSTransforms import *
from VideoReader import *
from LidarTransforms import *
from ColorMap import *
from transformations import euler_matrix
import numpy as np
import cv2
from ArgParser import *
from scipy.interpolate import griddata
import matplotlib.pyplot as plt
from matplotlib.cm import jet, rainbow
from LidarIntegrator import start_fn
WINDOW = 2#50*2.5
#IMG_WIDTH = 1280
#IMG_HEIGHT = 960
IMG_WIDTH = 2080
IMG_HEIGHT = 1552
def cloudToPixels(cam, pts_wrt_cam):
    """Project 3-D camera-frame points to pixel coordinates and build a mask
    of points that land inside the image (with a half-marker border), lie in
    front of the camera, and are not too close to it."""
    width = 4
    half = width / 2
    (pix, J) = cv2.projectPoints(pts_wrt_cam.transpose(),
                                 np.array([0.0, 0.0, 0.0]),
                                 np.array([0.0, 0.0, 0.0]),
                                 cam['KK'], cam['distort'])
    pix = np.around(pix.transpose()[:, 0, :]).astype(np.int32)
    # Inside the frame, leaving a `half`-pixel border for drawing markers.
    mask = (pix[0, :] > 0 + half) & (pix[1, :] > 0 + half)
    mask = mask & (pix[0, :] < 2080 - half) & (pix[1, :] < 1552 - half)
    # In front of the camera and farther than sqrt(3) m from it.
    mask = mask & (pts_wrt_cam[2, :] > 0)
    mask = mask & (np.sum(pts_wrt_cam[0:3, :] ** 2, axis=0) > 3)
    return (pix, mask)
def localMapToPixels(map_data, imu_transforms_t, T_from_i_to_l, cam):
    """Project accumulated map points (expressed w.r.t. the IMU pose at time
    0) into the camera at time t, returning the pixel coordinates, a validity
    mask, and the points in the time-t IMU frame.

    NOTE(review): `array` and `dot` come from star imports at the top of the
    file (presumably numpy via one of the helper modules) — confirm.
    """
    # load nearby map frames; homogenize to 4 x N
    pts_wrt_imu_0 = array(map_data[:,0:3]).transpose()
    pts_wrt_imu_0 = np.vstack((pts_wrt_imu_0,
        np.ones((1,pts_wrt_imu_0.shape[1]))))
    # transform points from imu_0 to imu_t
    pts_wrt_imu_t = np.dot( np.linalg.inv(imu_transforms_t), pts_wrt_imu_0)
    # transform points from imu_t to lidar_t
    pts_wrt_lidar_t = np.dot(T_from_i_to_l, pts_wrt_imu_t);
    # transform points from lidar_t to camera_t: translate in the lidar
    # frame, rotate into the camera frame, then apply the extrinsics cam['E']
    pts_wrt_camera_t = pts_wrt_lidar_t.transpose()[:, 0:3] + cam['displacement_from_l_to_c_in_lidar_frame']
    pts_wrt_camera_t = dot(R_to_c_from_l(cam),
        pts_wrt_camera_t.transpose())
    pts_wrt_camera_t = np.vstack((pts_wrt_camera_t,
        np.ones((1,pts_wrt_camera_t.shape[1]))))
    pts_wrt_camera_t = dot(cam['E'], pts_wrt_camera_t)
    pts_wrt_camera_t = pts_wrt_camera_t[0:3,:]
    # reproject camera_t points in camera frame
    (pix, mask) = cloudToPixels(cam, pts_wrt_camera_t)
    return (pix, mask, pts_wrt_imu_t)
def trackbarOnchange(t, prev_t):
    # Seek the module-level `video_reader` only when the trackbar jumps more
    # than one frame; single-step changes are normal playback and need no seek.
    if abs(t - prev_t) > 1:
        video_reader.setFrame(t)
if __name__ == '__main__':
    # CLI: argv[1] = run description, argv[2] = video path (5th-from-last char
    # encodes the camera number), argv[3] = .npz map file.
    args = parse_args(sys.argv[1], sys.argv[2])
    cam_num = int(sys.argv[2][-5])
    video_file = args['video']
    params = args['params']
    cam = params['cam'][cam_num-1]
    video_reader = VideoReader(video_file)
    gps_reader = GPSReader(args['gps'])
    GPSData = gps_reader.getNumericData()
    imu_transforms = IMUTransforms(GPSData)
    # Inverse of the lidar->IMU extrinsics, used to move points back to lidar.
    T_from_i_to_l = np.linalg.inv(params['lidar']['T_from_l_to_i'])
    all_data = np.load(sys.argv[3])
    map_data = all_data['data']
    #map_data = map_data[map_data[:,3] > 60, :]
    # map points are defined w.r.t the IMU position at time 0
    # each entry in map_data is (x,y,z,intensity,framenum).
    print "Hit 'q' to quit"
    trackbarInit = False
    interp_grid = np.mgrid[0:IMG_WIDTH, 0:IMG_HEIGHT]
    #pix_list = list()
    #depth_list = list()
    #img_list = list()
    fps = 10 # PARAM
    fourcc = cv2.cv.CV_FOURCC(*'MJPG')
    video_writer = cv2.VideoWriter()
    video_writer.open('tmp.avi', fourcc, fps, (IMG_WIDTH, IMG_HEIGHT)) # PARAM
    video_reader.setFrame(start_fn)
    #while len(pix_list) < 10:
    while True:
        # Skip ahead 10 frames per output frame.
        # NOTE(review): only the last read's `success` is checked below, so a
        # failure mid-burst is silently ignored — confirm this is intended.
        for count in range(10):
            (success, I) = video_reader.getNextFrame()
        #print I.shape
        if not success:
            print 'Done reading video', video_file
            break
        t = video_reader.framenum - 1
        print t
        # Keep only map points recorded within WINDOW frames after t.
        mask_window = (map_data[:,4] < t + WINDOW) & (map_data[:,4] > t )
        map_data_copy = array(map_data[mask_window, :])
        if map_data_copy.size == 0:
            print 'Map data empty'
            break
        # reproject
        (pix, mask, pts_wrt_imu_t) = localMapToPixels(map_data_copy, imu_transforms[t,:,:], T_from_i_to_l, cam)
        # draw
        pix = pix[:, mask]
        # NOTE(review): `intensity` is computed but never used below.
        intensity = map_data_copy[mask, 3]
        depth = pts_wrt_imu_t[0, mask]
        # Densify the sparse projected depths over the full image grid.
        img_interp = griddata(pix.T, depth, interp_grid.T)
        img_interp[np.isnan(img_interp)] = max(depth) # PARAM
        print max(depth)
        img_color = rainbow(img_interp / max(depth)) # PARAM
        #plt.imshow(img_color)
        #plt.show()
        img_color = img_color[:, :, 0:3]
        img_color = np.uint8(img_color * 255)
        # Blend weights: currently 100% depth colormap, 0% camera frame.
        img_out = np.uint8(0.0 * I + 1.0*img_color)
        video_writer.write(img_out)
        #pix_list.append(pix[:, mask])
        #depth_list.append(depth)
        #img_list.append(I)
        #heat_colors = heatColorMapFast(depth, 0, 100)
        #for p in range(4):
            #I[pix[1,mask]+p, pix[0,mask], :] = heat_colors[0,:,:]
            #I[pix[1,mask], pix[0,mask]+p, :] = heat_colors[0,:,:]
        #cv2.imshow(video_file, cv2.pyrDown(I))
        #if not trackbarInit:
            #cv2.createTrackbar('trackbar', video_file, 0, int(video_reader.total_frame_count), lambda x: trackbarOnchange(x, t))
            #trackbarInit = True
        #else:
            #cv2.setTrackbarPos('trackbar', video_file, t)
        #keycode = cv2.waitKey(1)
        #if keycode == 113:
            #break
    video_writer.release()
    print 'Played %d frames' % t
    '''
    # Save pickled data
    import pickle
    data = {
        'pix_list': pix_list,
        'depth_list': depth_list,
        'img_list': img_list
    }
    pickle.dump(data, open('pix_depth.pkl', 'wb'))
    '''
|
|
import cv2
import numpy as np
from xview.dataset import read_mask
import matplotlib.pyplot as plt
from xview.postprocessing import make_predictions_floodfill, make_predictions_dominant_v2
from xview.utils.inference_image_output import make_rgb_image
import pytest
@pytest.mark.parametrize(["actual", "expected"], [
    ("hurricane-florence_00000115_post_disaster.npy", "hurricane-florence_00000115_post_disaster.png"),
    ("hurricane-florence_00000475_post_disaster.npy", "hurricane-florence_00000475_post_disaster.png"),
])
def test_watershed(actual, expected):
    """Visual check: run dominant-class post-processing on raw damage logits
    and display ground truth, argmax, localization, and damage masks."""
    dmg = np.load(actual)
    dmg_true = read_mask(expected)
    loc_cls, dmg_cls = make_predictions_dominant_v2(dmg)
    # Show each mask in its own figure, in the same order as before.
    for mask in (dmg_true, np.argmax(dmg, axis=0), loc_cls, dmg_cls):
        plt.figure()
        plt.imshow(make_rgb_image(mask))
        plt.show()
def test_watershed_with_image():
    """Visual check: seed cv2.watershed with the damage mask (restricted to
    localized pixels) and display the damage, localization, and marker maps."""
    dmg = read_mask("test_damage_00121_prediction.png")
    loc = read_mask("test_localization_00121_prediction.png")
    img = cv2.imread("test_post_00121.png")
    # Fix mask: damage only counts where something was localized.
    dmg[loc == 0] = 0
    seed = dmg.copy()
    seed[loc == 0] = 0
    markers = cv2.watershed(img, seed.astype(int))
    # Watershed leaves 0 on ridge lines; fold those into class 1.
    markers[markers == 0] = 1
    for layer in (dmg, loc, markers):
        plt.figure()
        plt.imshow(layer)
        plt.show()
|
|
## setup_mnist.py -- mnist data and model loading code
##
## Copyright (C) 2016, Nicholas Carlini <nicholas@carlini.com>.
##
## This program is licenced under the BSD 2-Clause licence,
## contained in the LICENCE file in this directory.
import tensorflow as tf
import numpy as np
import os
import pickle
import gzip
import urllib.request
from tensorflow.contrib.keras.api.keras.models import Sequential
from tensorflow.contrib.keras.api.keras.layers import Dense, Dropout, Activation, Flatten
from tensorflow.contrib.keras.api.keras.layers import Conv2D, MaxPooling2D
from tensorflow.contrib.keras.api.keras.layers import Lambda
from tensorflow.contrib.keras.api.keras.models import load_model
from tensorflow.contrib.keras.api.keras import backend as K
def extract_data(filename, num_images):
    """Read `num_images` MNIST images from a gzipped idx3-ubyte file.

    Returns a float32 array of shape (num_images, 28, 28, 1) with pixel
    values rescaled from [0, 255] to [-0.5, 0.5].
    """
    with gzip.open(filename) as stream:
        stream.read(16)  # skip the idx3 header
        raw = stream.read(num_images * 28 * 28)
    pixels = np.frombuffer(raw, dtype=np.uint8).astype(np.float32)
    pixels = (pixels / 255) - 0.5
    return pixels.reshape(num_images, 28, 28, 1)
def extract_labels(filename, num_images):
    """Read `num_images` MNIST labels from a gzipped idx1-ubyte file and
    one-hot encode them into a float32 array of shape (num_images, 10)."""
    with gzip.open(filename) as stream:
        stream.read(8)  # skip the idx1 header
        raw = stream.read(1 * num_images)
    labels = np.frombuffer(raw, dtype=np.uint8)
    return (np.arange(10) == labels[:, None]).astype(np.float32)
class MNIST:
    """Downloads (when missing) and loads the MNIST train/validation/test
    splits as float32 arrays in [-0.5, 0.5] with one-hot labels."""
    def __init__(self):
        if not os.path.exists("data"):
            os.mkdir("data")
        files = ["train-images-idx3-ubyte.gz",
                 "t10k-images-idx3-ubyte.gz",
                 "train-labels-idx1-ubyte.gz",
                 "t10k-labels-idx1-ubyte.gz"]
        for name in files:
            dest = "data/" + name
            # ROBUSTNESS FIX: only download files that are missing; previously
            # every instantiation re-downloaded all four archives.
            # NOTE(review): yann.lecun.com often rejects unauthenticated
            # requests nowadays — confirm this source still works or mirror it.
            if not os.path.exists(dest):
                urllib.request.urlretrieve('http://yann.lecun.com/exdb/mnist/' + name, dest)
        train_data = extract_data("data/train-images-idx3-ubyte.gz", 60000)
        train_labels = extract_labels("data/train-labels-idx1-ubyte.gz", 60000)
        self.test_data = extract_data("data/t10k-images-idx3-ubyte.gz", 10000)
        self.test_labels = extract_labels("data/t10k-labels-idx1-ubyte.gz", 10000)
        # Hold out the first 5000 training examples for validation.
        VALIDATION_SIZE = 5000
        self.validation_data = train_data[:VALIDATION_SIZE, :, :, :]
        self.validation_labels = train_labels[:VALIDATION_SIZE]
        self.train_data = train_data[VALIDATION_SIZE:, :, :, :]
        self.train_labels = train_labels[VALIDATION_SIZE:]
        print(" ========= data type ============")
        print("data type = {}".format(self.test_data))
class MNISTModel:
    """Keras CNN for 28x28x1 MNIST digits (conv-conv-pool twice, then two
    dense layers), optionally loading pretrained weights from `restore`."""
    def __init__(self, restore = None, session=None, use_log=False, use_brelu = False):
        # ReLU clipped at 1; selectable via use_brelu.
        def bounded_relu(x):
            return K.relu(x, max_value=1)
        if use_brelu:
            activation = bounded_relu
        else:
            activation = 'relu'
        # Input geometry and class count (read by attack code elsewhere).
        self.num_channels = 1
        self.image_size = 28
        self.num_labels = 10
        model = Sequential()
        model.add(Conv2D(32, (3, 3),
                         input_shape=(28, 28, 1)))
        model.add(Activation(activation))
        model.add(Conv2D(32, (3, 3)))
        model.add(Activation(activation))
        model.add(MaxPooling2D(pool_size=(2, 2)))
        model.add(Conv2D(64, (3, 3)))
        model.add(Activation(activation))
        model.add(Conv2D(64, (3, 3)))
        model.add(Activation(activation))
        model.add(MaxPooling2D(pool_size=(2, 2)))
        model.add(Flatten())
        model.add(Dense(200))
        model.add(Activation(activation))
        model.add(Dense(200))
        model.add(Activation(activation))
        model.add(Dense(10))
        # When use_log is set, append softmax so the model outputs class
        # probabilities (used for black-box attacks); otherwise raw logits.
        # NOTE(review): the original comment said "log probability", but
        # softmax yields probabilities — confirm downstream expectations.
        if use_log:
            model.add(Activation('softmax'))
        if restore:
            model.load_weights(restore)
        # Per-layer activation probes for the conv/dense layers.
        layer_outputs = []
        for layer in model.layers:
            if isinstance(layer, Conv2D) or isinstance(layer, Dense):
                layer_outputs.append(K.function([model.layers[0].input], [layer.output]))
        self.model = model
        self.layer_outputs = layer_outputs
    def predict(self, data):
        # Apply the Keras model to a batch tensor.
        return self.model(data)
class TwoLayerMNISTModel:
    """Small fully-connected MNIST model: flatten -> 1024-unit layer with a
    scaled softplus (x10 before, x0.1 after) -> 10-way output."""
    def __init__(self, restore = None, session=None, use_log=False):
        # Input geometry and class count (read by attack code elsewhere).
        self.num_channels = 1
        self.image_size = 28
        self.num_labels = 10
        model = Sequential()
        model.add(Flatten(input_shape=(28, 28, 1)))
        model.add(Dense(1024))
        # Scale-up / softplus / scale-down: a smooth ReLU approximation.
        model.add(Lambda(lambda x: x * 10))
        model.add(Activation('softplus'))
        model.add(Lambda(lambda x: x * 0.1))
        model.add(Dense(10))
        # When use_log is set, append softmax to output probabilities (used
        # for black-box attacks); otherwise raw logits.
        if use_log:
            model.add(Activation('softmax'))
        if restore:
            model.load_weights(restore)
        # Per-layer activation probes for the conv/dense layers. (No Conv2D
        # layers exist here, so only the Dense layers are captured.)
        layer_outputs = []
        for layer in model.layers:
            if isinstance(layer, Conv2D) or isinstance(layer, Dense):
                layer_outputs.append(K.function([model.layers[0].input], [layer.output]))
        self.layer_outputs = layer_outputs
        self.model = model
    def predict(self, data):
        # Apply the Keras model to a batch tensor.
        return self.model(data)
class MadryMNISTModel(object):
    """Reconstruction of the Madry et al. adversarially-trained MNIST CNN,
    rebuilt as a raw TF graph and restored from a checkpoint directory."""
    class PredictModel(object):
        """Lazy inference wrapper: builds the graph on first predict() call
        and reuses the placeholder/output tensors afterwards."""
        def __init__(self, sess, predict_gen):
            self.input = None      # placeholder, created lazily
            self.output = None     # output tensor, created lazily
            self.sess = sess
            self.predict_gen = predict_gen
        def predict(self, data):
            if self.input is None:
                print("creating a new graph for inference")
                self.input = tf.placeholder(dtype=tf.float32, shape = [None, 28, 28, 1])
                self.output = self.predict_gen(self.input, "Inference_MadryMNIST")
            return self.sess.run([self.output], feed_dict = {self.input: data})
    def __init__(self, restore = None, session=None, use_log=False):
        # Input geometry and class count (read by attack code elsewhere).
        self.num_channels = 1
        self.image_size = 28
        self.num_labels = 10
        self.sess = session
        self.restore = restore
        self.use_log = use_log
        # Locate the newest checkpoint in the given directory.
        self.model_file = tf.train.latest_checkpoint(restore)
        if self.model_file is None:
            raise(FileNotFoundError("model directory " + restore + " is invalid"))
        self.model = self.PredictModel(self.sess, self.predict)
    def predict(self, data, name_prefix = "MadryMNIST"):
        """Build the network on `data`, restore checkpoint weights into the
        freshly created variables, and return the output tensor."""
        with tf.name_scope(name_prefix):
            # keep a record of the variables we created
            start_vars = set(x.name for x in tf.global_variables())
            # our data range is [-0.5,0.5], Madry's model is [0,1]
            self.x_input = data + 0.5
            self.x_image = tf.reshape(self.x_input, [-1, 28, 28, 1])
            # first convolutional layer
            W_conv1 = self._weight_variable([5,5,1,32])
            b_conv1 = self._bias_variable([32])
            h_conv1 = tf.nn.relu(self._conv2d(self.x_image, W_conv1) + b_conv1)
            h_pool1 = self._max_pool_2x2(h_conv1)
            # second convolutional layer
            W_conv2 = self._weight_variable([5,5,32,64])
            b_conv2 = self._bias_variable([64])
            h_conv2 = tf.nn.relu(self._conv2d(h_pool1, W_conv2) + b_conv2)
            h_pool2 = self._max_pool_2x2(h_conv2)
            # first fully connected layer
            W_fc1 = self._weight_variable([7 * 7 * 64, 1024])
            b_fc1 = self._bias_variable([1024])
            h_pool2_flat = tf.reshape(h_pool2, [-1, 7 * 7 * 64])
            h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)
            # output layer
            W_fc2 = self._weight_variable([1024,10])
            b_fc2 = self._bias_variable([10])
            pre_softmax = tf.matmul(h_fc1, W_fc2) + b_fc2
            # use_log -> probabilities via softmax, else raw logits.
            if self.use_log:
                output = tf.nn.softmax(pre_softmax)
            else:
                output = pre_softmax
            # Variables created by this call are exactly those not present
            # before; these are the ones the checkpoint must fill.
            end_vars = tf.global_variables()
            new_vars = [x for x in end_vars if x.name not in start_vars]
            # remove the scope name during reload
            var_trans_dict = {}
            for var in new_vars:
                var_trans_dict[var.op.name.replace(name_prefix + '/', '')] = var
            # restore model
            saver = tf.train.Saver(var_list=var_trans_dict)
            saver.restore(self.sess, self.model_file)
            # self.model.output = output
            # self.model.input = data
            return output
    @staticmethod
    def _weight_variable(shape):
        # Truncated-normal init, as in the original Madry implementation.
        initial = tf.truncated_normal(shape, stddev=0.1)
        return tf.Variable(initial)
    @staticmethod
    def _bias_variable(shape):
        # Small positive bias init.
        initial = tf.constant(0.1, shape = shape)
        return tf.Variable(initial)
    @staticmethod
    def _conv2d(x, W):
        # Stride-1 SAME convolution.
        return tf.nn.conv2d(x, W, strides=[1,1,1,1], padding='SAME')
    @staticmethod
    def _max_pool_2x2( x):
        # 2x2 SAME max pooling.
        return tf.nn.max_pool(x,
                              ksize = [1,2,2,1],
                              strides=[1,2,2,1],
                              padding='SAME')
|
|
# coding: utf-8

# <h1>Table of Contents<span class="tocSkip"></span></h1>
# <div class="toc"><ul class="toc-item"><li><span><a href="#Use-pyresample-to-make-a-projected-image" data-toc-modified-id="Use-pyresample-to-make-a-projected-image-1"><span class="toc-item-num">1 </span>Use pyresample to make a projected image</a></span></li><li><span><a href="#Read-the-lons/lats-from-the-MYD03-file" data-toc-modified-id="Read-the-lons/lats-from-the-MYD03-file-2"><span class="toc-item-num">2 </span>Read the lons/lats from the MYD03 file</a></span></li><li><span><a href="#get-the-map-projection-from-corners.json" data-toc-modified-id="get-the-map-projection-from-corners.json-3"><span class="toc-item-num">3 </span>get the map projection from corners.json</a></span></li><li><span><a href="#Use-pyresample-to-define-a-new-grid-in-this-projection" data-toc-modified-id="Use-pyresample-to-define-a-new-grid-in-this-projection-4"><span class="toc-item-num">4 </span>Use pyresample to define a new grid in this projection</a></span></li><li><span><a href="#resample-the-longitudes-on-this-grid" data-toc-modified-id="resample-the-longitudes-on-this-grid-5"><span class="toc-item-num">5 </span>resample the longitudes on this grid</a></span></li><li><span><a href="#replace-missing-values-with-floating-point-nan" data-toc-modified-id="replace-missing-values-with-floating-point-nan-6"><span class="toc-item-num">6 </span>replace missing values with floating point nan</a></span></li><li><span><a href="#Plot-the-image-using-cartopy" data-toc-modified-id="Plot-the-image-using-cartopy-7"><span class="toc-item-num">7 </span>Plot the image using cartopy</a></span></li></ul></div>

# # Use pyresample to make a projected image
#
# In the cartopy_mapping_pyproj notebook we stored projection
# coords in a json file called corners.json. This notebook
# reads that information back in to plot lats/lons on a map

# In[1]:

# Imports consolidated here (the original repeated json/pprint/a301 and
# imported pathlib.Path only after its first use).
import json
import pdb
import pprint
import shutil
from pathlib import Path

import cartopy
import cartopy.crs as ccrs
import matplotlib.pyplot as plt
import numpy as np
from pyhdf.SD import SD, SDC
from pyresample import kd_tree
from pyresample import load_area, save_quicklook, SwathDefinition

import a301
from a301.utils.data_read import download

# Set True to (re)download the MYD03 granule and copy it into the a301
# data directory; False assumes it is already in place.
read_data = False
if read_data:
    filename_M3 = 'MYD03.A2013222.2105.006.2013223155808.hdf'
    download(filename_M3)
    # BUG FIX: the original iterated over [filename_M3, filename_M2], but
    # filename_M2 was never defined anywhere in the script (and Path was
    # imported only later), so this branch raised NameError when enabled.
    for filename in [filename_M3]:
        local_file = Path.cwd() / Path(filename)
        to_file = a301.data_dir / Path(filename)
        print(f'copy {local_file} to {to_file}')
        shutil.copy(local_file, to_file)

# # Read the lons/lats from the MYD03 file
#
# **substitute your filename**

# In[3]:

# Read the lats and lons from the MYD03 file
filename_M3 = 'MYD03.A2013222.2105.006.2013223155808.hdf'
m3_path = a301.data_dir / Path(filename_M3)
print(f'reading {m3_path}')
m3_file = SD(str(m3_path), SDC.READ)
lats = m3_file.select('Latitude').get()
lons = m3_file.select('Longitude').get()

# # get the map projection from corners.json
#
# Get the map projection and extent from corners.json

# In[4]:

json_file = a301.data_dir / Path('corners.json')
with open(json_file, 'r') as f:
    map_dict = json.load(f)
pprint.pprint(map_dict)

# # Use pyresample to define a new grid in this projection

# In[5]:

proj_params = map_dict['proj4_params']
swath_def = SwathDefinition(lons, lats)
area_def = swath_def.compute_optimal_bb_area(proj_dict=proj_params)

# In[6]:

dir(area_def)

# # resample the longitudes on this grid

# In[7]:

fill_value = -9999.
area_name = 'modis swath 5min granule'
image_lons = kd_tree.resample_nearest(swath_def, lons.ravel(),
                                      area_def, radius_of_influence=5000,
                                      nprocs=2, fill_value=fill_value)
print(f'\ndump area definition:\n{area_def}\n')
print((f'\nx and y pixel dimensions in meters:'
       f'\n{area_def.pixel_size_x}\n{area_def.pixel_size_y}\n'))

# # replace missing values with floating point nan

# In[8]:

nan_value = np.array([np.nan], dtype=np.float32)[0]
image_lons[image_lons < -9000] = nan_value

# # Plot the image using cartopy

# In[9]:

crs = area_def.to_cartopy_crs()
ax = plt.axes(projection=crs)
ax.coastlines()
ax.set_global()
plt.imshow(image_lons, transform=crs, extent=crs.bounds, origin='upper')
plt.colorbar();

# In[27]:

crs.globe.to_proj4_params()
|
|
from __future__ import print_function
import pickle
import numpy as np
from scipy.optimize import curve_fit
# Reference each pick to a station index
def getPickStaIdxs(pickSet, staNames):
    """Map each pick's station name (column 0) to its index in staNames.

    Picks whose station has no metadata (name absent from staNames) are
    dropped. Returns the filtered pick set and the matching index array.
    """
    uniqueStas, inverseIdx = np.unique(pickSet[:, 0], return_inverse=True)
    staIdxs = np.full(len(inverseIdx), -1, dtype=int)
    for uniquePos, sta in enumerate(uniqueStas):
        if sta in staNames:
            hitRows = np.where(inverseIdx == uniquePos)[0]
            staIdxs[hitRows] = np.where(staNames == sta)[0][0]
    # Drop any picks whose station never matched the metadata
    keep = np.where(staIdxs != -1)[0]
    return pickSet[keep], staIdxs[keep]
# Convert spherical coord numpy array to cartesian
# [[Lon1,Lat1],[Lon2,Lat2],...]
def sph2xyz(p):
    """Convert [[lon, lat], ...] (degrees) to unit cartesian vectors.

    Latitude is shifted by +90 to a colatitude before the standard
    spherical-to-cartesian formulas are applied.
    Returns an (n, 3) array of unit vectors.
    """
    # BUG FIX: the original did `p[:,1] += 90` directly on the argument,
    # mutating the caller's array in place. Work on a private copy.
    p = np.array(p, dtype=float)
    p[:, 1] += 90
    # Convert from deg to rad
    p = np.pi * p / 180.0
    xyz = [np.sin(p[:, 1]) * np.cos(p[:, 0]),
           np.sin(p[:, 1]) * np.sin(p[:, 0]),
           np.cos(p[:, 1])]
    return np.array(xyz).T
# Convert from cartesion to spherical
def xyz2sph(p):
    """Convert one unit cartesian vector back to [lon, lat] in degrees.

    Inverse of sph2xyz's convention: arccos(z) gives the colatitude,
    which is shifted by -90 after the degree conversion.
    """
    colat = np.arccos(p[2])
    lon = np.arctan2(p[1], p[0])
    degrees = np.array([lon, colat])
    # Convert from rad to deg, then colatitude back to latitude
    degrees *= 180.0 / np.pi
    degrees[1] -= 90
    return degrees
# Get the angle in degrees between two vectors...
# ...assumes both vectors have a length of one (clips for rounding error in dot product)
# ...this is done to ignore speed reduction in repeatedly normalizing the station xyz vectors
def vecsAngle(v1, v2):
    """Angle in degrees between unit vectors (dot product clipped to [-1, 1])."""
    cosAngle = np.clip(np.dot(v1, v2), -1, 1)
    return np.degrees(np.arccos(cosAngle))
# 3rd order polynomial fixed at 0,0
def poly3_Fixed(x, a, b, c):
    """Cubic with no constant term (passes through the origin), clipped to [0, 2000]."""
    unclipped = a * x + b * x**2 + c * x**3
    return np.clip(unclipped, 0, 2000)
# 3rd order polynomial
def poly3(x, a, b, c, d):
    """Full cubic polynomial, clipped to [0, 2000]."""
    unclipped = a * x + b * x**2 + c * x**3 + d
    return np.clip(unclipped, 0, 2000)
# Function to calculate the arrival times vs epicentral location
# ti=function((staX,staY,staZ,isPphase,isSphase,p1,p2,p3,s1,s2,s3),x0,y0,z0,t0)
# ti=t0+travelTime
def globalLocEpiFunc(data, x0, y0, z0, t0):
    """Predicted arrival times for a trial epicenter (x0, y0, z0) and origin time t0."""
    hypo = np.array([x0, y0, z0])
    hypo = hypo / np.sqrt(np.sum(hypo**2))  # normalize the trial position to unit length
    pCoeffs = data[:, 5:8]
    sCoeffs = data[:, 8:11]
    dists = vecsAngle(data[:, :3], hypo)
    pTimes = poly3_Fixed(dists, pCoeffs[:, 0], pCoeffs[:, 1], pCoeffs[:, 2])
    sTimes = poly3_Fixed(dists, sCoeffs[:, 0], sCoeffs[:, 1], sCoeffs[:, 2])
    # Each pick contributes only its own phase's travel time (flags are 0/1)
    return t0 + data[:, 3] * pTimes + data[:, 4] * sTimes
# Function to calculate the arrival times vs depth
# ti=function((isPphase,isSphase,p1,p2,p3,p4,s1,s2,s3,s4),z0,t0)
# ti=t0+travelTime
def globalLocDepFunc(data, z0, t0):
    """Predicted arrival times for a trial depth z0 and origin time t0."""
    pCoeffs = data[:, 2:6]
    sCoeffs = data[:, 6:10]
    pTimes = poly3(z0, pCoeffs[:, 0], pCoeffs[:, 1], pCoeffs[:, 2], pCoeffs[:, 3])
    sTimes = poly3(z0, sCoeffs[:, 0], sCoeffs[:, 1], sCoeffs[:, 2], sCoeffs[:, 3])
    # Each pick contributes only its own phase's travel time (flags are 0/1)
    return t0 + data[:, 0] * pTimes + data[:, 1] * sTimes
# Recalculate the current event-station distances with current event location
def recalcDegDists(data, params):
    """Normalize the fitted event position and recompute station distances (degrees)."""
    # In-place slice assignment: curve_fit does not constrain the position
    # to unit length, so renormalize params[:3] before measuring angles.
    norm = np.sqrt(np.sum(params[:3]**2))
    params[:3] = params[:3] / norm
    degDists = vecsAngle(data[:, :3], params[:3])
    return degDists, params
# Use travel times from the IASP91 model to estimate the event location
# Gives approximate event location, less appropriate with smaller networks
# Depth is constrained from 0 to 200 km
# Method: A coarse search is done first using a third order polynomial fit, varying epicenter...
# ...a refined epicentral search is done using O(3) polynomials fits about the predicted station-event distances...
# ...a final search vs depth is done using O(3) polynomials at the refined station-event distances
# Special note: the scipy curve_fit function does not constrain variables, thus the length 1 vector...
# ...representing the surface position will not remain length 1; this effect is "ignored" as the...
# ...travel times are related to distance in degrees and so the orientation of this vector is all that matters
def globalLocate(pickSet,staLoc,mainPath,customDict,staProjStyle):
    """Estimate an event hypocenter (lon, lat, depth, origin time) from P/S picks.

    Returns (array of shape (1, 5): [id, lon, lat, depth, time], customDict),
    or an empty (0, 5) array when no location can be computed. The pickled
    travel-time tables are cached in customDict between calls.
    """
    # Nothing to do if no picks, or not in lon,lats
    if 0 in pickSet.shape:
        return np.empty((0,5)),customDict
    elif staProjStyle!='None':
        print('Global locate requires stations to use lon,lat')
        return np.empty((0,5)),customDict
    # Load in the model (if not already loaded into the custom dictionary)
    if 'globEpiDict' not in customDict:
        with open(mainPath+'/Plugins/epiTTparams.pickle','rb') as aFile:
            customDict['globEpiDict']=pickle.load(aFile)
        with open(mainPath+'/Plugins/depTTparams.pickle','rb') as aFile:
            customDict['globDepDict']=pickle.load(aFile)
    ttEpiDict=customDict['globEpiDict']
    ttDepDict=customDict['globDepDict']
    # Remove anything but P and S picks (classify on the leading character)
    for i,entry in enumerate(pickSet[:,1]):
        pickSet[i,1]=pickSet[i,1][0]
    pickSet=pickSet[np.where((pickSet[:,1]=='P')|(pickSet[:,1]=='S'))]
    # Get only picks which have station metadata
    pickSet,pickStaIdxs=getPickStaIdxs(pickSet,staLoc[:,0])
    # Generate the data for the rough global curve fitting...
    # ...(staX,staY,staZ,isPphase,isSphase,p1,p2,p3,s1,s2,s3)
    data=np.zeros((len(pickSet),11),dtype=float)
    # ...set the station locations
    data[:,:3]=sph2xyz(staLoc[pickStaIdxs,1:3].astype(float))
    # ...set phase markers
    data[:,3]=pickSet[:,1]=='P'
    data[:,4]=pickSet[:,1]=='S'
    # ...set the coefficients for the global travel time calculations
    data[:,5:]=np.concatenate((ttEpiDict['globP'],ttEpiDict['globS']))
    # ...get the "y" values to fit the curve to (relative to the earliest pick)
    Tref=np.min(pickSet[:,2].astype(float))
    pickTimes=pickSet[:,2].astype(float)-Tref
    # Initial guess on origin, uses station location with earliest pick
    testLoc=data[np.argmin(pickTimes),:3]
    testLoc=np.concatenate((testLoc,[0]))
    # Solve for event origin parameters
    try:
        params, pcov = curve_fit(globalLocEpiFunc,data,pickTimes,testLoc,
                                 bounds=([-1,-1,-1,-2000],[1,1,1,2000]))
    except Exception:
        # Narrowed from a bare except: curve_fit raises RuntimeError/ValueError
        # on failed optimization; this no longer swallows KeyboardInterrupt.
        print('Global locator failed')
        return np.empty((0,5)),customDict
    degDists,params=recalcDegDists(data,params) # Normalize params
    # Update the data for the refined global curve fitting...
    # ...figure out which pre-fit parameters should be taken for each pick
    # (could be by station but likely fast enough)
    bins=np.arange(0-0.5*ttEpiDict['spacing'],
                   180+0.51*ttEpiDict['spacing'],ttEpiDict['spacing'])
    idxs=np.digitize(degDists,bins)-1
    data[:,5:8]=ttEpiDict['pParams'][idxs]
    data[:,8:11]=ttEpiDict['sParams'][idxs]
    # Fit again with refined parameters, use previous location as starting position
    try:
        params, pcov = curve_fit(globalLocEpiFunc,data,pickTimes,params,
                                 bounds=([-1,-1,-1,-2000],[1,1,1,2000]))
    except Exception:
        print('Global locator failed')
        return np.empty((0,5)),customDict
    # Convert back from the xyz to lon,lat
    degDists,params=recalcDegDists(data,params) # Normalize params
    lon,lat=xyz2sph(params[:3])
    # Generate the data for the depth curve fitting...
    bins=np.arange(0-0.5*ttDepDict['spacing'],
                   180+0.51*ttDepDict['spacing'],ttDepDict['spacing'])
    idxs=np.digitize(degDists,bins)-1
    dataDep=np.zeros((len(data),10),dtype=float)
    dataDep[:,:2]=data[:,3:5]
    dataDep[:,2:6]=ttDepDict['pParams'][idxs]
    dataDep[:,6:10]=ttDepDict['sParams'][idxs]
    # Fit again with depth parameters, use model starting depth and previous origin time as starting position
    try:
        depParams, pcov = curve_fit(globalLocDepFunc,dataDep,pickTimes,[ttEpiDict['startDep'],params[-1]],
                                    bounds=([0,-2000],[200,2000]))
    except Exception:
        print('Global locator failed')
        return np.empty((0,5)),customDict
    # Re-attach the reference time stripped off before fitting
    return np.array([[0,lon,lat,depParams[0],depParams[1]+Tref]]),customDict
|
|
from PIL import Image
from pokescrapping import download_photo
import numpy
import json
def load_db(dex_db=None):
    """Load the local pokedex database from dexdb.json.

    Returns the full dict, or only the ``dex_db`` section when given.
    Returns None when the file is missing/unreadable, contains invalid
    JSON, or the requested section does not exist (mirrors the original
    best-effort behavior).
    """
    try:
        # `with` guarantees the file handle is closed (the original leaked it).
        with open('dexdb.json') as db_file:
            data = json.load(db_file)
        if dex_db is not None:
            return data[dex_db]
        return data
    except (OSError, ValueError, KeyError):
        # OSError: file missing/unreadable; ValueError: bad JSON
        # (json.JSONDecodeError subclasses it); KeyError: unknown section.
        return None
def print_asciiart(num_pokemon = 0):
    '''
    Print an ASCII-art rendering of the requested pokemon to the console.

    Loads the sprite from pokemon_images/<NNNN>.png; when it is not cached
    locally it is fetched first via download_photo().
    '''
    def get_image():
        # Sprite path uses a zero-padded 4-digit dex number.
        image_file = 'pokemon_images/'+ '{:04d}'.format(num_pokemon)+'.png'
        try:
            return Image.open(image_file)
        except:
            # Sprite not cached (or unreadable): download, then retry.
            # NOTE(review): bare except also hides corrupt-image errors —
            # consider narrowing to FileNotFoundError/OSError.
            print("Obteniendo imagen, porfavor espere...\r", end='')
            if download_photo(num_pokemon):
                return Image.open(image_file)
            else:
                print('No se logró encontrar la imagen para el pokémon seleccionado')
                return None
    # ASCII-art configuration: glyph ramp (light -> dark), scale factor,
    # gamma correction, and width factor compensating non-square terminal cells.
    chars = numpy.asarray(list(' .,:;irsXA253hMHGS#9B&@'))
    SC = float(0.15)
    GCF = float(1)
    WCF = 7.0/4.0
    # Render the pokemon to the console.
    img_pokemon = get_image()
    if img_pokemon is not None:
        # Resize, sum the color channels to a brightness value, then map
        # brightness onto the glyph ramp (brightest pixel -> blank glyph).
        S = (int(img_pokemon.size[0]*SC*WCF), int(img_pokemon.size[1]*SC))
        img_pokemon = numpy.sum( numpy.asarray(img_pokemon.resize(S), dtype="float"), axis=2)
        img_pokemon -= img_pokemon.min()
        img_pokemon = (1.0 - img_pokemon/img_pokemon.max())**GCF*(chars.size-1)
        print("\n".join(("".join(r) for r in chars[img_pokemon.astype(int)])))
        print()
def get_data(pokemon=None):
    """Return the whole pokemon table, or a single entry when a name is given.

    Returns None (after printing a notice) when the database is missing,
    and None silently when the requested pokemon is unknown.
    """
    pokemon_data = load_db('pokemon')
    if pokemon_data is None:
        print("Base de datos no encontrada")
        return None
    if pokemon is None:
        return pokemon_data
    if pokemon in pokemon_data:
        return pokemon_data[pokemon]
def get_move(move):
    """Look up a move by name; None when unknown or the database is unavailable."""
    move_list = load_db('move_list')
    if move_list is None:
        # BUG FIX: load_db returns None when dexdb.json (or the section) is
        # missing; the original then crashed with `in None` (TypeError).
        return None
    return move_list[move] if move in move_list else None
def get_learnset(pokemon):
    """Return the list of moves *pokemon* can learn ([] when unknown).

    Prints a notice and returns None when the database is missing.
    """
    learnset = load_db('learnset')
    if learnset is None:
        print("Base de datos no encontrada")
        return None
    return learnset.get(pokemon, [])
def check_dex():
    """Walk every pokemon's learnset and verify each move exists in the move table.

    Prints a progress indicator, then either the list of unresolved move
    names with an error count, or a success summary.
    """
    print('Comprobando integridad de los datos, por favor espere...')
    if load_db() is not None:
        data = []
        ok_data = []
        count_error = 0
        count_ok = 0
        for pokemon in get_data().keys():
            learnset = get_learnset(str(pokemon))
            # BUG FIX: the original tested `learnset is not []`, an identity
            # comparison against a fresh list literal that is always True.
            # Truthiness correctly skips empty or missing (None) learnsets.
            if learnset:
                for i, move in enumerate(learnset):
                    print(pokemon, ':',int(i/len(learnset)*100), '% \r', end='')
                    try:
                        m = get_move(move)['name']
                        ok_data.append(m)
                        count_ok += 1
                    except (TypeError, KeyError):
                        # TypeError: get_move returned None (unknown move);
                        # KeyError: the entry lacks a 'name' field.
                        if move not in data:
                            data.append(move)
                        count_error+=1
        if count_error > 0:
            print(data)
            print('We found {} errors.\nThese attack are not in dex: '.format(count_error))
        else:
            print('No error found')
            print('Data ok ', count_ok)
    else:
        print("Base de datos no encontrada")
|
|
# -*- coding: UTF-8 -*-
"""StyleGAN architectures.
"""
# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ #
from .base import StyleGAN
from _int import FMAP_SAMPLES, RES_INIT
from utils.latent_utils import gen_rand_latent_vars
from utils.custom_layers import Lambda, get_blur_op, NormalizeLayer, \
Conv2dEx, LinearEx, Conv2dBias
import copy
import numpy as np
import torch
from torch import nn
import torch.nn.functional as F
# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ #
# Width multiplier for the generator's initial feature maps (len_latent * this).
FMAP_G_INIT_FCTR = 1
# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ #
class StyleMappingNetwork( nn.Module ):
    """Mapping network (z -> w) for StyleGAN.

    Optionally pixel-normalizes the flattened latent, then applies
    `num_fcs` equalized-LR fully-connected layers whose widths step
    linearly from `len_latent` to `len_dlatent`.
    """
    def __init__( self,
                  len_latent = 512,
                  len_dlatent = 512,
                  num_fcs = 8,
                  lrmul = .01,
                  nl = nn.LeakyReLU( negative_slope = .2 ),
                  equalized_lr = True,
                  normalize_z = True ):
        super( StyleMappingNetwork, self ).__init__()

        flatten = Lambda( lambda x: x.view( -1, len_latent ) )
        # Optional pixelwise normalization of z ahead of the FC stack.
        if normalize_z:
            self.preprocess_z = nn.Sequential( flatten, NormalizeLayer( 'PixelNorm' ) )
        else:
            self.preprocess_z = flatten

        # Layer widths interpolate linearly from len_latent to len_dlatent.
        self.dims = np.linspace( len_latent, len_dlatent, num_fcs + 1 ).astype( np.int64 )

        self.fc_mapping_model = nn.Sequential( )
        for layer_idx in range( num_fcs ):
            fc = LinearEx( nin_feat = self.dims[layer_idx], nout_feat = self.dims[layer_idx + 1],
                           init = 'He', init_type = 'StyleGAN', gain_sq_base = 2.,
                           equalized_lr = equalized_lr, lrmul = lrmul )
            self.fc_mapping_model.add_module( 'fc_' + str( layer_idx ), fc )
            self.fc_mapping_model.add_module( 'nl_' + str( layer_idx ), nl )

    def forward( self, x ):
        """Map a batch of latents z to disentangled latents w."""
        z = self.preprocess_z( x )
        return self.fc_mapping_model( z )
class StyleConditionedMappingNetwork( nn.Module ):
    """Class-conditioned mapping network ((z, y) -> w) for StyleGAN.

    The class vector is either embedded into latent space or concatenated
    raw onto z; the joint vector then runs through `num_fcs`
    equalized-LR fully-connected layers.
    """
    def __init__( self,
                  num_classes,
                  len_latent = 512,
                  len_dlatent = 512,
                  num_fcs = 8,
                  lrmul = .01,
                  nl = nn.LeakyReLU( negative_slope = .2 ),
                  equalized_lr = True,
                  normalize_z = True,
                  embed_cond_vars = True ):
        super( StyleConditionedMappingNetwork, self ).__init__()

        self.len_latent = len_latent
        self.num_classes = num_classes
        self.embed_cond_vars = embed_cond_vars
        if embed_cond_vars:
            # Learned linear (bias-free) embedding of the class vector.
            self.class_embedding = LinearEx( nin_feat = num_classes, nout_feat = len_latent,
                                             init = None, init_type = 'Standard Normal', include_bias = False )

        # Hidden widths step linearly toward len_dlatent; the first width is
        # the concatenated (z, y) length, which depends on the embedding mode.
        self.dims = np.linspace( len_latent, len_dlatent, num_fcs ).astype( np.int64 )
        joint_width = 2*len_latent if self.embed_cond_vars else len_latent + num_classes
        self.dims = np.insert( self.dims, 0, joint_width )

        self.fc_mapping_model = nn.Sequential( )
        if normalize_z:
            self.fc_mapping_model.add_module( 'pixelnorm', NormalizeLayer( 'PixelNorm' ) )
        for layer_idx in range( num_fcs ):
            fc = LinearEx( nin_feat = self.dims[layer_idx], nout_feat = self.dims[layer_idx + 1],
                           init = 'He', init_type = 'StyleGAN', gain_sq_base = 2.,
                           equalized_lr = equalized_lr, lrmul = lrmul )
            self.fc_mapping_model.add_module( 'fc_' + str( layer_idx ), fc )
            self.fc_mapping_model.add_module( 'nl_' + str( layer_idx ), nl )

    def forward( self, x, y ):
        """Map (latent batch z, condition batch y) to disentangled latents w."""
        y = y.view( -1, self.num_classes )
        if self.embed_cond_vars:
            y = self.class_embedding( y )
        joint = torch.cat( ( x.view( -1, self.len_latent ), y, ), dim = 1 )
        return self.fc_mapping_model( joint )
class StyleAddNoise( nn.Module ):
    """Adds per-channel-weighted Gaussian noise to a batch of feature maps.

    The per-channel weights start at zero, so the layer is initially a
    no-op. In evaluation mode a caller may pass a fixed `noise` tensor
    for reproducible synthesis; otherwise fresh noise is sampled.
    """
    def __init__( self, nf ):
        super( StyleAddNoise, self ).__init__()
        # One learned scale per feature map, initialized to 0.
        self.noise_weight = nn.Parameter( torch.FloatTensor( 1, nf, 1, 1 ).fill_( 0 ) )

    def forward( self, x, noise = None ):
        """Return x plus weighted noise (sampled unless supplied in eval mode)."""
        if not self.training and noise is not None:
            # Evaluation mode with caller-supplied noise.
            return x + self.noise_weight * noise
        # Training mode, or evaluation without explicit noise: sample one
        # noise plane per sample and broadcast it across all channels.
        sampled = torch.randn( x.shape[0], 1, x.shape[2], x.shape[3],
                               dtype = torch.float32, device = x.device )
        return x + self.noise_weight * sampled
# ............................................................................ #
# Generator:
# ----------

# TODO: Implement more efficient "recursive structure" by Tero Karras for training (see their GitHub implementation)
class StyleGenerator( StyleGAN ):
    """StyleGAN (Karras et al. 2019) Generator
    Emulates most recent official implementation.

    Starts at 4x4 resolution (two synthesis layers built in __init__);
    `increase_scale()` appends two layers per doubling of resolution.
    Each synthesis layer is a 4-element ModuleList:
    [conv (or None for the constant input), noise-adder (or None),
     bias+nonlinearity+norms, w-to-style affine].
    """
    def __init__( self,
                  final_res,
                  latent_distribution = 'normal',
                  len_latent = 512,
                  len_dlatent = 512,
                  mapping_num_fcs = 8,
                  mapping_lrmul = .01,
                  use_instancenorm = True,
                  use_noise = True,
                  upsampler = nn.Upsample( scale_factor = 2, mode = 'nearest' ),
                  blur_type = None,
                  nl = nn.LeakyReLU( negative_slope = .2 ),
                  num_classes = 0,
                  equalized_lr = True,
                  normalize_z = True,
                  use_pixelnorm = False,
                  pct_mixing_reg = .9,
                  truncation_trick_params = { 'beta': .995, 'psi': .7, 'cutoff_stage': 4 } ):
        # NOTE(review): `truncation_trick_params` is a mutable default dict
        # shared across instances — safe only as long as it is never mutated.
        super( self.__class__, self ).__init__( final_res )
        self.gen_layers = nn.ModuleList( )
        self.upsampler = upsampler
        self.upsampler_skip_connection = \
            lambda xb: F.interpolate( xb, scale_factor = 2, mode = 'nearest' ) # keep fading-in layers simple
        self.gen_blur_type = blur_type
        self.nl = nl
        self.equalized_lr = equalized_lr
        self.pct_mixing_reg = pct_mixing_reg
        self._use_mixing_reg = True if pct_mixing_reg else False
        self.latent_distribution = latent_distribution
        self.len_latent = len_latent
        self.len_dlatent = len_dlatent
        assert isinstance( num_classes, int )
        self.num_classes = num_classes
        # Mapping Network initialization: conditional variant when classes given.
        if not num_classes:
            self.z_to_w = StyleMappingNetwork(
                len_latent = len_latent,
                len_dlatent = len_dlatent,
                num_fcs = mapping_num_fcs,
                lrmul = mapping_lrmul,
                nl = nl,
                equalized_lr = equalized_lr,
                normalize_z = normalize_z
            )
        else:
            self.z_to_w = StyleConditionedMappingNetwork(
                num_classes,
                len_latent = len_latent,
                len_dlatent = len_dlatent,
                num_fcs = mapping_num_fcs,
                lrmul = mapping_lrmul,
                nl = nl,
                equalized_lr = equalized_lr,
                normalize_z = normalize_z
            )
        _fmap_init = len_latent * FMAP_G_INIT_FCTR
        # initializing the input to 1 has about the same effect as applyng PixelNorm to the input
        self.const_input = nn.Parameter(
            torch.FloatTensor( 1, _fmap_init, RES_INIT, RES_INIT ).fill_( 1 )
        )
        self._use_noise = use_noise
        # Remember the training-time setting: the use_noise property setter
        # only allows eval-mode changes when the model was trained with noise.
        self._trained_with_noise = use_noise
        if use_noise:
            # With noise, bias is applied separately (after the noise add),
            # so the conv omits its own bias.
            conv = Conv2dEx( ni = _fmap_init, nf = self.fmap, ks = 3,
                             stride = 1, padding = 1, init = 'He', init_type = 'StyleGAN',
                             gain_sq_base = 2., equalized_lr = equalized_lr, include_bias = False )
            noise = [
                StyleAddNoise( nf = _fmap_init ),
                StyleAddNoise( nf = self.fmap ),
            ]
            bias = (
                [ Conv2dBias( nf = _fmap_init ) ],
                [ Conv2dBias( nf = self.fmap ) ],
            )
        else:
            conv = Conv2dEx( ni = _fmap_init, nf = self.fmap, ks = 3,
                             stride = 1, padding = 1, init = 'He', init_type = 'StyleGAN',
                             gain_sq_base = 2., equalized_lr = equalized_lr, include_bias = True )
            # noise = ( [], [], )
            noise = [ None, None ]
            bias = ( [], [], )  # NOTE: without noise, the bias would get directly added to the constant input, so the constant input can just learn this bias,
                                #       so theoretically, there shouldn't be a need to include the bias either. There may be numerical approximation problems from backprop, however.
        norms = []
        self.use_pixelnorm = use_pixelnorm
        if use_pixelnorm:
            norms.append( NormalizeLayer( 'PixelNorm' ) )
        self.use_instancenorm = use_instancenorm
        if use_instancenorm:
            norms.append( NormalizeLayer( 'InstanceNorm' ) )
        # Affine transforms from w to per-layer (scale, shift) style pairs;
        # hence the 2x output width.
        w_to_styles = (
            LinearEx( nin_feat = self.z_to_w.dims[ -1 ], nout_feat = 2 * _fmap_init,
                      init = 'He', init_type = 'StyleGAN', gain_sq_base = 1., equalized_lr = equalized_lr ),
            LinearEx( nin_feat = self.z_to_w.dims[ -1 ], nout_feat = 2 * self.fmap,
                      init = 'He', init_type = 'StyleGAN', gain_sq_base = 1., equalized_lr = equalized_lr ),
        )
        assert 0. <= truncation_trick_params[ 'beta' ] <= 1.
        self.w_ewma_beta = truncation_trick_params[ 'beta' ]
        self._w_eval_psi = truncation_trick_params[ 'psi' ]  # allow psi to be any number you want, perhaps worthy of experimentation
        assert ( ( isinstance( truncation_trick_params[ 'cutoff_stage' ], int ) and \
                   0 < truncation_trick_params[ 'cutoff_stage' ] <= int( np.log2( self.final_res ) ) - 2 ) or \
                 truncation_trick_params[ 'cutoff_stage' ] is None )
        self._trunc_cutoff_stage = truncation_trick_params[ 'cutoff_stage' ]
        # set the below to `False` if you want to turn off during evaluation mode
        self.use_truncation_trick = True if self._trunc_cutoff_stage else False
        # Running EWMA of w, lazily initialized on the first training forward.
        self.w_ewma = None
        # Layer 0: constant input (no conv), layer 1: first 3x3 conv — both at 4x4.
        self.gen_layers.append(
            nn.ModuleList( [
                None,
                noise[0],
                nn.Sequential( *bias[0], nl, *norms ),
                w_to_styles[0]
            ] )
        )
        self.gen_layers.append(
            nn.ModuleList( [
                conv,
                noise[1],
                nn.Sequential( *bias[1], nl, *norms ),
                w_to_styles[1]
            ] )
        )
        self.prev_torgb = None
        self._update_torgb( ni = self.fmap )

    def increase_scale( self ):
        """Use this to increase scale during training or for initial resolution."""
        # update metadata
        if not self.scale_inc_metadata_updated:
            super( self.__class__, self ).increase_scale()
        else:
            self.scale_inc_metadata_updated = False
        blur_op = get_blur_op( blur_type = self.gen_blur_type, num_channels = self.fmap ) if \
                  self.gen_blur_type is not None else None
        # Two new synthesis layers per doubling: upsample+conv, then conv.
        self.gen_layers.append(
            self.get_conv_layer( ni = self.fmap_prev, upsample = True, blur_op = blur_op )
        )
        self.gen_layers.append(
            self.get_conv_layer( ni = self.fmap )
        )
        # Keep the previous toRGB head for the fade-in skip connection.
        self.prev_torgb = copy.deepcopy( self.torgb )
        self._update_torgb( ni = self.fmap )

    def get_conv_layer( self, ni, upsample = False, blur_op = None, append_nl = True ):
        """Assemble one synthesis layer as the 4-element ModuleList
        [conv (optionally preceded by upsample, followed by blur),
         noise-adder or None, bias+nl+norms, w-to-style affine]."""
        upsampler = []
        if upsample:
            upsampler.append( self.upsampler )
        if self.use_noise or blur_op is not None:
            # Bias is applied after noise/blur, so the conv omits its own.
            conv = Conv2dEx( ni = ni, nf = self.fmap, ks = 3, stride = 1, padding = 1,
                             init = 'He', init_type = 'StyleGAN', gain_sq_base = 2.,
                             equalized_lr = self.equalized_lr, include_bias = False )
            bias = [ Conv2dBias( nf = self.fmap ) ]
        else:
            conv = Conv2dEx( ni = ni, nf = self.fmap, ks = 3, stride = 1, padding = 1,
                             init = 'He', init_type = 'StyleGAN', gain_sq_base = 2.,
                             equalized_lr = self.equalized_lr, include_bias = True )
            bias = []
        blur = []
        if blur_op is not None:
            assert isinstance( blur_op, nn.Module )
            blur.append( blur_op )
        noise = None
        if self.use_noise:
            noise = StyleAddNoise( nf = self.fmap )
        nl = []
        if append_nl:
            nl.append( self.nl )
        norms = []
        if self.use_pixelnorm:
            norms.append( NormalizeLayer( 'PixelNorm' ) )
        if self.use_instancenorm:
            norms.append( NormalizeLayer( 'InstanceNorm' ) )
        w_to_style = LinearEx( nin_feat = self.z_to_w.dims[ -1 ], nout_feat = 2*self.fmap,
                               init = 'He', init_type = 'StyleGAN', gain_sq_base = 1.,
                               equalized_lr = self.equalized_lr )
        return nn.ModuleList( [ nn.Sequential( *upsampler, conv, *blur ),
                                noise,
                                nn.Sequential( *( bias + nl + norms ) ),
                                w_to_style ] )
        # return nn.ModuleList( [ nn.Sequential( *upsampler, conv, *( blur + noise + bias + nl + norms ) ), w_to_style ] )

    def _update_torgb( self, ni ):
        """(Re)build the 1x1 toRGB head for the current feature-map width."""
        self.torgb = Conv2dEx( ni = ni, nf = FMAP_SAMPLES, ks = 1, stride = 1,
                               padding = 0, init = 'He', init_type = 'StyleGAN',
                               gain_sq_base = 1., equalized_lr = self.equalized_lr )

    def train( self, mode = True ):
        """Overwritten to turn on mixing regularization (if > 0%) during training mode."""
        # NOTE(review): `super( self.__class__, ... )` breaks under further
        # subclassing (infinite recursion); `super( StyleGenerator, ... )`
        # would be the safe spelling.
        super( self.__class__, self ).train( mode = mode )
        self._use_noise = self._trained_with_noise
        self._use_mixing_reg = True if self.pct_mixing_reg else False

    def eval( self ):
        """Overwritten to turn off mixing regularization during evaluation mode."""
        super( self.__class__, self ).eval( )
        self._use_mixing_reg = False

    def to( self, *args, **kwargs ):
        """Overwritten to allow for non-Parameter objects' Tensors to be sent to the appropriate device."""
        # NOTE(review): does not return self, unlike nn.Module.to — chained
        # calls like `model.to(device).eval()` would break.
        super( self.__class__, self ).to( *args, **kwargs )
        for arg in args:
            if arg in ( 'cpu', 'cuda', ) or isinstance( arg, torch.device ):
                if self.w_ewma is not None:
                    self.w_ewma = self.w_ewma.to( arg )
                break

    @property
    def use_noise( self ):
        # Whether forward() feeds feature maps through the noise adders.
        return self._use_noise

    @use_noise.setter
    def use_noise( self, mode ):
        """Allows for optionally evaluating without noise inputs."""
        if self.training:
            raise Exception( 'Once use_noise argument is set, it cannot be changed' + \
                             ' for training purposes. It can, however, be changed in eval mode.' )
        elif not self._trained_with_noise:
            raise Exception( 'Model was not trained with noise, so cannot use noise in eval mode.' )
        else:
            self._use_noise = mode

    @property
    def w_eval_psi( self ):
        # Truncation-trick interpolation factor used in evaluation mode.
        return self._w_eval_psi

    @w_eval_psi.setter
    def w_eval_psi( self, new_w_eval_psi ):
        """Change this to your choosing (but only in evaluation mode), optionally allowing for |psi| to be > 1."""
        if not self.training:
            self._w_eval_psi = new_w_eval_psi
        else:
            raise Exception( 'Can only alter psi value for truncation trick on w during evaluation mode.' )

    @property
    def trunc_cutoff_stage( self ):
        # Resolution stage up to which truncation is applied in eval mode.
        return self._trunc_cutoff_stage

    @trunc_cutoff_stage.setter
    def trunc_cutoff_stage( self, new_trunc_cutoff_stage ):
        """Change this to your choosing (but only in evaluation mode)."""
        if not self.training:
            _final_stage = int( np.log2( self.final_res ) ) - 1
            if ( isinstance( new_trunc_cutoff_stage, int ) and \
                 0 < new_trunc_cutoff_stage <= _final_stage ) or new_trunc_cutoff_stage is None:
                self._trunc_cutoff_stage = new_trunc_cutoff_stage
            else:
                message = f'Input cutoff stage for truncation trick on w must be of type `int` in range (0,{_final_stage}] or `None`.'
                raise ValueError( message )
        else:
            raise Exception( 'Can only alter cutoff stage for truncation trick on w during evaluation mode.' )

    def forward( self, x, x_mixing = None, style_mixing_stage:int = None, noise = None ):
        """Synthesize images from a batch of latents `x`.

        x_mixing / style_mixing_stage: optional second latent batch and the
            layer index at which to switch to it (eval-mode style mixing).
        noise: optional per-layer noise tensors for reproducible eval-mode
            synthesis; fresh noise is sampled when None.
        """
        # TODO: Implement the ability to style-mix more than just 2 styles in eval mode
        # TODO: Implement the ability to input the disentangled latent variable w directly
        cutoff_idx = None
        # Training Mode Only: pick a random layer index past which a second,
        # freshly-sampled latent replaces the current one (mixing regularization).
        if self._use_mixing_reg:
            if np.random.rand() < self.pct_mixing_reg:
                if self.alpha != 0:
                    cutoff_idx = torch.randint( 1, 2*self.scale_stage, ( 1, ) ).item()
                else:
                    cutoff_idx = torch.randint( 1, 2*self.scale_stage - 2, ( 1, ) ).item()
        x = self.z_to_w( x )
        bs = x.shape[0]
        if self.use_truncation_trick:
            # Training Mode Only: maintain the EWMA of w used for truncation.
            if self.training:
                if self.w_ewma is None:
                    self.w_ewma = x.detach().clone().mean( dim = 0 )
                else:
                    with torch.no_grad():
                        # TODO: Implement a memory-efficient method to compute this for the ewma generator
                        #       (currently just using the same average w for the generator and the ewma generator)
                        self.w_ewma = x.mean( dim = 0 ) * ( 1. - self.w_ewma_beta ) + \
                                      self.w_ewma * ( self.w_ewma_beta )
            # Evaluation Mode Only: truncate w toward its running average.
            elif self.trunc_cutoff_stage is not None:
                x = self.w_ewma.expand_as( x ) + self.w_eval_psi * ( x - self.w_ewma.expand_as( x ) )
        out = self.const_input.expand( bs, -1, -1, -1 )
        if self.fade_in_phase:
            # All but the two newest layers run normally; the newest two are
            # blended with the upsampled previous-resolution RGB output below.
            for n, layer in enumerate( self.gen_layers[ :-2 ] ):
                if n:
                    out = layer[ 0 ]( out )
                if self.use_noise:
                    out = layer[ 1 ]( out, noise = noise[ n ] if noise is not None else None )
                out = layer[ 2 ]( out )
                if n == cutoff_idx:
                    # TODO: Implement embedding-style conditioning from "Which Training Methods for
                    #       GANs do actually Converge" & discriminator conditioning.
                    x = gen_rand_latent_vars( num_samples = bs, length = self.len_latent,
                                              distribution = self.latent_distribution, device = x.device )
                    x.requires_grad_( True )
                    x = self.z_to_w( x )
                y = layer[ 3 ]( x ).view( -1, 2, layer[ 3 ].nout_feat // 2, 1, 1 )
                out = out * ( y[ :, 0 ].contiguous().add( 1 ) ) + \
                      y[ :, 1 ].contiguous()  # add 1 for skip-connection effect
            # TODO: there should be a cleaner way to do the fading-in part while remaining memory-efficient...
            n += 1
            if n == cutoff_idx:
                x = gen_rand_latent_vars( num_samples = bs, length = self.len_latent,
                                          distribution = self.latent_distribution, device = x.device )
                x.requires_grad_( True )
                x = self.z_to_w( x )
            y = self.gen_layers[ -2 ][ 3 ]( x ).view( -1, 2, self.gen_layers[ -2 ][ 3 ].nout_feat // 2, 1, 1 )
            n += 1
            if n == cutoff_idx:
                x = gen_rand_latent_vars( num_samples = bs, length = self.len_latent,
                                          distribution = self.latent_distribution, device = x.device )
                x.requires_grad_( True )
                x = self.z_to_w( x )
            yf = self.gen_layers[ -1 ][ 3 ]( x ).view( -1, 2, self.gen_layers[ -1 ][ 3 ].nout_feat // 2, 1, 1 )
            # Blend: alpha-weighted new-resolution output plus (1 - alpha)
            # upsampled previous-resolution RGB (the fade-in skip connection).
            if self.use_noise:
                return self.upsampler_skip_connection( self.prev_torgb( out ) ) * ( 1. - self.alpha ) + \
                       self.torgb(
                           self.gen_layers[ -1 ][ 2 ]( self.gen_layers[ -1 ][ 1 ]( self.gen_layers[ -1 ][ 0 ](
                               self.gen_layers[ -2 ][ 2 ]( self.gen_layers[ -2 ][ 1 ]( self.gen_layers[ -2 ][ 0 ]( out ), noise = noise[ -2 ] if noise is not None else None ) ) * ( y[ :, 0 ].contiguous().add( 1 ) ) + y[ :, 1 ].contiguous()
                           ), noise = noise[ -1 ] if noise is not None else None ) ) * ( yf[ :, 0 ].contiguous().add( 1 ) ) + yf[ :, 1 ].contiguous()
                       ) * ( self.alpha )
            else:
                return self.upsampler_skip_connection( self.prev_torgb( out ) ) * ( 1. - self.alpha ) + \
                       self.torgb(
                           self.gen_layers[ -1 ][ 2 ]( self.gen_layers[ -1 ][ 0 ](
                               self.gen_layers[ -2 ][ 2 ]( self.gen_layers[ -2 ][ 0 ]( out ) ) * ( y[ :, 0 ].contiguous().add( 1 ) ) + y[ :, 1 ].contiguous()
                           ) ) * ( yf[ :, 0 ].contiguous().add( 1 ) ) + yf[ :, 1 ].contiguous()
                       ) * ( self.alpha )
        else:
            for n, layer in enumerate( self.gen_layers ):
                if n:
                    out = layer[ 0 ]( out )
                if self.use_noise:
                    out = layer[ 1 ]( out, noise = noise[ n ] if noise is not None else None )
                out = layer[ 2 ]( out )
                # Training Mode Only: swap in a fresh latent for mixing regularization.
                if n == cutoff_idx:
                    # TODO: Implement embedding-style conditioning from "Which Training Methods for
                    #       GANs do actually Converge" & discriminator conditioning.
                    x = gen_rand_latent_vars( num_samples = bs, length = self.len_latent,
                                              distribution = self.latent_distribution, device = x.device )
                    x.requires_grad_( True )
                    x = self.z_to_w( x )
                # Evaluation Mode Only: switch to the user-supplied mixing latent.
                if n == style_mixing_stage:
                    assert ( style_mixing_stage and not self.training and isinstance( x_mixing, torch.Tensor ) )
                    x = self.z_to_w( x_mixing )
                    # the new z that is sampled for style-mixing is already de-truncated
                    if self.use_truncation_trick and self.trunc_cutoff_stage is not None and n < 2*self.trunc_cutoff_stage:
                        x = self.w_ewma.expand_as( x ) + self.w_eval_psi * ( x - self.w_ewma.expand_as( x ) )
                elif self.use_truncation_trick and not self.training and self.trunc_cutoff_stage is not None and n == 2*self.trunc_cutoff_stage:
                    # de-truncate w for higher resolutions; more memory-efficient than defining 2 w's
                    x = ( x - self.w_ewma.expand_as( x ) ).div( self.w_eval_psi ) + self.w_ewma.expand_as( x )
                y = layer[ 3 ]( x ).view( -1, 2, layer[ 3 ].nout_feat // 2, 1, 1 )
                out = out * ( y[ :, 0 ].contiguous().add( 1 ) ) + \
                      y[ :, 1 ].contiguous()  # add 1 for skip-connection effect
            return self.torgb( out )
|
|
# Lint as: python3
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for haiku._src.nets.resnet."""
from absl.testing import absltest
from absl.testing import parameterized
from haiku._src import test_utils
from haiku._src.nets import resnet
import jax.numpy as jnp
class ResnetTest(parameterized.TestCase):
  """Smoke tests for the Haiku ResNet network."""

  @test_utils.combined_named_parameters(test_utils.named_bools("resnet_v2"),
                                        test_utils.named_bools("bottleneck"))
  @test_utils.transform_and_run
  def test_simple(self, resnet_v2, bottleneck):
    # Forward pass on a dummy batch; only the logits shape is verified.
    inputs = jnp.ones([2, 64, 64, 3])
    net = resnet.ResNet([1, 1, 1, 1], 10,
                        resnet_v2=resnet_v2,
                        bottleneck=bottleneck)
    for training in (True, False):
      out = net(inputs, is_training=training)
      self.assertEqual(out.shape, (2, 10))

  @parameterized.parameters(3, 5)
  @test_utils.transform_and_run
  def test_error_incorrect_args_block_list(self, list_length):
    # A blocks-per-group list whose length is not 4 must be rejected.
    blocks = list(range(list_length))
    with self.assertRaisesRegex(
        ValueError, "blocks_per_group` must be of length 4 not {}".format(
            list_length)):
      resnet.ResNet(blocks, 10, {"decay_rate": 0.9, "eps": 1e-5})

  @parameterized.parameters(3, 5)
  @test_utils.transform_and_run
  def test_error_incorrect_args_channel_list(self, list_length):
    # A channels-per-group list whose length is not 4 must be rejected.
    channels = list(range(list_length))
    with self.assertRaisesRegex(
        ValueError,
        "channels_per_group` must be of length 4 not {}".format(
            list_length)):
      resnet.ResNet([1, 1, 1, 1], 10, {"decay_rate": 0.9, "eps": 1e-5},
                    channels_per_group=channels)
# Script entry point: run the test suite via absl's test runner.
if __name__ == "__main__":
  absltest.main()
|
|
# tts 推理引擎,支持流式与非流式
# 精简化使用
# 用 onnxruntime 进行推理
# 1. 下载对应的模型
# 2. 加载模型
# 3. 端到端推理
# 4. 流式推理
import base64
import numpy as np
from paddlespeech.server.utils.onnx_infer import get_sess
from paddlespeech.t2s.frontend.zh_frontend import Frontend
from paddlespeech.server.utils.util import denorm, get_chunks
from paddlespeech.server.utils.audio_process import float2pcm
from paddlespeech.server.utils.config import get_config
from paddlespeech.server.engine.tts.online.onnx.tts_engine import TTSEngine
class TTS:
    """TTS inference engine on ONNX Runtime, supporting both streaming
    and non-streaming synthesis via PaddleSpeech's online-ONNX engine."""

    def __init__(self, config_path):
        # Only the 'tts_online-onnx' section of the server YAML config is used.
        self.config = get_config(config_path)['tts_online-onnx']
        # Vocoder block size used for chunked (streaming) synthesis.
        self.config['voc_block'] = 36
        self.engine = TTSEngine()
        self.engine.init(self.config)
        self.engine.warm_up()
        # Frontend initialization: text -> phone ids (tone vocab unused).
        self.frontend = Frontend(
            phone_vocab_path=self.engine.executor.phones_dict,
            tone_vocab_path=None)

    def depadding(self, data, chunk_num, chunk_id, block, pad, upsample):
        """
        Streaming inference removes the result of pad inference.

        Keeps only the `block` valid frames of a chunk, dropping the
        `pad` context frames that were added for inference quality.
        `upsample` is the number of output samples per frame.
        """
        front_pad = min(chunk_id * block, pad)
        # first chunk
        if chunk_id == 0:
            data = data[:block * upsample]
        # last chunk
        elif chunk_id == chunk_num - 1:
            data = data[front_pad * upsample:]
        # middle chunk
        else:
            data = data[front_pad * upsample:(front_pad + block) * upsample]
        return data

    def offlineTTS(self, text):
        """Synthesize the full waveform for `text` in one non-streaming
        pass: AM encoder -> decoder -> postnet -> vocoder, all via ONNX."""
        get_tone_ids = False
        merge_sentences = False
        input_ids = self.frontend.get_input_ids(
            text,
            merge_sentences=merge_sentences,
            get_tone_ids=get_tone_ids)
        phone_ids = input_ids["phone_ids"]
        wav_list = []
        for i in range(len(phone_ids)):
            orig_hs = self.engine.executor.am_encoder_infer_sess.run(
                None, input_feed={'text': phone_ids[i].numpy()}
            )
            hs = orig_hs[0]
            am_decoder_output = self.engine.executor.am_decoder_sess.run(
                None, input_feed={'xs': hs})
            am_postnet_output = self.engine.executor.am_postnet_sess.run(
                None,
                input_feed={
                    'xs': np.transpose(am_decoder_output[0], (0, 2, 1))
                })
            # NOTE(review): am_decoder_output is the raw session-output
            # *list*; adding the transposed postnet array relies on NumPy
            # coercing the list — confirm against the PaddleSpeech reference.
            am_output_data = am_decoder_output + np.transpose(
                am_postnet_output[0], (0, 2, 1))
            normalized_mel = am_output_data[0][0]
            # De-normalize the mel with the AM statistics, then vocode.
            mel = denorm(normalized_mel, self.engine.executor.am_mu, self.engine.executor.am_std)
            wav = self.engine.executor.voc_sess.run(
                output_names=None, input_feed={'logmel': mel})[0]
            wav_list.append(wav)
        wavs = np.concatenate(wav_list)
        return wavs

    def streamTTS(self, text):
        """Yield base64-encoded PCM chunks as the engine produces them."""
        for sub_wav_base64 in self.engine.run(sentence=text):
            yield sub_wav_base64

    def streamTTSBytes(self, text):
        """Yield raw int16 PCM bytes per synthesized chunk (no base64)."""
        for wav in self.engine.executor.infer(
                text=text,
                lang=self.engine.config.lang,
                am=self.engine.config.am,
                spk_id=0):
            wav = float2pcm(wav)  # float32 to int16
            wav_bytes = wav.tobytes()  # to bytes
            yield wav_bytes

    def after_process(self, wav):
        """Convert a float waveform into a base64-encoded int16 PCM string."""
        # for tvm
        wav = float2pcm(wav)  # float32 to int16
        wav_bytes = wav.tobytes()  # to bytes
        wav_base64 = base64.b64encode(wav_bytes).decode('utf8')  # to base64
        return wav_base64

    def streamTTS_TVM(self, text):
        # TVM-optimized streaming path; not implemented yet.
        pass
if __name__ == '__main__':
    # Demo: synthesize a sample sentence via both streaming and offline paths.
    text = "啊哈哈哈哈哈哈啊哈哈哈哈哈哈啊哈哈哈哈哈哈啊哈哈哈哈哈哈啊哈哈哈哈哈哈"
    config_path="../../PaddleSpeech/demos/streaming_tts_server/conf/tts_online_application.yaml"
    tts = TTS(config_path)
    for sub_wav in tts.streamTTS(text):
        # Each item is a base64-encoded PCM chunk.
        print("sub_wav_base64: ", len(sub_wav))
    end_wav = tts.offlineTTS(text)
    print(end_wav)
|
|
###########################
# Latent ODEs for Irregularly-Sampled Time Series
# Author: Yulia Rubanova
###########################
import os
import numpy as np
import torch
import torch.nn as nn
import lib.utils as utils
from lib.diffeq_solver import DiffeqSolver
from generate_timeseries import Periodic_1d
from torch.distributions import uniform
from torch.utils.data import DataLoader
from mujoco_physics import HopperPhysics
from physionet import PhysioNet, variable_time_collate_fn, get_data_min_max
from person_activity import PersonActivity, variable_time_collate_fn_activity
from sklearn import model_selection
import random
#####################################################################################################
def parse_datasets(args, device):
    """Build train/test dataloaders for the dataset named by args.dataset.

    Supported datasets: 'hopper' (MuJoCo), 'physionet', 'activity'
    (human activity) and the synthetic 'periodic' 1d dataset.  Returns a
    dict holding inf-generator-wrapped dataloaders, the input dimension,
    batch counts and (for some datasets) classification metadata.
    Raises Exception for an unknown dataset name.
    """
    def basic_collate_fn(
        batch, time_steps, args=args, device=device, data_type="train"
    ):
        # Stack trajectories into one tensor and delegate the
        # interpolation/extrapolation subsampling to utils.
        batch = torch.stack(batch)
        data_dict = {"data": batch, "time_steps": time_steps}
        data_dict = utils.split_and_subsample_batch(
            data_dict, args, data_type=data_type
        )
        return data_dict

    dataset_name = args.dataset
    # Total timepoints = observed points plus extrapolation points.
    n_total_tp = args.timepoints + args.extrap
    max_t_extrap = args.max_t / args.timepoints * n_total_tp

    ##################################################################
    # MuJoCo dataset
    if dataset_name == "hopper":
        dataset_obj = HopperPhysics(
            root="data", download=True, generate=False, device=device
        )
        dataset = dataset_obj.get_dataset()[: args.n]
        dataset = dataset.to(device)
        n_tp_data = dataset[:].shape[1]

        # Time steps that are used later on for extrapolation
        time_steps = torch.arange(start=0, end=n_tp_data, step=1).float().to(device)
        time_steps = time_steps / len(time_steps)

        dataset = dataset.to(device)
        time_steps = time_steps.to(device)

        if not args.extrap:
            # Creating dataset for interpolation
            # sample time points from different parts of the timeline,
            # so that the model learns from different parts of hopper trajectory
            n_traj = len(dataset)
            n_tp_data = dataset.shape[1]
            n_reduced_tp = args.timepoints

            # sample time points from different parts of the timeline,
            # so that the model learns from different parts of hopper trajectory
            start_ind = np.random.randint(
                0, high=n_tp_data - n_reduced_tp + 1, size=n_traj
            )
            end_ind = start_ind + n_reduced_tp
            sliced = []
            for i in range(n_traj):
                sliced.append(dataset[i, start_ind[i] : end_ind[i], :])
            dataset = torch.stack(sliced).to(device)
            time_steps = time_steps[:n_reduced_tp]

        # Split into train and test by the time sequences
        train_y, test_y = utils.split_train_test(dataset, train_fraq=0.8)

        n_samples = len(dataset)
        input_dim = dataset.size(-1)

        batch_size = min(args.batch_size, args.n)
        train_dataloader = DataLoader(
            train_y,
            batch_size=batch_size,
            shuffle=False,
            collate_fn=lambda batch: basic_collate_fn(
                batch, time_steps, data_type="train"
            ),
        )
        test_dataloader = DataLoader(
            test_y,
            batch_size=n_samples,
            shuffle=False,
            collate_fn=lambda batch: basic_collate_fn(
                batch, time_steps, data_type="test"
            ),
        )

        data_objects = {
            "dataset_obj": dataset_obj,
            "train_dataloader": utils.inf_generator(train_dataloader),
            "test_dataloader": utils.inf_generator(test_dataloader),
            "input_dim": input_dim,
            "n_train_batches": len(train_dataloader),
            "n_test_batches": len(test_dataloader),
        }
        return data_objects

    ##################################################################
    # Physionet dataset
    if dataset_name == "physionet":
        train_dataset_obj = PhysioNet(
            "data/physionet",
            train=True,
            quantization=args.quantization,
            download=True,
            n_samples=min(10000, args.n),
            device=device,
        )
        # Use custom collate_fn to combine samples with arbitrary time observations.
        # Returns the dataset along with mask and time steps
        test_dataset_obj = PhysioNet(
            "data/physionet",
            train=False,
            quantization=args.quantization,
            download=True,
            n_samples=min(10000, args.n),
            device=device,
        )

        # Combine and shuffle samples from physionet Train and physionet Test
        total_dataset = train_dataset_obj[: len(train_dataset_obj)]

        if not args.classif:
            # Concatenate samples from original Train and Test sets
            # Only 'training' physionet samples have labels.  Therefore, for the
            # classification task we keep only the labelled 'training' samples
            # and skip the physionet 'test' samples.
            total_dataset = total_dataset + test_dataset_obj[: len(test_dataset_obj)]

        # Shuffle and split
        train_data, test_data = model_selection.train_test_split(
            total_dataset, train_size=0.8, random_state=42, shuffle=True
        )

        record_id, tt, vals, mask, labels = train_data[0]

        n_samples = len(total_dataset)
        input_dim = vals.size(-1)

        batch_size = min(min(len(train_dataset_obj), args.batch_size), args.n)
        # Normalization statistics computed over the whole dataset.
        data_min, data_max = get_data_min_max(total_dataset)

        train_dataloader = DataLoader(
            train_data,
            batch_size=batch_size,
            shuffle=False,
            collate_fn=lambda batch: variable_time_collate_fn(
                batch,
                args,
                device,
                data_type="train",
                data_min=data_min,
                data_max=data_max,
            ),
        )
        test_dataloader = DataLoader(
            test_data,
            batch_size=n_samples,
            shuffle=False,
            collate_fn=lambda batch: variable_time_collate_fn(
                batch,
                args,
                device,
                data_type="test",
                data_min=data_min,
                data_max=data_max,
            ),
        )

        attr_names = train_dataset_obj.params
        data_objects = {
            "dataset_obj": train_dataset_obj,
            "train_dataloader": utils.inf_generator(train_dataloader),
            "test_dataloader": utils.inf_generator(test_dataloader),
            "input_dim": input_dim,
            "n_train_batches": len(train_dataloader),
            "n_test_batches": len(test_dataloader),
            "attr": attr_names,  # optional
            "classif_per_tp": False,  # optional
            "n_labels": 1,
        }  # optional
        return data_objects

    ##################################################################
    # Human activity dataset
    if dataset_name == "activity":
        n_samples = min(10000, args.n)
        dataset_obj = PersonActivity(
            "data/PersonActivity", download=True, n_samples=n_samples, device=device
        )
        print(dataset_obj)
        # Use custom collate_fn to combine samples with arbitrary time observations.
        # Returns the dataset along with mask and time steps

        # Shuffle and split
        train_data, test_data = model_selection.train_test_split(
            dataset_obj, train_size=0.8, random_state=42, shuffle=True
        )

        # Bootstrap resampling (sampling with replacement) of both splits.
        train_data = [
            train_data[i] for i in np.random.choice(len(train_data), len(train_data))
        ]
        test_data = [
            test_data[i] for i in np.random.choice(len(test_data), len(test_data))
        ]

        record_id, tt, vals, mask, labels = train_data[0]
        input_dim = vals.size(-1)

        batch_size = min(min(len(dataset_obj), args.batch_size), args.n)
        train_dataloader = DataLoader(
            train_data,
            batch_size=batch_size,
            shuffle=False,
            collate_fn=lambda batch: variable_time_collate_fn_activity(
                batch, args, device, data_type="train"
            ),
        )
        test_dataloader = DataLoader(
            test_data,
            batch_size=n_samples,
            shuffle=False,
            collate_fn=lambda batch: variable_time_collate_fn_activity(
                batch, args, device, data_type="test"
            ),
        )

        data_objects = {
            "dataset_obj": dataset_obj,
            "train_dataloader": utils.inf_generator(train_dataloader),
            "test_dataloader": utils.inf_generator(test_dataloader),
            "input_dim": input_dim,
            "n_train_batches": len(train_dataloader),
            "n_test_batches": len(test_dataloader),
            "classif_per_tp": True,  # optional
            "n_labels": labels.size(-1),
        }
        return data_objects

    ########### 1d datasets ###########

    # Sampling args.timepoints time points in the interval [0, args.max_t]
    # Sample points for both training sequence and extrapolation (test)
    distribution = uniform.Uniform(torch.Tensor([0.0]), torch.Tensor([max_t_extrap]))
    time_steps_extrap = distribution.sample(torch.Size([n_total_tp - 1]))[:, 0]
    # Force t=0 to be included, then sort the sampled time points.
    time_steps_extrap = torch.cat((torch.Tensor([0.0]), time_steps_extrap))
    time_steps_extrap = torch.sort(time_steps_extrap)[0]

    dataset_obj = None
    ##################################################################
    # Sample a periodic function
    if dataset_name == "periodic":
        dataset_obj = Periodic_1d(
            init_freq=None,
            init_amplitude=1.0,
            final_amplitude=1.0,
            final_freq=None,
            z0=1.0,
        )

    ##################################################################

    if dataset_obj is None:
        raise Exception("Unknown dataset: {}".format(dataset_name))

    dataset = dataset_obj.sample_traj(
        time_steps_extrap, n_samples=args.n, noise_weight=args.noise_weight
    )

    # Process small datasets
    dataset = dataset.to(device)
    time_steps_extrap = time_steps_extrap.to(device)

    train_y, test_y = utils.split_train_test(dataset, train_fraq=0.8)

    n_samples = len(dataset)
    input_dim = dataset.size(-1)

    batch_size = min(args.batch_size, args.n)
    train_dataloader = DataLoader(
        train_y,
        batch_size=batch_size,
        shuffle=False,
        collate_fn=lambda batch: basic_collate_fn(
            batch, time_steps_extrap, data_type="train"
        ),
    )
    test_dataloader = DataLoader(
        test_y,
        batch_size=args.n,
        shuffle=False,
        collate_fn=lambda batch: basic_collate_fn(
            batch, time_steps_extrap, data_type="test"
        ),
    )

    data_objects = {  # "dataset_obj": dataset_obj,
        "train_dataloader": utils.inf_generator(train_dataloader),
        "test_dataloader": utils.inf_generator(test_dataloader),
        "input_dim": input_dim,
        "n_train_batches": len(train_dataloader),
        "n_test_batches": len(test_dataloader),
    }

    return data_objects
|
|
import geopandas as gpd
import numpy as np
import pandas as pd
import pytest
from pytest import approx
from shapely.geometry import LineString, Point, Polygon
import momepy as mm
from momepy import sw_high
from momepy.shape import _make_circle
class TestDimensions:
    """Unit tests for momepy dimension characters, using the bundled
    'bubenec' dataset (buildings, streets, tessellation layers)."""

    def setup_method(self):
        # Fresh GeoDataFrames per test; heights are a synthetic linspace.
        test_file_path = mm.datasets.get_path("bubenec")
        self.df_buildings = gpd.read_file(test_file_path, layer="buildings")
        self.df_streets = gpd.read_file(test_file_path, layer="streets")
        self.df_tessellation = gpd.read_file(test_file_path, layer="tessellation")
        self.df_buildings["height"] = np.linspace(10.0, 30.0, 144)

    def test_Area(self):
        self.df_buildings["area"] = mm.Area(self.df_buildings).series
        check = self.df_buildings.geometry[0].area
        assert self.df_buildings["area"][0] == check

    def test_Perimeter(self):
        self.df_buildings["perimeter"] = mm.Perimeter(self.df_buildings).series
        check = self.df_buildings.geometry[0].length
        assert self.df_buildings["perimeter"][0] == check

    def test_Volume(self):
        # Accepts column names, array-likes, or a derived area column.
        self.df_buildings["area"] = self.df_buildings.geometry.area
        self.df_buildings["volume"] = mm.Volume(
            self.df_buildings, "height", "area"
        ).series
        check = self.df_buildings.geometry[0].area * self.df_buildings.height[0]
        assert self.df_buildings["volume"][0] == check

        area = self.df_buildings.geometry.area
        height = np.linspace(10.0, 30.0, 144)
        self.df_buildings["volume"] = mm.Volume(self.df_buildings, height, area).series
        check = self.df_buildings.geometry[0].area * self.df_buildings.height[0]
        assert self.df_buildings["volume"][0] == check

        self.df_buildings["volume"] = mm.Volume(self.df_buildings, "height").series
        check = self.df_buildings.geometry[0].area * self.df_buildings.height[0]
        assert self.df_buildings["volume"][0] == check

        with pytest.raises(KeyError):
            self.df_buildings["volume"] = mm.Volume(
                self.df_buildings, "height", "nonexistent"
            )

    def test_FloorArea(self):
        # Floor count is height // 3 (3 m per storey).
        self.df_buildings["area"] = self.df_buildings.geometry.area
        self.df_buildings["floor_area"] = mm.FloorArea(
            self.df_buildings, "height", "area"
        ).series
        check = self.df_buildings.geometry[0].area * (self.df_buildings.height[0] // 3)
        assert self.df_buildings["floor_area"][0] == check

        area = self.df_buildings.geometry.area
        height = np.linspace(10.0, 30.0, 144)
        self.df_buildings["floor_area"] = mm.FloorArea(
            self.df_buildings, height, area
        ).series
        assert self.df_buildings["floor_area"][0] == check

        self.df_buildings["floor_area"] = mm.FloorArea(
            self.df_buildings, "height"
        ).series
        assert self.df_buildings["floor_area"][0] == check

        with pytest.raises(KeyError):
            self.df_buildings["floor_area"] = mm.FloorArea(
                self.df_buildings, "height", "nonexistent"
            )

    def test_CourtyardArea(self):
        # Courtyard = area of the exterior ring minus the footprint area.
        self.df_buildings["area"] = self.df_buildings.geometry.area
        self.df_buildings["courtyard_area"] = mm.CourtyardArea(
            self.df_buildings, "area"
        ).series
        check = (
            Polygon(self.df_buildings.geometry[80].exterior).area
            - self.df_buildings.geometry[80].area
        )
        assert self.df_buildings["courtyard_area"][80] == check

        area = self.df_buildings.geometry.area
        self.df_buildings["courtyard_area"] = mm.CourtyardArea(
            self.df_buildings, area
        ).series
        assert self.df_buildings["courtyard_area"][80] == check

        self.df_buildings["courtyard_area"] = mm.CourtyardArea(self.df_buildings).series
        assert self.df_buildings["courtyard_area"][80] == check

        with pytest.raises(KeyError):
            self.df_buildings["courtyard_area"] = mm.CourtyardArea(
                self.df_buildings, "nonexistent"
            )

    def test_LongestAxisLength(self):
        # Longest axis = diameter of the minimal enclosing circle of the hull.
        self.df_buildings["long_axis"] = mm.LongestAxisLength(self.df_buildings).series
        check = (
            _make_circle(self.df_buildings.geometry[0].convex_hull.exterior.coords)[2]
            * 2
        )
        assert self.df_buildings["long_axis"][0] == check

    def test_AverageCharacter(self):
        # Exercises mode/median/mean variants, interquartile/interdecile
        # ranges, multi-mode requests, invalid modes, and islands (NaN).
        spatial_weights = sw_high(k=3, gdf=self.df_tessellation, ids="uID")
        self.df_tessellation["area"] = area = self.df_tessellation.geometry.area
        self.df_tessellation["mesh_ar"] = mm.AverageCharacter(
            self.df_tessellation,
            values="area",
            spatial_weights=spatial_weights,
            unique_id="uID",
            mode="mode",
        ).mode
        self.df_tessellation["mesh_array"] = mm.AverageCharacter(
            self.df_tessellation,
            values=area,
            spatial_weights=spatial_weights,
            unique_id="uID",
            mode="median",
        ).median
        self.df_tessellation["mesh_id"] = mm.AverageCharacter(
            self.df_tessellation,
            spatial_weights=spatial_weights,
            values="area",
            rng=(10, 90),
            unique_id="uID",
        ).mean
        self.df_tessellation["mesh_iq"] = mm.AverageCharacter(
            self.df_tessellation,
            spatial_weights=spatial_weights,
            values="area",
            rng=(25, 75),
            unique_id="uID",
        ).series
        all_m = mm.AverageCharacter(
            self.df_tessellation,
            spatial_weights=spatial_weights,
            values="area",
            unique_id="uID",
        )
        two = mm.AverageCharacter(
            self.df_tessellation,
            spatial_weights=spatial_weights,
            values="area",
            unique_id="uID",
            mode=["mean", "median"],
        )
        with pytest.raises(ValueError):
            self.df_tessellation["mesh_ar"] = mm.AverageCharacter(
                self.df_tessellation,
                values="area",
                spatial_weights=spatial_weights,
                unique_id="uID",
                mode="nonexistent",
            )
        with pytest.raises(ValueError):
            self.df_tessellation["mesh_ar"] = mm.AverageCharacter(
                self.df_tessellation,
                values="area",
                spatial_weights=spatial_weights,
                unique_id="uID",
                mode=["nonexistent", "mean"],
            )
        assert self.df_tessellation["mesh_ar"][0] == approx(249.503, rel=1e-3)
        assert self.df_tessellation["mesh_array"][0] == approx(2623.996, rel=1e-3)
        assert self.df_tessellation["mesh_id"][38] == approx(2250.224, rel=1e-3)
        assert self.df_tessellation["mesh_iq"][38] == approx(2118.609, rel=1e-3)
        assert all_m.mean[0] == approx(2922.957, rel=1e-3)
        assert all_m.median[0] == approx(2623.996, rel=1e-3)
        assert all_m.mode[0] == approx(249.503, rel=1e-3)
        assert all_m.series[0] == approx(2922.957, rel=1e-3)
        assert two.mean[0] == approx(2922.957, rel=1e-3)
        assert two.median[0] == approx(2623.996, rel=1e-3)

        # Dropping rows from the weights must yield NaN for missing ids.
        sw_drop = sw_high(k=3, gdf=self.df_tessellation[2:], ids="uID")
        assert (
            mm.AverageCharacter(
                self.df_tessellation,
                values="area",
                spatial_weights=sw_drop,
                unique_id="uID",
            )
            .series.isna()
            .any()
        )

    def test_StreetProfile(self):
        results = mm.StreetProfile(self.df_streets, self.df_buildings, heights="height")
        assert results.w[0] == 47.9039130128257
        assert results.wd[0] == 0.026104885468705645
        assert results.h[0] == 15.26806526806527
        assert results.p[0] == 0.31872271611668607
        assert results.o[0] == 0.9423076923076923
        assert results.hd[0] == 9.124556701878003

        height = np.linspace(10.0, 30.0, 144)
        results2 = mm.StreetProfile(
            self.df_streets, self.df_buildings, heights=height, tick_length=100
        )
        assert results2.w[0] == 70.7214870365335
        assert results2.wd[0] == 8.50508193935929
        assert results2.h[0] == pytest.approx(23.87158296249206)
        assert results2.p[0] == pytest.approx(0.3375435664999579)
        assert results2.o[0] == 0.5769230769230769
        assert results2.hd[0] == pytest.approx(5.9307227575674)

        results3 = mm.StreetProfile(self.df_streets, self.df_buildings)
        assert results3.w[0] == 47.9039130128257
        assert results3.wd[0] == 0.026104885468705645
        assert results3.o[0] == 0.9423076923076923

        # avoid infinity
        blg = gpd.GeoDataFrame(
            dict(height=[2, 5]),
            geometry=[
                Point(0, 0).buffer(10, cap_style=3),
                Point(30, 0).buffer(10, cap_style=3),
            ],
        )
        lines = gpd.GeoDataFrame(
            geometry=[LineString([(-8, -8), (8, 8)]), LineString([(15, -10), (15, 10)])]
        )
        assert mm.StreetProfile(lines, blg, "height", 2).p.equals(
            pd.Series([np.nan, 0.35])
        )

    def test_WeightedCharacter(self):
        sw = sw_high(k=3, gdf=self.df_tessellation, ids="uID")
        weighted = mm.WeightedCharacter(self.df_buildings, "height", sw, "uID").series
        assert weighted[38] == approx(18.301, rel=1e-3)

        self.df_buildings["area"] = self.df_buildings.geometry.area
        sw = sw_high(k=3, gdf=self.df_tessellation, ids="uID")
        weighted = mm.WeightedCharacter(
            self.df_buildings, "height", sw, "uID", "area"
        ).series
        assert weighted[38] == approx(18.301, rel=1e-3)

        area = self.df_buildings.geometry.area
        sw = sw_high(k=3, gdf=self.df_tessellation, ids="uID")
        weighted = mm.WeightedCharacter(
            self.df_buildings, self.df_buildings.height, sw, "uID", area
        ).series
        assert weighted[38] == approx(18.301, rel=1e-3)

        sw_drop = sw_high(k=3, gdf=self.df_tessellation[2:], ids="uID")
        assert (
            mm.WeightedCharacter(self.df_buildings, "height", sw_drop, "uID")
            .series.isna()
            .any()
        )

    def test_CoveredArea(self):
        sw = sw_high(gdf=self.df_tessellation, k=1, ids="uID")
        covered_sw = mm.CoveredArea(self.df_tessellation, sw, "uID").series
        assert covered_sw[0] == approx(24115.667, rel=1e-3)
        sw_drop = sw_high(k=3, gdf=self.df_tessellation[2:], ids="uID")
        assert mm.CoveredArea(self.df_tessellation, sw_drop, "uID").series.isna().any()

    def test_PerimeterWall(self):
        # Passing precomputed spatial weights must match the default path.
        sw = sw_high(gdf=self.df_buildings, k=1)
        wall = mm.PerimeterWall(self.df_buildings).series
        wall_sw = mm.PerimeterWall(self.df_buildings, sw).series
        assert wall[0] == wall_sw[0]
        assert wall[0] == approx(137.210, rel=1e-3)

    def test_SegmentsLength(self):
        absol = mm.SegmentsLength(self.df_streets).sum
        mean = mm.SegmentsLength(self.df_streets, mean=True).mean
        assert max(absol) == pytest.approx(1907.502238338006)
        assert max(mean) == pytest.approx(249.5698434867373)
|
|
from collections import Counter
import numpy as np
import xmltodict
def parse_xml(fp_path):
    """Read the XML file at fp_path and parse it into an xmltodict mapping."""
    with open(fp_path) as handle:
        return xmltodict.parse(handle.read())
def count_number_of_layers(xdict):
    """Count occurrences of each layer element in a parsed net definition.

    Keys starting with '@' are XML attributes, not layer elements, and
    are skipped.  Returns a plain {layer_name: count} dict.
    """
    net = xdict['net']  # the first field is 'net'
    print(f"Total number of layer entries - {len(net['layer'])}")
    names = [key
             for entry in net['layer']
             for key in entry
             if not key.startswith('@')]
    return dict(Counter(names))
def weights_from_text(s):
    """Parse a whitespace-separated string of numbers into a float64 array.

    str.split() with no arguments splits on any run of whitespace
    (spaces, '\\n', '\\r', tabs), so no manual newline scrubbing is
    needed.  This also fixes the previous behaviour where a bare '\\r'
    between two numbers was deleted, fusing them into a single token.
    """
    return np.array(s.split(), dtype=np.float64)
def get_fc_weights(xdict):
    """Extract the fully-connected layer weights from a parsed net dict.

    Scans every layer for an 'fc_no_bias' element and parses its '#text'
    payload; if several layers match, the last one wins (as before).
    Direct key access replaces the previous triple-nested item loops,
    and a non-dict element is skipped instead of raising AttributeError.

    Raises:
        ValueError: if no fc_no_bias element with weights is present.
    """
    net = xdict['net']
    fc_weights = None
    for layer in net['layer']:
        # xmltodict maps an element with attributes/children to a dict;
        # the raw text payload lives under '#text'.
        node = layer.get('fc_no_bias')
        if isinstance(node, dict) and '#text' in node:
            fc_weights = weights_from_text(node['#text'])
    if fc_weights is None:
        raise ValueError('No FC Weights found')
    assert len(fc_weights) == 32768
    return fc_weights
def get_conv_weights(xdict):
    """Collect every convolutional layer (element 'con') with its metadata.

    Layers are named conv_29 down to conv_1 in document order; exactly
    29 conv layers are expected.
    """
    net = xdict['net']
    layers = []
    idx = 29
    for layer in net['layer']:
        # '@'-prefixed keys are attributes; a conv layer is keyed 'con'.
        if 'con' not in layer:
            continue
        node = layer['con']
        num_filters = node['@num_filters']
        nr = node['@nr']
        nc = node['@nc']
        sy = node['@stride_y']
        sx = node['@stride_x']
        if '#text' in node:
            conv_weights = weights_from_text(node['#text'])
        layers.append({
            'name': f'conv_{idx}',
            'id': layer['@idx'],
            'num_filters': int(num_filters),
            'nr': int(nr),
            'nc': int(nc),
            'sx': int(sx),
            'sy': int(sy),
            'weights': conv_weights,
            'total_weights': len(conv_weights)
        })
        idx = idx - 1
    assert len(layers) == 29
    return layers
def get_affine_weights(xdict, layer_prefix='sc'):
    """Collect every affine layer (element 'affine_con') with its weights.

    Affine elements carry no attributes, so xmltodict maps them straight
    to their text payload; exactly 29 affine layers are expected.
    """
    net = xdict['net']
    layers = []
    idx = 29
    for layer in net['layer']:
        if 'affine_con' not in layer:
            continue
        weights = weights_from_text(layer['affine_con'])
        layers.append({
            'name': f'{layer_prefix}_{idx}',
            'id': layer['@idx'],
            'weights': weights,
            'total_weights': len(weights)
        })
        idx = idx - 1
    assert len(layers) == 29
    return layers
|
|
from numpy import zeros, random, dot#, array, matrix
def sketch(M, k):
# matrix height and width
# M = matrix(M)
# change width and height
# w,h = M.shape
w = len(M)
h = len(M[0])
# generating k random directions simply use vectors of normally distributed random numbers
rd = random.randn(k, h)
# init sketches
sketches = zeros((k, w))
for i in range(k):
for j in range(w):
# v = dot(rd[i], M[j, :])
v = dot(rd[i], M[j])
sketch = 1
if v > 0:
sketch = 1
elif v < 0:
sketch = -1
# v == 0 is of a tiny probability and we can choose +1 or -1 randomly
else:
if random.random() >= 0.5:
sketch = 1
else:
sketch = -1
sketches[i][j] = sketch
return sketches
|
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import tensorflow as tf
import numpy as np
import struct
import time
"""
train data can be find here http://yann.lecun.com/exdb/mnist/
"""
def get_image(num):
    """Yield normalized MNIST training images in batches of `num`.

    Reads 'train-images-idx3-ubyte' (IDX format: 16-byte big-endian
    header of magic/count/rows/cols, then one byte per pixel), scales
    pixels to [0, 1] and yields float arrays of shape (num, 784).
    Trailing images that do not fill a batch are dropped, matching the
    original behaviour.
    """
    with open('train-images-idx3-ubyte', 'rb') as f:
        header = struct.unpack('>4i', f.read(16))
        batch = []
        for _ in range(header[1]):
            pixels = struct.unpack('>784B', f.read(784))
            # Scale in float32 first (as before); stacking upcasts to float64.
            batch.append(np.array(pixels, dtype=np.float32) / 255.0)
            if len(batch) == num:
                # O(batch) list stacking instead of the original quadratic
                # np.append-into-a-growing-array accumulation.
                yield np.array(batch, dtype=np.float64)
                batch = []
def get_label(num):
    """Yield one-hot MNIST training labels in batches of `num`.

    Reads 'train-labels-idx1-ubyte' (IDX format: 8-byte big-endian
    header of magic/count, then one byte per label) and yields float
    arrays of shape (num, 10).  Trailing labels that do not fill a
    batch are dropped, matching the original behaviour.
    """
    with open('train-labels-idx1-ubyte', 'rb') as f:
        header = struct.unpack('>2i', f.read(8))
        batch = []
        for _ in range(header[1]):
            (label,) = struct.unpack('B', f.read(1))
            one_hot = np.zeros(10)
            one_hot[label] = 1.0
            batch.append(one_hot)
            if len(batch) == num:
                # O(batch) list stacking instead of quadratic np.append.
                yield np.array(batch)
                batch = []
if __name__ == "__main__":
    # NOTE(review): this script uses pre-1.0 TensorFlow APIs
    # (placeholder, scalar_summary, SummaryWriter, merge_all_summaries,
    # initialize_all_variables) and only runs on a very old TF release.
    # Create the model: single-layer softmax regression, 784 -> 10 classes.
    x = tf.placeholder(tf.float32, [None, 784], name = "Input")
    W = tf.Variable(tf.zeros([784, 10]))
    b = tf.Variable(tf.zeros([10]))
    y = tf.nn.softmax(tf.matmul(x, W) + b)
    # Define loss and optimizer
    y_ = tf.placeholder(tf.float32, [None, 10], name = "Result")
    cross_entropy = tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(y), reduction_indices=[1]))
    train_step = tf.train.GradientDescentOptimizer(0.2).minimize(cross_entropy)
    sess = tf.InteractiveSession()
    tf.initialize_all_variables().run()
    tf.scalar_summary("cross_entropy", cross_entropy)
    writer = tf.train.SummaryWriter("mnist", sess.graph)
    merged = tf.merge_all_summaries()
    i = 0
    # Train on batches of 100 images, logging the loss summary per step.
    for image, label in zip(get_image(100), get_label(100)):
        i = i + 1
        _, summary = sess.run([train_step, merged], feed_dict={x: image, y_: label})
        writer.add_summary(summary, i)
    #print(sess.run(W))
    # Re-read the training set one image at a time and print predictions
    # next to the ground-truth labels.
    for image, label in zip(get_image(1), get_label(1)):
        classification = sess.run(y, feed_dict={x: image})
        print('----------------')
        print(np.where(classification==classification.max()))
        print(label)
        print(np.where(label==label.max()))
        time.sleep(1)
|
|
from applications.parameter_optimization.optimized_nio_base import OptimizedNIOBase
from algorithms import WaterWaveOptimization
from numpy import array
import logging
# Module-level logger configuration for this optimizer wrapper.
logging.basicConfig()
logger = logging.getLogger('OptimizedWWOFunc')
logger.setLevel('INFO')
class OptimizedWWOFunc(OptimizedNIOBase):
    """Meta-optimization target that tunes WWO's (alpha, lambda) pair.

    Each candidate parameter vector is scored by running Water Wave
    Optimization on the wrapped benchmark for a fixed iteration budget.
    """

    def __init__(self, lower=(1.001, 0), upper=(1.01, 1), dimension=2, benchmark=None):
        super(OptimizedWWOFunc, self).__init__(lower, upper, dimension, benchmark)

    def get_optimum(self):
        # Known-good (alpha, lambda) pair plus the benchmark's optimum value.
        best_params = array([[1.0026, 0.5]])
        return best_params, self.benchmark.get_optimum()[-1]

    def eval(self, params):
        # params[0] -> alpha (wavelength decay base), params[1] -> lambda.
        optimizer = WaterWaveOptimization(alpha=params[0], lamb=params[1],
                                          func=self.benchmark, iterations=200)
        result = optimizer.run_return_best_val()
        self.eval_count += optimizer.eval_count
        return result
|
|
# -*- coding: utf-8 -*-
"""Convolutional MoE layers. The code here is based on the implementation of the standard convolutional layers in Keras.
"""
import numpy as np
import tensorflow as tf
from tensorflow.keras import backend as K
from tensorflow.keras import activations, initializers, regularizers, constraints
from tensorflow.keras.initializers import RandomNormal
from tensorflow.keras.layers import Layer, InputSpec
from tensorflow.python.keras.utils import conv_utils
# FIXME: In tf2.0, this API is updated.
#from keras.utils import conv_utils
class _ConvMoE(Layer):
"""Abstract nD convolution layer mixture of experts (private, used as implementation base).
"""
def __init__(self, rank,
n_filters,
n_experts_per_filter,
kernel_size,
strides=1,
padding='valid',
data_format='channels_last',
dilation_rate=1,
expert_activation=None,
gating_activation=None,
use_expert_bias=True,
use_gating_bias=True,
expert_kernel_initializer_scale=1.0,
gating_kernel_initializer_scale=1.0,
expert_bias_initializer='zeros',
gating_bias_initializer='zeros',
expert_kernel_regularizer=None,
gating_kernel_regularizer=None,
expert_bias_regularizer=None,
gating_bias_regularizer=None,
expert_kernel_constraint=None,
gating_kernel_constraint=None,
expert_bias_constraint=None,
gating_bias_constraint=None,
activity_regularizer=None,
**kwargs):
super(_ConvMoE, self).__init__(**kwargs)
self.rank = rank
self.n_filters = n_filters
self.n_experts_per_filter = n_experts_per_filter
self.n_total_filters = self.n_filters * self.n_experts_per_filter
self.kernel_size = conv_utils.normalize_tuple(kernel_size, rank, 'kernel_size')
self.strides = conv_utils.normalize_tuple(strides, rank, 'strides')
self.padding = conv_utils.normalize_padding(padding)
self.data_format = conv_utils.normalize_data_format(data_format)
self.dilation_rate = conv_utils.normalize_tuple(dilation_rate, rank, 'dilation_rate')
self.expert_activation = activations.get(expert_activation)
self.gating_activation = activations.get(gating_activation)
self.use_expert_bias = use_expert_bias
self.use_gating_bias = use_gating_bias
self.expert_kernel_initializer_scale = expert_kernel_initializer_scale
self.gating_kernel_initializer_scale = gating_kernel_initializer_scale
self.expert_bias_initializer = initializers.get(expert_bias_initializer)
self.gating_bias_initializer = initializers.get(gating_bias_initializer)
self.expert_kernel_regularizer = regularizers.get(expert_kernel_regularizer)
self.gating_kernel_regularizer = regularizers.get(gating_kernel_regularizer)
self.expert_bias_regularizer = regularizers.get(expert_bias_regularizer)
self.gating_bias_regularizer = regularizers.get(gating_bias_regularizer)
self.expert_kernel_constraint = constraints.get(expert_kernel_constraint)
self.gating_kernel_constraint = constraints.get(gating_kernel_constraint)
self.expert_bias_constraint = constraints.get(expert_bias_constraint)
self.gating_bias_constraint = constraints.get(gating_bias_constraint)
self.activity_regularizer = regularizers.get(activity_regularizer)
self.input_spec = InputSpec(ndim=self.rank + 2)
    def build(self, input_shape):
        """Create the expert and gating weights once the input shape is known.

        The expert kernel is a single convolution kernel with
        ``n_filters * n_experts_per_filter`` output channels (all experts are
        evaluated in one convolution).  The gating kernel maps the *entire*
        input tensor (all non-batch dimensions) to one logit per
        (filter, expert) pair.

        Args:
            input_shape: Shape tuple including the batch dimension
                (``rank + 2`` entries).

        Raises:
            ValueError: If the input's channel dimension is undefined.
        """
        if self.data_format == 'channels_first':
            channel_axis = 1
        else:
            channel_axis = -1
        if input_shape[channel_axis] is None:
            raise ValueError('The channel dimension of the inputs should be defined. Found `None`.')
        input_dim = input_shape[channel_axis]
        # Scale the init std by 1/sqrt(fan-in) so activations keep a
        # comparable variance regardless of kernel size / channel count.
        expert_init_std = self.expert_kernel_initializer_scale / np.sqrt(input_dim*np.prod(self.kernel_size))
        # The gating weights see the whole flattened input, so their fan-in is
        # the product of every non-batch input dimension.
        gating_init_std = self.gating_kernel_initializer_scale / np.sqrt(np.prod(input_shape[1:]))
        expert_kernel_shape = self.kernel_size + (input_dim, self.n_total_filters)
        self.expert_kernel = self.add_weight(shape=expert_kernel_shape,
                                             initializer=RandomNormal(mean=0., stddev=expert_init_std),
                                             name='expert_kernel',
                                             regularizer=self.expert_kernel_regularizer,
                                             constraint=self.expert_kernel_constraint)
        # One dense weight tensor per (filter, expert) gating logit.
        gating_kernel_shape = input_shape[1:] + (self.n_filters, self.n_experts_per_filter)
        self.gating_kernel = self.add_weight(shape=gating_kernel_shape,
                                             initializer=RandomNormal(mean=0., stddev=gating_init_std),
                                             name='gating_kernel',
                                             regularizer=self.gating_kernel_regularizer,
                                             constraint=self.gating_kernel_constraint)
        if self.use_expert_bias:
            # Bias shape (1,)*rank + (n_filters, n_experts_per_filter) so it
            # broadcasts over every spatial position of the expert outputs.
            expert_bias_shape = ()
            for i in range(self.rank):
                expert_bias_shape = expert_bias_shape + (1,)
            expert_bias_shape = expert_bias_shape + (self.n_filters, self.n_experts_per_filter)
            self.expert_bias = self.add_weight(shape=expert_bias_shape,
                                               initializer=self.expert_bias_initializer,
                                               name='expert_bias',
                                               regularizer=self.expert_bias_regularizer,
                                               constraint=self.expert_bias_constraint)
        else:
            self.expert_bias = None
        if self.use_gating_bias:
            self.gating_bias = self.add_weight(shape=(self.n_filters, self.n_experts_per_filter),
                                               initializer=self.gating_bias_initializer,
                                               name='gating_bias',
                                               regularizer=self.gating_bias_regularizer,
                                               constraint=self.gating_bias_constraint)
        else:
            self.gating_bias = None
        # Cache the conv output shape and the broadcast shape used in call()
        # to merge the per-sample gate with the
        # (samples, *spatial, n_filters, n_experts_per_filter) expert outputs.
        self.o_shape = self.compute_output_shape(input_shape=input_shape)
        self.new_gating_outputs_shape = (-1,)
        for i in range(self.rank):
            self.new_gating_outputs_shape = self.new_gating_outputs_shape + (1,)
        self.new_gating_outputs_shape = self.new_gating_outputs_shape + (self.n_filters, self.n_experts_per_filter)
        # Set input spec.
        self.input_spec = InputSpec(ndim=self.rank + 2, axes={channel_axis: input_dim})
        self.built = True
    def call(self, inputs):
        """Run every expert convolution, then blend experts with the gate.

        Returns a tensor shaped like a regular convolution output with
        ``n_filters`` channels: for each output filter the
        ``n_experts_per_filter`` expert responses are combined by a
        per-sample weighted sum computed by the gating network.
        """
        # All experts at once: one convolution whose output channel axis has
        # n_filters * n_experts_per_filter entries.
        if self.rank == 1:
            expert_outputs = K.conv1d(
                inputs,
                self.expert_kernel,
                strides=self.strides[0],
                padding=self.padding,
                data_format=self.data_format,
                dilation_rate=self.dilation_rate[0])
        if self.rank == 2:
            expert_outputs = K.conv2d(
                inputs,
                self.expert_kernel,
                strides=self.strides,
                padding=self.padding,
                data_format=self.data_format,
                dilation_rate=self.dilation_rate)
        if self.rank == 3:
            expert_outputs = K.conv3d(
                inputs,
                self.expert_kernel,
                strides=self.strides,
                padding=self.padding,
                data_format=self.data_format,
                dilation_rate=self.dilation_rate)
        # Split the channel axis into (n_filters, n_experts_per_filter).
        expert_outputs = K.reshape(expert_outputs, (-1,) + self.o_shape[1:-1] + (self.n_filters, self.n_experts_per_filter))
        if self.use_expert_bias:
            # NOTE(review): expert_outputs is (rank+3)-dimensional here, so
            # this relies on K.bias_add broadcasting the (1,)*rank bias shape
            # built in build() -- confirm with the backend in use.
            expert_outputs = K.bias_add(
                expert_outputs,
                self.expert_bias,
                data_format=self.data_format)
        if self.expert_activation is not None:
            expert_outputs = self.expert_activation(expert_outputs)
        # Contract every non-batch axis of the input with the gating kernel.
        gating_outputs = tf.tensordot(inputs, self.gating_kernel, axes=self.rank+1) # samples x n_filters x n_experts_per_filter
        if self.use_gating_bias:
            gating_outputs = K.bias_add(
                gating_outputs,
                self.gating_bias,
                data_format=self.data_format)
        if self.gating_activation is not None:
            gating_outputs = self.gating_activation(gating_outputs)
        # Broadcast the gate over all spatial positions, then sum out the
        # expert axis to obtain the mixture output.
        gating_outputs = K.reshape(gating_outputs, self.new_gating_outputs_shape)
        outputs = K.sum(expert_outputs * gating_outputs, axis=-1, keepdims=False)
        return outputs
def compute_output_shape(self, input_shape):
if self.data_format == 'channels_last':
space = input_shape[1:-1]
new_space = []
for i in range(len(space)):
new_dim = conv_utils.conv_output_length(
space[i],
self.kernel_size[i],
padding=self.padding,
stride=self.strides[i],
dilation=self.dilation_rate[i])
new_space.append(new_dim)
return (input_shape[0],) + tuple(new_space) + (self.n_filters,)
if self.data_format == 'channels_first':
space = input_shape[2:]
new_space = []
for i in range(len(space)):
new_dim = conv_utils.conv_output_length(
space[i],
self.kernel_size[i],
padding=self.padding,
stride=self.strides[i],
dilation=self.dilation_rate[i])
new_space.append(new_dim)
return (input_shape[0], self.n_filters) + tuple(new_space)
def get_config(self):
config = {
'rank': self.rank,
'n_filters': self.n_filters,
'n_experts_per_filter':n_experts_per_filter,
'kernel_size': self.kernel_size,
'strides': self.strides,
'padding': self.padding,
'data_format': self.data_format,
'dilation_rate': self.dilation_rate,
'expert_activation': activations.serialize(self.expert_activation),
'gating_activation': activations.serialize(self.gating_activation),
'use_expert_bias': self.use_expert_bias,
'use_gating_bias': self.use_gating_bias,
'expert_kernel_initializer_scale':self.expert_kernel_initializer_scale,
'gating_kernel_initializer_scale':self.gating_kernel_initializer_scale,
'expert_bias_initializer': initializers.serialize(self.expert_bias_initializer),
'gating_bias_initializer': initializers.serialize(self.gating_bias_initializer),
'expert_kernel_regularizer': regularizers.serialize(self.expert_kernel_regularizer),
'gating_kernel_regularizer': regularizers.serialize(self.gating_kernel_regularizer),
'expert_bias_regularizer': regularizers.serialize(self.expert_bias_regularizer),
'gating_bias_regularizer': regularizers.serialize(self.gating_bias_regularizer),
'expert_kernel_constraint': constraints.serialize(self.expert_kernel_constraint),
'gating_kernel_constraint': constraints.serialize(self.gating_kernel_constraint),
'expert_bias_constraint': constraints.serialize(self.expert_bias_constraint),
'gating_bias_constraint': constraints.serialize(self.gating_bias_constraint),
'activity_regularizer': regularizers.serialize(self.activity_regularizer)
}
base_config = super(_ConvMoE, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
class Conv1DMoE(_ConvMoE):
    """1D convolution layer (e.g. temporal convolution).

    Mixture-of-experts variant of a 1D convolution: each of the
    `n_filters` output filters is a gated blend of `n_experts_per_filter`
    expert filters.  All constructor arguments are forwarded unchanged to
    `_ConvMoE` with ``rank=1``.

    # Input shape
        3D tensor with shape: `(batch_size, steps, input_dim)`
    # Output shape
        3D tensor with shape: `(batch_size, new_steps, n_filters)`
        `steps` value might have changed due to padding or strides.
    """
    def __init__(self,
                 n_filters,
                 n_experts_per_filter,
                 kernel_size,
                 strides=1,
                 padding='valid',
                 data_format='channels_last',
                 dilation_rate=1,
                 expert_activation=None,
                 gating_activation=None,
                 use_expert_bias=True,
                 use_gating_bias=True,
                 expert_kernel_initializer_scale=1.0,
                 gating_kernel_initializer_scale=1.0,
                 expert_bias_initializer='zeros',
                 gating_bias_initializer='zeros',
                 expert_kernel_regularizer=None,
                 gating_kernel_regularizer=None,
                 expert_bias_regularizer=None,
                 gating_bias_regularizer=None,
                 expert_kernel_constraint=None,
                 gating_kernel_constraint=None,
                 expert_bias_constraint=None,
                 gating_bias_constraint=None,
                 activity_regularizer=None,
                 **kwargs):
        # Causal padding is only meaningful for temporal (channels_last) data.
        if padding == 'causal':
            if data_format != 'channels_last':
                raise ValueError('When using causal padding in `Conv1DMoE`, `data_format` must be "channels_last" (temporal data).')
        super(Conv1DMoE, self).__init__(
            rank=1,
            n_filters=n_filters,
            n_experts_per_filter=n_experts_per_filter,
            kernel_size=kernel_size,
            strides=strides,
            padding=padding,
            data_format=data_format,
            dilation_rate=dilation_rate,
            expert_activation=expert_activation,
            gating_activation=gating_activation,
            use_expert_bias=use_expert_bias,
            use_gating_bias=use_gating_bias,
            expert_kernel_initializer_scale=expert_kernel_initializer_scale,
            gating_kernel_initializer_scale=gating_kernel_initializer_scale,
            expert_bias_initializer=expert_bias_initializer,
            gating_bias_initializer=gating_bias_initializer,
            expert_kernel_regularizer=expert_kernel_regularizer,
            gating_kernel_regularizer=gating_kernel_regularizer,
            expert_bias_regularizer=expert_bias_regularizer,
            gating_bias_regularizer=gating_bias_regularizer,
            expert_kernel_constraint=expert_kernel_constraint,
            gating_kernel_constraint=gating_kernel_constraint,
            expert_bias_constraint=expert_bias_constraint,
            gating_bias_constraint=gating_bias_constraint,
            activity_regularizer=activity_regularizer,
            **kwargs)
        # Inputs are 3D: (batch, steps, channels).
        self.input_spec = InputSpec(ndim=3)
    def get_config(self):
        # `rank` is fixed to 1 by this subclass and is not user-configurable.
        config = super(Conv1DMoE, self).get_config()
        config.pop('rank')
        return config
class Conv2DMoE(_ConvMoE):
    """2D convolution layer (e.g. spatial convolution over images).

    Mixture-of-experts variant of a 2D convolution: each of the
    `n_filters` output filters is a gated blend of `n_experts_per_filter`
    expert filters.  All constructor arguments are forwarded unchanged to
    `_ConvMoE` with ``rank=2``.

    # Input shape
        4D tensor with shape:
        `(samples, channels, rows, cols)`
        if `data_format` is `"channels_first"`
        or 4D tensor with shape:
        `(samples, rows, cols, channels)`
        if `data_format` is `"channels_last"`.
    # Output shape
        4D tensor with shape:
        `(samples, n_filters, new_rows, new_cols)`
        if `data_format` is `"channels_first"`
        or 4D tensor with shape:
        `(samples, new_rows, new_cols, n_filters)`
        if `data_format` is `"channels_last"`.
        `rows` and `cols` values might have changed due to padding.
    """
    def __init__(self,
                 n_filters,
                 n_experts_per_filter,
                 kernel_size,
                 strides=(1,1),
                 padding='valid',
                 data_format='channels_last',
                 dilation_rate=(1,1),
                 expert_activation=None,
                 gating_activation=None,
                 use_expert_bias=True,
                 use_gating_bias=True,
                 expert_kernel_initializer_scale=1.0,
                 gating_kernel_initializer_scale=1.0,
                 expert_bias_initializer='zeros',
                 gating_bias_initializer='zeros',
                 expert_kernel_regularizer=None,
                 gating_kernel_regularizer=None,
                 expert_bias_regularizer=None,
                 gating_bias_regularizer=None,
                 expert_kernel_constraint=None,
                 gating_kernel_constraint=None,
                 expert_bias_constraint=None,
                 gating_bias_constraint=None,
                 activity_regularizer=None,
                 **kwargs):
        super(Conv2DMoE, self).__init__(
            rank=2,
            n_filters=n_filters,
            n_experts_per_filter=n_experts_per_filter,
            kernel_size=kernel_size,
            strides=strides,
            padding=padding,
            data_format=data_format,
            dilation_rate=dilation_rate,
            expert_activation=expert_activation,
            gating_activation=gating_activation,
            use_expert_bias=use_expert_bias,
            use_gating_bias=use_gating_bias,
            expert_kernel_initializer_scale=expert_kernel_initializer_scale,
            gating_kernel_initializer_scale=gating_kernel_initializer_scale,
            expert_bias_initializer=expert_bias_initializer,
            gating_bias_initializer=gating_bias_initializer,
            expert_kernel_regularizer=expert_kernel_regularizer,
            gating_kernel_regularizer=gating_kernel_regularizer,
            expert_bias_regularizer=expert_bias_regularizer,
            gating_bias_regularizer=gating_bias_regularizer,
            expert_kernel_constraint=expert_kernel_constraint,
            gating_kernel_constraint=gating_kernel_constraint,
            expert_bias_constraint=expert_bias_constraint,
            gating_bias_constraint=gating_bias_constraint,
            activity_regularizer=activity_regularizer,
            **kwargs)
        # Inputs are 4D: (batch, rows, cols, channels) or channels-first.
        self.input_spec = InputSpec(ndim=4)
    def get_config(self):
        # `rank` is fixed to 2 by this subclass and is not user-configurable.
        config = super(Conv2DMoE, self).get_config()
        config.pop('rank')
        return config
class Conv3DMoE(_ConvMoE):
    """3D convolution layer (e.g. spatial convolution over volumes).

    Mixture-of-experts variant of a 3D convolution: each of the
    `n_filters` output filters is a gated blend of `n_experts_per_filter`
    expert filters.  All constructor arguments are forwarded unchanged to
    `_ConvMoE` with ``rank=3``.

    # Input shape
        5D tensor with shape:
        `(samples, channels, conv_dim1, conv_dim2, conv_dim3)`
        if `data_format` is `"channels_first"`
        or 5D tensor with shape:
        `(samples, conv_dim1, conv_dim2, conv_dim3, channels)`
        if `data_format` is `"channels_last"`.
    # Output shape
        5D tensor with shape:
        `(samples, n_filters, new_conv_dim1, new_conv_dim2, new_conv_dim3)`
        if `data_format` is `"channels_first"`
        or 5D tensor with shape:
        `(samples, new_conv_dim1, new_conv_dim2, new_conv_dim3, n_filters)`
        if `data_format` is `"channels_last"`.
        `new_conv_dim1`, `new_conv_dim2` and `new_conv_dim3` values might have changed due to padding.
    """
    def __init__(self,
                 n_filters,
                 n_experts_per_filter,
                 kernel_size,
                 strides=(1,1,1),
                 padding='valid',
                 data_format='channels_last',
                 dilation_rate=(1,1,1),
                 expert_activation=None,
                 gating_activation=None,
                 use_expert_bias=True,
                 use_gating_bias=True,
                 expert_kernel_initializer_scale=1.0,
                 gating_kernel_initializer_scale=1.0,
                 expert_bias_initializer='zeros',
                 gating_bias_initializer='zeros',
                 expert_kernel_regularizer=None,
                 gating_kernel_regularizer=None,
                 expert_bias_regularizer=None,
                 gating_bias_regularizer=None,
                 expert_kernel_constraint=None,
                 gating_kernel_constraint=None,
                 expert_bias_constraint=None,
                 gating_bias_constraint=None,
                 activity_regularizer=None,
                 **kwargs):
        super(Conv3DMoE, self).__init__(
            rank=3,
            n_filters=n_filters,
            n_experts_per_filter=n_experts_per_filter,
            kernel_size=kernel_size,
            strides=strides,
            padding=padding,
            data_format=data_format,
            dilation_rate=dilation_rate,
            expert_activation=expert_activation,
            gating_activation=gating_activation,
            use_expert_bias=use_expert_bias,
            use_gating_bias=use_gating_bias,
            expert_kernel_initializer_scale=expert_kernel_initializer_scale,
            gating_kernel_initializer_scale=gating_kernel_initializer_scale,
            expert_bias_initializer=expert_bias_initializer,
            gating_bias_initializer=gating_bias_initializer,
            expert_kernel_regularizer=expert_kernel_regularizer,
            gating_kernel_regularizer=gating_kernel_regularizer,
            expert_bias_regularizer=expert_bias_regularizer,
            gating_bias_regularizer=gating_bias_regularizer,
            expert_kernel_constraint=expert_kernel_constraint,
            gating_kernel_constraint=gating_kernel_constraint,
            expert_bias_constraint=expert_bias_constraint,
            gating_bias_constraint=gating_bias_constraint,
            activity_regularizer=activity_regularizer,
            **kwargs)
        # Inputs are 5D: (batch, d1, d2, d3, channels) or channels-first.
        self.input_spec = InputSpec(ndim=5)
    def get_config(self):
        # `rank` is fixed to 3 by this subclass and is not user-configurable.
        config = super(Conv3DMoE, self).get_config()
        config.pop('rank')
        return config
# Aliases
# Long-form names kept for backwards compatibility, mirroring the
# Conv1D / Convolution1D naming convention.
Convolution1DMoE = Conv1DMoE
Convolution2DMoE = Conv2DMoE
Convolution3DMoE = Conv3DMoE
|
|
import pandas as pd
from statsmodels.tsa.holtwinters import Holt
import traceback
class EventHandler:
    """Consumes temperature events and publishes Holt forecasts.

    Incoming readings are collected into a time-indexed Series.  Once more
    than ``MIN_TRAIN_DATA`` points exist, only the newest ``MIN_TRAIN_DATA``
    are kept (sliding window) and, on every ``FREQUENCY``-th event, a Holt
    model is fitted and ``DATA_TO_PREDICT`` forecast steps are sent through
    ``return_func``.
    """
    def __init__(self, return_func, config: dict):
        """
        Args:
            return_func: Callable invoked with ``{PREDICTION_KEY: [floats]}``
                whenever a new forecast is available.
            config: Dict providing MIN_TRAIN_DATA, DATA_TO_PREDICT, FREQUENCY
                and the event-payload key names.
        """
        self.return_func = return_func
        # Explicit dtype: a dtype-less empty Series defaults to object dtype
        # (and warns on recent pandas); temperatures are floats.
        self.temperatureData = pd.Series(dtype="float64")
        self.MIN_TRAIN_DATA = config.get("MIN_TRAIN_DATA")  # TODO 100 ~60 sec
        self.DATA_TO_PREDICT = config.get("DATA_TO_PREDICT")  # TODO 100 ~30 sec
        self.FREQUENCY = config.get("FREQUENCY")  # TODO 30
        self.TEMPERATURE_KEY = config.get("TEMPERATURE_KEY")
        self.TIMESTAMP_KEY = config.get("TIMESTAMP_KEY")
        self.PREDICTION_KEY = config.get("PREDICTION_KEY")
        self.count = 0
    def on_event(self, data: dict, topic: str):
        """Handle one incoming event; publish a forecast when one is due."""
        try:
            predictions = self._handle_temperature_data(data)
            if predictions is not None:
                self.return_func({self.PREDICTION_KEY: predictions.to_list()})
        except Exception:
            # Bug fix: the original called traceback.print_tb(e) with the
            # exception object, but print_tb() expects a traceback object and
            # raised a TypeError itself.  print_exc() correctly prints the
            # current exception's traceback.
            traceback.print_exc()
    def _handle_temperature_data(self, data: dict):
        """Store one reading; return a forecast Series or None."""
        self.count = self.count + 1
        tempValue = data[self.TEMPERATURE_KEY]
        datetime = data[self.TIMESTAMP_KEY]
        # Timestamps arrive as epoch milliseconds.
        datetime = pd.to_datetime(datetime, unit="ms")
        # append data point
        self.temperatureData[datetime] = tempValue
        # check if enough data available to train a model
        if(self.temperatureData.size > self.MIN_TRAIN_DATA):
            # reduce to the last MIN_TRAIN_DATA points
            self.temperatureData = self.temperatureData[-self.MIN_TRAIN_DATA:]
            # predict only each FREQUENCY steps
            if self.count % self.FREQUENCY == 0:
                return self.trainAndPredict(self.temperatureData)
    def trainAndPredict(self, series) -> pd.Series:
        """Fit a Holt (double exponential smoothing) model and forecast."""
        model = Holt(series)
        fit = model.fit(optimized=True)
        # predict defined nr of points
        return fit.forecast(self.DATA_TO_PREDICT)
|
|
from typing import Union
import numpy as np
import pandas as pd
import hdbscan
from oolearning.model_wrappers.HyperParamsBase import HyperParamsBase
from oolearning.model_wrappers.ModelExceptions import MissingValueError
from oolearning.model_wrappers.ModelWrapperBase import ModelWrapperBase
class ClusteringHDBSCANHP(HyperParamsBase):
    """Hyper-parameter container for `ClusteringHDBSCAN`.

    Both values are passed straight through to ``hdbscan.HDBSCAN``.

    Args:
        min_cluster_size: smallest group size treated as a cluster.
        min_samples: density conservativeness; ``None`` defers to hdbscan's
            default behavior.
    """
    def __init__(self, min_cluster_size: int = 5, min_samples: Union[int, None] = None):
        super().__init__()
        self._params_dict = {
            'min_cluster_size': min_cluster_size,
            'min_samples': min_samples,
        }
class ClusteringHDBSCAN(ModelWrapperBase):
    """Model wrapper around ``hdbscan.HDBSCAN``.

    HDBSCAN exposes no separate ``predict`` step: clustering happens in
    ``fit_predict``, so the actual model is constructed lazily inside
    ``_predict`` rather than in ``_train``.
    """
    def __init__(self, num_jobs: int = 1):
        super().__init__()
        self._num_jobs = num_jobs

    @property
    def feature_importance(self):
        """Not applicable to a clustering model."""
        raise NotImplementedError()

    def _train(self,
               data_x: pd.DataFrame,
               data_y=None,
               hyper_params: HyperParamsBase = None) -> object:
        # Clustering is unsupervised; a target makes no sense here.
        assert data_y is None
        # noinspection SpellCheckingInspection
        return "hdbscan.HDBSCAN doesn't have a `predict` function, only `fit_predict`, which I call in my `predict`"  # noqa

    def _predict(self, model_object: object, data_x: pd.DataFrame) -> np.ndarray:
        """Cluster `data_x` and return the per-row cluster labels."""
        frame = data_x.copy()
        if frame.isnull().sum().sum() > 0:
            raise MissingValueError()
        hp = self._hyper_params.params_dict
        clusterer = hdbscan.HDBSCAN(
            min_cluster_size=hp['min_cluster_size'],
            min_samples=hp['min_samples'],
            core_dist_n_jobs=self._num_jobs,
        )
        self._model_object = clusterer
        return clusterer.fit_predict(X=frame)
|
|
#!/usr/bin/env python
import numpy as np
from typing import Optional, Callable
from agents.common import PlayerAction, BoardPiece, SavedState, GenMove
from agents.agent_random import generate_move
from agents.agent_minimax import minimax_move
from agents.agent_mcts import mcts_move
from agents.agent_mcts_2 import mcts_move_2
from agents.agent_mcts_nn import mcts_nn_move
def user_move(board: np.ndarray, _player: BoardPiece, saved_state: Optional[SavedState]):
    """Prompt the human player for a column until a legal one is entered.

    Re-prompts on non-numeric input (ValueError raised by the PlayerAction
    conversion) and on out-of-range columns.  Returns the chosen action and
    the caller's saved state, unchanged.
    """
    while True:
        try:
            action = PlayerAction(input("Column? "))
        except ValueError:
            print("Input could not be converted to the dtype PlayerAction, try entering an integer.")
            continue
        if 0 <= action < board.shape[1]:
            return action, saved_state
def human_vs_agent(
    generate_move_1: GenMove,
    generate_move_2: GenMove = user_move,
    player_1: str = "Player 1",
    player_2: str = "Player 2",
    args_1: tuple = (),
    args_2: tuple = (),
    init_1: Callable = lambda board, player: None,
    init_2: Callable = lambda board, player: None,
):
    """Play two games between two move generators, alternating who starts.

    NOTE(review): this function is defined twice in this module; this first
    definition is shadowed by the identical one below it.

    Each agent plays once as the first mover and once as the second (the
    ``play_first`` loop reverses the pairing with ``[::-1]``).  The board is
    printed after every move; a game ends on a win or a draw.
    """
    import time
    from agents.common import PLAYER1, PLAYER2, PLAYER1_PRINT, PLAYER2_PRINT, GameState
    from agents.common import initialize_game_state, pretty_print_board, apply_player_action, check_end_state
    players = (PLAYER1, PLAYER2)
    for play_first in (1, -1):
        # Give each agent a chance to initialize (e.g. warm up) before play.
        for init, player in zip((init_1, init_2)[::play_first], players):
            init(initialize_game_state(), player)
        saved_state = {PLAYER1: None, PLAYER2: None}
        board = initialize_game_state()
        # Slicing with [::-1] on the second pass swaps move order and names.
        gen_moves = (generate_move_1, generate_move_2)[::play_first]
        player_names = (player_1, player_2)[::play_first]
        gen_args = (args_1, args_2)[::play_first]
        playing = True
        while playing:
            for player, player_name, gen_move, args in zip(
                players, player_names, gen_moves, gen_args,
            ):
                t0 = time.time()
                print(pretty_print_board(board))
                print(
                    f'{player_name} you are playing with {PLAYER1_PRINT if player == PLAYER1 else PLAYER2_PRINT}'
                )
                # Agents receive a copy so they cannot mutate the real board.
                action, saved_state[player] = gen_move(
                    board.copy(), player, saved_state[player], *args
                )
                print(f"Move time: {time.time() - t0:.3f}s")
                apply_player_action(board, action, player)
                end_state = check_end_state(board, player, action)
                if end_state != GameState.STILL_PLAYING:
                    print(pretty_print_board(board))
                    if end_state == GameState.IS_DRAW:
                        print("Game ended in draw")
                    else:
                        print(
                            f'{player_name} won playing {PLAYER1_PRINT if player == PLAYER1 else PLAYER2_PRINT}'
                        )
                    playing = False
                    break
def human_vs_agent(
    generate_move_1: GenMove,
    generate_move_2: GenMove = user_move,
    player_1: str = "Player 1",
    player_2: str = "Player 2",
    args_1: tuple = (),
    args_2: tuple = (),
    init_1: Callable = lambda board, player: None,
    init_2: Callable = lambda board, player: None,
):
    """Play two games between two move generators, alternating who starts.

    NOTE(review): this is a byte-for-byte duplicate of the definition above;
    being later in the module, this is the binding actually used.  One of
    the two copies should be deleted.

    Each agent plays once as the first mover and once as the second (the
    ``play_first`` loop reverses the pairing with ``[::-1]``).  The board is
    printed after every move; a game ends on a win or a draw.
    """
    import time
    from agents.common import PLAYER1, PLAYER2, PLAYER1_PRINT, PLAYER2_PRINT, GameState
    from agents.common import initialize_game_state, pretty_print_board, apply_player_action, check_end_state
    players = (PLAYER1, PLAYER2)
    for play_first in (1, -1):
        # Give each agent a chance to initialize (e.g. warm up) before play.
        for init, player in zip((init_1, init_2)[::play_first], players):
            init(initialize_game_state(), player)
        saved_state = {PLAYER1: None, PLAYER2: None}
        board = initialize_game_state()
        # Slicing with [::-1] on the second pass swaps move order and names.
        gen_moves = (generate_move_1, generate_move_2)[::play_first]
        player_names = (player_1, player_2)[::play_first]
        gen_args = (args_1, args_2)[::play_first]
        playing = True
        while playing:
            for player, player_name, gen_move, args in zip(
                players, player_names, gen_moves, gen_args,
            ):
                t0 = time.time()
                print(pretty_print_board(board))
                print(
                    f'{player_name} you are playing with {PLAYER1_PRINT if player == PLAYER1 else PLAYER2_PRINT}'
                )
                # Agents receive a copy so they cannot mutate the real board.
                action, saved_state[player] = gen_move(
                    board.copy(), player, saved_state[player], *args
                )
                print(f"Move time: {time.time() - t0:.3f}s")
                apply_player_action(board, action, player)
                end_state = check_end_state(board, player, action)
                if end_state != GameState.STILL_PLAYING:
                    print(pretty_print_board(board))
                    if end_state == GameState.IS_DRAW:
                        print("Game ended in draw")
                    else:
                        print(
                            f'{player_name} won playing {PLAYER1_PRINT if player == PLAYER1 else PLAYER2_PRINT}'
                        )
                    playing = False
                    break
if __name__ == "__main__":
    # Alternative matchups kept commented out for quick manual
    # experimentation; only minimax-vs-minimax is currently active.
    #human_vs_agent(user_move)
    #human_vs_agent(generate_move)
    #human_vs_agent(minimax_move)
    #human_vs_agent(mcts_move)
    #human_vs_agent(mcts_move, minimax_move)
    # human_vs_agent(mcts_move_2)
    #human_vs_agent(mcts_nn_move)
    human_vs_agent(minimax_move, minimax_move)
    #human_vs_agent(negamax_move)
|
|
"""Tests for normalization functions."""
from . import _unittest as unittest
from datatest._query.query import DictItems
from datatest._query.query import Result
from datatest.requirements import BaseRequirement
from datatest._utils import IterItems
from datatest._normalize import _normalize_lazy
from datatest._normalize import _normalize_eager
from datatest._normalize import normalize
try:
import pandas
except ImportError:
pandas = None
try:
import numpy
except ImportError:
numpy = None
class TestNormalizeLazy(unittest.TestCase):
    """_normalize_lazy should adapt external containers (pandas, numpy)
    into datatest's lazy iteration types and pass everything else through.
    """
    def test_unchanged(self):
        """Plain containers, iterators and non-dict Results pass through."""
        data = [1, 2, 3]
        self.assertIs(_normalize_lazy(data), data, 'should return original object')
        data = iter([1, 2, 3])
        self.assertIs(_normalize_lazy(data), data, 'should return original object')
        data = Result(iter([1, 2, 3]), evaluation_type=tuple)
        self.assertIs(_normalize_lazy(data), data, 'should return original object')
    def test_requirement(self):
        """Dict-typed Results are normalized to IterItems."""
        result = Result(DictItems([('a', 1), ('b', 2)]), evaluation_type=dict)
        normalized = _normalize_lazy(result)
        self.assertIsInstance(normalized, IterItems)
    @unittest.skipIf(not pandas, 'pandas not found')
    def test_normalize_pandas_dataframe(self):
        """DataFrames become IterItems keyed by their (unique) index."""
        df = pandas.DataFrame([(1, 'a'), (2, 'b'), (3, 'c')])
        result = _normalize_lazy(df)
        self.assertIsInstance(result, IterItems)
        expected = {0: (1, 'a'), 1: (2, 'b'), 2: (3, 'c')}
        self.assertEqual(dict(result), expected)
        # Single column.
        df = pandas.DataFrame([('x',), ('y',), ('z',)])
        result = _normalize_lazy(df)
        self.assertIsInstance(result, IterItems)
        expected = {0: 'x', 1: 'y', 2: 'z'}
        self.assertEqual(dict(result), expected, 'single column should be unwrapped')
        # Multi-index.
        df = pandas.DataFrame([('x',), ('y',), ('z',)])
        df.index = pandas.MultiIndex.from_tuples([(0, 0), (0, 1), (1, 0)])
        result = _normalize_lazy(df)
        self.assertIsInstance(result, IterItems)
        expected = {(0, 0): 'x', (0, 1): 'y', (1, 0): 'z'}
        self.assertEqual(dict(result), expected, 'multi-index should be tuples')
        # Indexes must contain unique values, no duplicates
        df = pandas.DataFrame([('x',), ('y',), ('z',)])
        df.index = pandas.Index([0, 0, 1])  # <- Duplicate values.
        with self.assertRaises(ValueError):
            _normalize_lazy(df)
    @unittest.skipIf(not pandas, 'pandas not found')
    def test_normalize_pandas_series(self):
        """Series become IterItems keyed by their index."""
        s = pandas.Series(['x', 'y', 'z'])
        result = _normalize_lazy(s)
        self.assertIsInstance(result, IterItems)
        expected = {0: 'x', 1: 'y', 2: 'z'}
        self.assertEqual(dict(result), expected)
        # Multi-index.
        s = pandas.Series(['x', 'y', 'z'])
        s.index = pandas.MultiIndex.from_tuples([(0, 0), (0, 1), (1, 0)])
        result = _normalize_lazy(s)
        self.assertIsInstance(result, IterItems)
        expected = {(0, 0): 'x', (0, 1): 'y', (1, 0): 'z'}
        self.assertEqual(dict(result), expected, 'multi-index should be tuples')
    @unittest.skipIf(not numpy, 'numpy not found')
    def test_normalize_numpy(self):
        """1D/2D ndarrays (incl. structured/rec arrays) become Results."""
        # Two-dimensional array.
        arr = numpy.array([['a', 'x'], ['b', 'y']])
        lazy = _normalize_lazy(arr)
        self.assertIsInstance(lazy, Result)
        self.assertEqual(lazy.fetch(), [('a', 'x'), ('b', 'y')])
        # Two-valued structured array.
        arr = numpy.array([('a', 1), ('b', 2)],
                          dtype=[('one', 'U10'), ('two', 'i4')])
        lazy = _normalize_lazy(arr)
        self.assertIsInstance(lazy, Result)
        self.assertEqual(lazy.fetch(), [('a', 1), ('b', 2)])
        # Two-valued recarray (record array).
        arr = numpy.rec.array([('a', 1), ('b', 2)],
                              dtype=[('one', 'U10'), ('two', 'i4')])
        lazy = _normalize_lazy(arr)
        self.assertIsInstance(lazy, Result)
        self.assertEqual(lazy.fetch(), [('a', 1), ('b', 2)])
        # One-dimensional array.
        arr = numpy.array(['x', 'y', 'z'])
        lazy = _normalize_lazy(arr)
        self.assertIsInstance(lazy, Result)
        self.assertEqual(lazy.fetch(), ['x', 'y', 'z'])
        # Single-valued structured array.
        arr = numpy.array([('x',), ('y',), ('z',)],
                          dtype=[('one', 'U10')])
        lazy = _normalize_lazy(arr)
        self.assertIsInstance(lazy, Result)
        self.assertEqual(lazy.fetch(), ['x', 'y', 'z'])
        # Single-valued recarray (record array).
        arr = numpy.rec.array([('x',), ('y',), ('z',)],
                              dtype=[('one', 'U10')])
        lazy = _normalize_lazy(arr)
        self.assertIsInstance(lazy, Result)
        self.assertEqual(lazy.fetch(), ['x', 'y', 'z'])
        # Three-dimensional array--conversion is not supported.
        arr = numpy.array([[[1, 3], ['a', 'x']], [[2, 4], ['b', 'y']]])
        result = _normalize_lazy(arr)
        self.assertIs(result, arr, msg='unsupported, returns unchanged')
class TestNormalizeEager(unittest.TestCase):
    """Tests for _normalize_eager, which materializes lazy results."""
    def test_unchanged(self):
        """For given instances, should return original object."""
        requirement = [1, 2, 3]
        self.assertIs(_normalize_eager(requirement), requirement)
        class MyRequirement(BaseRequirement):
            def __init__(self):
                pass
            def __iter__(self):
                return iter([])
            # Bug fix: check_data was declared without `self`, so it was not
            # a valid instance method.
            def check_data(self):
                return None
        requirement = MyRequirement()
        self.assertIs(_normalize_eager(requirement), requirement)
    def test_exhaustible_type(self):
        """Bare iterators need an explicit default_type to be materialized."""
        with self.assertRaises(TypeError, msg='cannot use generic iter'):
            _normalize_eager(iter([1, 2, 3]))
        output = _normalize_eager(iter([1, 2, 3]), default_type=set)
        self.assertEqual(output, set([1, 2, 3]))
    def test_result_object(self):
        """Result objects are evaluated into their evaluation_type."""
        result_obj = Result(iter([1, 2, 3]), evaluation_type=tuple)
        output = _normalize_eager(result_obj)
        self.assertIsInstance(output, tuple)
        self.assertEqual(output, (1, 2, 3))
    def test_iter_items(self):
        """IterItems are materialized into a dict."""
        items = IterItems(iter([(0, 'x'), (1, 'y'), (2, 'z')]))
        output = _normalize_eager(items)
        self.assertIsInstance(output, dict)
        self.assertEqual(output, {0: 'x', 1: 'y', 2: 'z'})
|
|
# -*- coding: utf-8 -*-
"""
The model class for Mesa framework.
Core Objects: Model
"""
import datetime as dt
import random
import numpy
class Model:
    """Base class for agent-based models.

    Subclasses override ``step`` and typically attach a ``schedule``;
    ``run_model`` loops until ``self.running`` is cleared.
    """

    def __init__(self, seed=None):
        """Create a new model. Overload this method with the actual code to
        start the model.

        Args:
            seed: Seed for both the Python and numpy random number
                generators; ``None`` seeds them from system entropy.
        """
        # Remember what we seeded with (a timestamp when no seed was given).
        self.seed = dt.datetime.now() if seed is None else seed
        random.seed(seed)
        numpy.random.seed(seed)
        self.running = True
        self.schedule = None
        self.current_id = 0

    def run_model(self):
        """Step the model until ``self.running`` becomes False. Overload as
        needed.
        """
        while self.running:
            self.step()

    def step(self):
        """Advance the model by a single step; fill in via subclassing."""
        pass

    def next_id(self):
        """Return the next unique agent ID, incrementing the counter."""
        self.current_id += 1
        return self.current_id
|
|
import os
import copy
import numpy as np
import pandas as pd
import torch
from sklearn.metrics import f1_score
from utils import load_model_dict
from models import init_model_dict
from train_test import prepare_trte_data, gen_trte_adj_mat, test_epoch
# Run models on the GPU when one is available.
cuda = True if torch.cuda.is_available() else False
def cal_feat_imp(data_folder, model_folder, view_list, num_class):
    """Estimate per-feature importance by zero-ablation.

    Loads a trained multi-view model, measures its baseline test-set F1,
    then zeroes each feature column (in both train and test views) in turn
    and records the F1 drop, scaled by the view's feature count.

    Args:
        data_folder: Dataset name/directory.  NOTE(review): only 'ROSMAP'
            and 'BRCA' set `adj_parameter`/`dim_he_list`; any other value
            raises UnboundLocalError further down -- consider guarding.
        model_folder: Directory containing the saved model weights.
        view_list: Identifiers of the omics views to load.
        num_class: Number of label classes (2 -> binary F1, else macro F1).

    Returns:
        List of DataFrames (one per view) with columns 'feat_name' and 'imp'.
    """
    num_view = len(view_list)
    dim_hvcdn = pow(num_class,num_view)
    if data_folder == 'ROSMAP':
        adj_parameter = 2
        dim_he_list = [200,200,100]
    if data_folder == 'BRCA':
        adj_parameter = 10
        dim_he_list = [400,400,200]
    data_tr_list, data_trte_list, trte_idx, labels_trte = prepare_trte_data(data_folder, view_list)
    adj_tr_list, adj_te_list = gen_trte_adj_mat(data_tr_list, data_trte_list, trte_idx, adj_parameter)
    featname_list = []
    for v in view_list:
        df = pd.read_csv(os.path.join(data_folder, str(v)+"_featname.csv"), header=None)
        featname_list.append(df.values.flatten())
    dim_list = [x.shape[1] for x in data_tr_list]
    model_dict = init_model_dict(num_view, num_class, dim_list, dim_he_list, dim_hvcdn)
    for m in model_dict:
        if cuda:
            model_dict[m].cuda()
    model_dict = load_model_dict(model_folder, model_dict)
    # Baseline performance with all features intact.
    te_prob = test_epoch(data_trte_list, adj_te_list, trte_idx["te"], model_dict)
    if num_class == 2:
        f1 = f1_score(labels_trte[trte_idx["te"]], te_prob.argmax(1))
    else:
        f1 = f1_score(labels_trte[trte_idx["te"]], te_prob.argmax(1), average='macro')
    feat_imp_list = []
    for i in range(len(featname_list)):
        feat_imp = {"feat_name":featname_list[i]}
        feat_imp['imp'] = np.zeros(dim_list[i])
        for j in range(dim_list[i]):
            # Save the column, zero it everywhere, and re-evaluate.
            feat_tr = data_tr_list[i][:,j].clone()
            feat_trte = data_trte_list[i][:,j].clone()
            data_tr_list[i][:,j] = 0
            data_trte_list[i][:,j] = 0
            # Adjacency depends on features, so it is rebuilt here; the
            # train-side adjacency is recomputed but not used below.
            adj_tr_list, adj_te_list = gen_trte_adj_mat(data_tr_list, data_trte_list, trte_idx, adj_parameter)
            te_prob = test_epoch(data_trte_list, adj_te_list, trte_idx["te"], model_dict)
            if num_class == 2:
                f1_tmp = f1_score(labels_trte[trte_idx["te"]], te_prob.argmax(1))
            else:
                f1_tmp = f1_score(labels_trte[trte_idx["te"]], te_prob.argmax(1), average='macro')
            # Importance = F1 drop, scaled by the view's feature count.
            feat_imp['imp'][j] = (f1-f1_tmp)*dim_list[i]
            # Restore the ablated column.
            data_tr_list[i][:,j] = feat_tr.clone()
            data_trte_list[i][:,j] = feat_trte.clone()
        feat_imp_list.append(pd.DataFrame(data=feat_imp))
    return feat_imp_list
def summarize_imp_feat(featimp_list_list, topn=30):
    """Aggregate feature importances across repetitions and print the top features.

    Args:
        featimp_list_list: List (one entry per repetition) of lists (one
            DataFrame per omics view); each DataFrame must have columns
            'feat_name' and 'imp'.
        topn: Number of top-ranked features to print.
    """
    num_rep = len(featimp_list_list)
    num_view = len(featimp_list_list[0])
    df_tmp_list = []
    for r in range(num_rep):
        for v in range(num_view):
            # Tag each view's importances with its omics index.
            df_tmp = featimp_list_list[r][v].copy(deep=True)
            df_tmp['omics'] = np.full(df_tmp.shape[0], v, dtype=int)
            df_tmp_list.append(df_tmp)
    # Bug fix: the original used DataFrame.append, which was removed in
    # pandas 2.0; a single pd.concat over all repetitions is equivalent.
    df_featimp = pd.concat(df_tmp_list, ignore_index=True)
    # Sum importance across repetitions for each (feature, omics) pair.
    df_featimp_top = df_featimp.groupby(['feat_name', 'omics'])['imp'].sum()
    df_featimp_top = df_featimp_top.reset_index()
    df_featimp_top = df_featimp_top.sort_values(by='imp', ascending=False)
    df_featimp_top = df_featimp_top.iloc[:topn]
    print('{:}\t{:}'.format('Rank', 'Feature name'))
    for i in range(len(df_featimp_top)):
        print('{:}\t{:}'.format(i + 1, df_featimp_top.iloc[i]['feat_name']))
|
|
import torch
import torch.nn.functional as F
from utils.tensor import _transpose_and_gather_feat, _sigmoid
import numpy as np
class DetectionLoss(torch.nn.Module):
    """Combined CenterNet-style detection loss.

    Sums a focal loss on the center heatmap, a masked L1 loss on
    width/height (optionally including an angle channel, periodic or not),
    a masked L1 loss on the center offset, and optionally a keypoint loss.
    """
    def __init__(
            self, hm_weight, wh_weight, off_weight, kp_weight=None,
            angle_weight=1.0, periodic=False, kp_indices=None,
            kp_distance_weight=0.1):
        """
        Args:
            hm_weight: scale for the heatmap focal loss.
            wh_weight: scale for the width/height regression loss.
            off_weight: scale for the center-offset regression loss.
            kp_weight: scale for the keypoint loss; setting it (or
                kp_indices) enables the keypoint term.
            angle_weight: scale for the angle term inside the wh loss.
            periodic: use PeriodicRegL1Loss instead of RegL1Loss for wh.
            kp_distance_weight: weight of the keypoint distance term.
        """
        super().__init__()
        self.crit_hm = FocalLoss(weight=hm_weight)
        self.crit_reg = RegL1Loss(off_weight)
        self.crit_hw = RegL1Loss(
            wh_weight,
            angle_weight) if not periodic else PeriodicRegL1Loss(
            wh_weight,
            angle_weight)
        self.with_keypoints = False
        # NOTE(review): kp_distance_indices is initialized here but never
        # read anywhere in this class -- possibly dead state; confirm.
        self.kp_distance_indices = None
        if kp_weight is not None or kp_indices is not None:
            self.with_keypoints = True
            self.crit_kp = KPSL1Loss(kp_weight, kp_indices, kp_distance_weight)
    def forward(self, output, batch):
        """Compute the total loss and a dict of its components.

        Args:
            output: prediction dict with keys 'hm', 'wh', 'reg' (and 'kps'
                when keypoints are enabled).  NOTE: output['hm'] is replaced
                in-place by its sigmoid below.
            batch: ground-truth dict with matching keys plus 'reg_mask' and
                'ind' (and 'kp_reg_mask' for keypoints).

        Returns:
            (loss, loss_stats): total loss tensor and per-component values.
        """
        hm_loss, wh_loss, off_loss, kp_loss = 0.0, 0.0, 0.0, 0.0
        # Squash raw heatmap logits into (0, 1) before the focal loss.
        output['hm'] = _sigmoid(output['hm'])
        hm_loss += self.crit_hm(output['hm'], batch['hm'])
        wh_loss += self.crit_hw(output['wh'], batch['reg_mask'],
                                batch['ind'], batch['wh'])
        off_loss += self.crit_reg(output['reg'], batch['reg_mask'],
                                  batch['ind'], batch['reg'])
        loss = hm_loss + wh_loss + off_loss
        if self.with_keypoints:
            kp_loss += self.crit_kp(output['kps'], batch['kp_reg_mask'],
                                    batch['ind'], batch['kps'])
            loss += kp_loss
        loss_stats = {'centernet_loss': loss, 'hm_loss': hm_loss,
                      'wh_loss': wh_loss, 'off_loss': off_loss}
        if self.with_keypoints:
            loss_stats['kp_loss'] = kp_loss
        return loss, loss_stats
class FocalLoss(torch.nn.Module):
    """Penalty-reduced pixelwise focal loss (CornerNet variant).

    Expects predictions already squashed into (0, 1) and a ground-truth
    heatmap whose exact object centers equal 1, with Gaussian falloff
    elsewhere.
    """

    def __init__(self, weight=1.0):
        super().__init__()
        self.weight = weight

    def forward(self, out, target):
        return self._neg_loss(out, target)

    def _neg_loss(self, pred, gt):
        """Modified focal loss, as in CornerNet: faster than the reference
        implementation at the cost of a little extra memory.

        Arguments:
            pred (batch x c x h x w): predicted heatmap probabilities.
            gt (batch x c x h x w): ground-truth heatmap.
        """
        peak_mask = gt.eq(1).float()
        other_mask = gt.lt(1).float()
        # Down-weight negatives that lie close to a peak (gt near 1).
        neg_reduction = torch.pow(1 - gt, 4)
        pos_term = torch.log(pred) * torch.pow(1 - pred, 2) * peak_mask
        neg_term = torch.log(1 - pred) * torch.pow(pred, 2) * \
            neg_reduction * other_mask
        n_peaks = peak_mask.float().sum()
        pos_sum = pos_term.sum()
        neg_sum = neg_term.sum()
        # Normalize by the number of positives; with no positives, only the
        # negative term contributes.
        if n_peaks == 0:
            total = 0 - neg_sum
        else:
            total = 0 - (pos_sum + neg_sum) / n_peaks
        return total * self.weight
class RegL1Loss(torch.nn.Module):
    """Masked L1 regression loss gathered at object center indices.

    When the regression target has 3 channels, the last channel is treated
    as an angle: it is squashed with a sigmoid on both prediction and
    target and weighted separately via ``angle_weight``.
    """

    def __init__(self, weight=1.0, angle_weight=1.0):
        super().__init__()
        self.weight = weight
        self.angle_weight = angle_weight

    def forward(self, output, mask, ind, target):
        """Compute the masked, normalized L1 loss.

        Args:
            output: dense feature map predictions.
            mask: per-object validity mask (expanded over channels below).
            ind: flattened spatial indices of object centers.
            target: ground-truth regression values from the batch.
        """
        pred = _transpose_and_gather_feat(output, ind)
        mask = mask.unsqueeze(2).expand_as(pred).float()
        # Mask out-of-place: ``target`` comes straight from the batch dict,
        # and the previous in-place ``target *= mask`` corrupted the ground
        # truth for any later consumer (other losses, logging, next epoch).
        pred = pred * mask
        target = target * mask
        # if we have angle
        if pred.shape[-1] == 3:
            pred_wh = pred[..., 0:2]
            pred_angle = _sigmoid(pred[..., 2:3])
            target_wh = target[..., 0:2]
            target_angle = _sigmoid(target[..., 2:3])
            # reduction='sum' replaces the deprecated size_average=False.
            loss = F.l1_loss(pred_wh, target_wh, reduction='sum')
            loss = loss / (mask.sum() + 1e-4)
            a_loss = F.l1_loss(pred_angle, target_angle, reduction='sum')
            a_loss = a_loss / (mask.sum() + 1e-4)
            loss = loss * self.weight + a_loss * self.angle_weight
        else:
            loss = F.l1_loss(pred, target, reduction='sum')
            loss = loss / (mask.sum() + 1e-4)
            loss = loss * self.weight
        return loss
class KPSL1Loss(torch.nn.Module):
    """Masked L1 loss for keypoint regression.

    Optionally adds an L1 penalty on the pairwise distances between selected
    keypoint pairs (``kps_weight_indices``), encouraging the predicted
    keypoint layout to match the target layout.
    """

    def __init__(self, weight=1.0, kps_weight_indices=None,
                 distance_weight=0.1):
        super().__init__()
        self.weight = weight
        self.distance_weight = distance_weight
        # Pairs of keypoint indices whose mutual distances are supervised.
        self.kps_weight_indices = torch.tensor(
            kps_weight_indices) if kps_weight_indices else None

    def forward(self, output, mask, ind, target):
        pred = _transpose_and_gather_feat(output, ind)
        mask = mask.float()
        # Mask out-of-place: the previous in-place ``target *= mask`` mutated
        # the batch's ground-truth tensor and corrupted it for later users.
        pred = pred * mask
        target = target * mask
        # reduction='sum' replaces the deprecated size_average=False.
        loss = F.l1_loss(pred, target, reduction='sum')
        loss = loss / (mask.sum() + 1e-4)
        loss = loss * self.weight
        if self.kps_weight_indices is not None:
            n, c, k = target.size()
            k = k // 2  # keypoints are stored as interleaved (x, y) pairs
            pred_kps = pred.view(n, c, k, 2)
            target_kps = target.view(n, c, k, 2)
            p_a = pred_kps[:, :, self.kps_weight_indices[:, 0], :]
            p_b = pred_kps[:, :, self.kps_weight_indices[:, 1], :]
            t_a = target_kps[:, :, self.kps_weight_indices[:, 0], :]
            t_b = target_kps[:, :, self.kps_weight_indices[:, 1], :]
            # L1 (Manhattan) distance between each supervised keypoint pair.
            pred_distances = torch.abs(p_a - p_b).sum(-1)
            target_distances = torch.abs(t_a - t_b).sum(-1)
            dist_loss = F.l1_loss(pred_distances, target_distances,
                                  reduction='sum')
            dist_loss = dist_loss / (mask.sum() + 1e-4)
            loss = loss + dist_loss * self.distance_weight
        return loss
class PeriodicRegL1Loss(torch.nn.Module):
    """Masked L1 loss for (w, h, angle) regression with a periodic angle term.

    The angle error is wrapped modulo pi so orientations that differ by a
    half-turn are treated as equivalent.
    """

    def __init__(self, wh_weight=1.0, angle_weight=1.0):
        super().__init__()
        self.wh_weight = wh_weight
        self.angle_weight = angle_weight

    def forward(self, output, mask, ind, target):
        pred = _transpose_and_gather_feat(output, ind)
        mask = mask.unsqueeze(2).expand_as(pred).float()
        # Mask out-of-place: the previous in-place ``target *= mask`` mutated
        # the batch's ground-truth tensor and corrupted it for later users.
        pred = pred * mask
        target = target * mask
        pred_wh = pred[..., 0:2]
        # Map the raw angle logit into (-pi, pi).
        pred_angle = _sigmoid(pred[..., 2:3]) * 2 * np.pi - np.pi
        target_wh = target[..., 0:2]
        target_angle = torch.deg2rad(target[..., 2:3])
        # reduction='sum' replaces the deprecated size_average=False.
        loss = F.l1_loss(pred_wh, target_wh, reduction='sum')
        loss = loss / (mask.sum() + 1e-4)
        # Wrap the angular error into [-pi/2, pi/2) so angles are compared
        # modulo pi.
        periodic_loss = torch.abs(
            torch.remainder(
                (pred_angle - target_angle) - np.pi / 2, np.pi) - np.pi / 2
        )
        periodic_loss = periodic_loss.sum() / (mask.sum() + 1e-4)
        loss = loss * self.wh_weight + periodic_loss * self.angle_weight
        return loss
|
|
import pytest
import numpy as np
from numpy.testing import assert_allclose
from tardis.plasma.properties import YgData
def test_exp1_times_exp():
    """Check YgData.exp1_times_exp against precomputed reference values."""
    inputs = np.array([499.0, 501.0, 710.0])
    expected = np.array([0.00200000797, 0.0019920397, 0.0014064725])
    assert_allclose(YgData.exp1_times_exp(inputs), expected)
|
|
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Copyright © Spyder Project Contributors
#
# Licensed under the terms of the MIT License
# (see spyder/__init__.py for details)
# ----------------------------------------------------------------------------
"""
Tests for the Variable Explorer Collections Editor.
"""
# Standard library imports
import os # Example module for testing display inside CollectionsEditor
from os import path
import copy
import datetime
from xml.dom.minidom import parseString
try:
from unittest.mock import Mock
except ImportError:
from mock import Mock # Python 2
# Third party imports
import numpy
import pandas
import pytest
from flaky import flaky
from qtpy.QtCore import Qt
from qtpy.QtWidgets import QWidget
# Local imports
from spyder.plugins.variableexplorer.widgets.collectionseditor import (
RemoteCollectionsEditorTableView, CollectionsEditorTableView,
CollectionsModel, CollectionsEditor, LARGE_NROWS, ROWS_TO_LOAD)
from spyder.plugins.variableexplorer.widgets.namespacebrowser import (
NamespacesBrowserFinder)
from spyder.plugins.variableexplorer.widgets.tests.test_dataframeeditor import \
generate_pandas_indexes
from spyder.py3compat import PY2
# =============================================================================
# Constants
# =============================================================================
# Full path to this file's parent directory for loading data
LOCATION = path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))
# =============================================================================
# Utility functions
# =============================================================================
def data(cm, i, j):
    """Return the display value of model ``cm`` at row ``i``, column ``j``."""
    return cm.data(cm.index(i, j))
def data_table(cm, n_rows, n_cols):
    """Return the model contents as a list of columns (each a list of rows)."""
    return [[data(cm, row, col) for row in range(n_rows)]
            for col in range(n_cols)]
# =============================================================================
# Pytest Fixtures
# =============================================================================
@pytest.fixture
def nonsettable_objects_data():
    """Return Python objects with immutable attribs to test CollectionEditor."""
    # Each entry pairs a test object with an identical expected object plus
    # the attribute names that should remain unchanged after a failed set.
    test_objs = [pandas.Period("2018-03"), pandas.Categorical([1, 2, 42])]
    expected_objs = [pandas.Period("2018-03"), pandas.Categorical([1, 2, 42])]
    keys_test = [["_typ", "day", "dayofyear", "hour"],
                 ["_typ", "nbytes", "ndim"]]
    return zip(test_objs, expected_objs, keys_test)
# =============================================================================
# Tests
# ============================================================================
def test_rename_variable(qtbot):
    """Test renaming of the correct variable."""
    variables = {'a': 1,
                 'b': 2,
                 'c': 3,
                 'd': '4',
                 'e': 5}
    editor = CollectionsEditorTableView(None, variables.copy())
    qtbot.addWidget(editor)
    # Select row 1 ('b') and rename it to 'b2'.
    editor.setCurrentIndex(editor.model.index(1, 0))
    editor.rename_item(new_name='b2')
    # Row count is unchanged and keys remain sorted.
    assert editor.model.rowCount() == 5
    assert data(editor.model, 0, 0) == 'a'
    assert data(editor.model, 1, 0) == 'b2'
    assert data(editor.model, 2, 0) == 'c'
    assert data(editor.model, 3, 0) == 'd'
    assert data(editor.model, 4, 0) == 'e'
    # Reset variables and try renaming one again
    new_variables = {'a': 1,
                     'b': 2,
                     'b2': 2,
                     'c': 3,
                     'd': '4',
                     'e': 5}
    editor.set_data(new_variables.copy())
    editor.adjust_columns()
    editor.setCurrentIndex(editor.model.index(1, 0))
    editor.rename_item(new_name='b3')
    assert editor.model.rowCount() == 6
    assert data(editor.model, 0, 0) == 'a'
    assert data(editor.model, 1, 0) == 'b2'
    assert data(editor.model, 2, 0) == 'b3'
    assert data(editor.model, 3, 0) == 'c'
    assert data(editor.model, 4, 0) == 'd'
    assert data(editor.model, 5, 0) == 'e'
def test_remove_variable(qtbot):
    """Test removing of the correct variable."""
    variables = {'a': 1,
                 'b': 2,
                 'c': 3,
                 'd': '4',
                 'e': 5}
    editor = CollectionsEditorTableView(None, variables.copy())
    qtbot.addWidget(editor)
    # Select row 1 ('b') and delete it without a confirmation dialog.
    editor.setCurrentIndex(editor.model.index(1, 0))
    editor.remove_item(force=True)
    assert editor.model.rowCount() == 4
    assert data(editor.model, 0, 0) == 'a'
    assert data(editor.model, 1, 0) == 'c'
    assert data(editor.model, 2, 0) == 'd'
    assert data(editor.model, 3, 0) == 'e'
    # Reset variables and try removing one again
    editor.set_data(variables.copy())
    editor.adjust_columns()
    editor.setCurrentIndex(editor.model.index(1, 0))
    editor.remove_item(force=True)
    assert editor.model.rowCount() == 4
    assert data(editor.model, 0, 0) == 'a'
    assert data(editor.model, 1, 0) == 'c'
    assert data(editor.model, 2, 0) == 'd'
    assert data(editor.model, 3, 0) == 'e'
def test_remove_remote_variable(qtbot, monkeypatch):
    """Test the removing of the correct remote variable."""
    variables = {'a': {'type': 'int',
                       'size': 1,
                       'color': '#0000ff',
                       'view': '1'},
                 'b': {'type': 'int',
                       'size': 1,
                       'color': '#0000ff',
                       'view': '2'},
                 'c': {'type': 'int',
                       'size': 1,
                       'color': '#0000ff',
                       'view': '3'},
                 'd': {'type': 'str',
                       'size': 1, 'color': '#800000',
                       'view': '4'},
                 'e': {'type': 'int',
                       'size': 1,
                       'color': '#0000ff',
                       'view': '5'}}
    editor = RemoteCollectionsEditorTableView(None, variables.copy())
    qtbot.addWidget(editor)
    editor.setCurrentIndex(editor.model.index(1, 0))
    # Monkey patch remove variables
    def remove_values(ins, names):
        # The view asks the kernel to delete 'b'; emulate the kernel's reply
        # by resetting the editor data without that entry.
        assert names == ['b']
        data = {'a': {'type': 'int',
                      'size': 1,
                      'color': '#0000ff',
                      'view': '1'},
                'c': {'type': 'int',
                      'size': 1,
                      'color': '#0000ff',
                      'view': '3'},
                'd': {'type': 'str',
                      'size': 1, 'color': '#800000',
                      'view': '4'},
                'e': {'type': 'int',
                      'size': 1,
                      'color': '#0000ff',
                      'view': '5'}}
        editor.set_data(data)
    monkeypatch.setattr(
        'spyder.plugins.variableexplorer.widgets'
        '.collectionseditor.RemoteCollectionsEditorTableView.remove_values',
        remove_values)
    editor.remove_item(force=True)
    assert editor.model.rowCount() == 4
    assert data(editor.model, 0, 0) == 'a'
    assert data(editor.model, 1, 0) == 'c'
    assert data(editor.model, 2, 0) == 'd'
    assert data(editor.model, 3, 0) == 'e'
    # Reset variables and try removing one again
    editor.set_data(variables.copy())
    editor.adjust_columns()
    editor.setCurrentIndex(editor.model.index(1, 0))
    editor.remove_item(force=True)
    assert editor.model.rowCount() == 4
    assert data(editor.model, 0, 0) == 'a'
    assert data(editor.model, 1, 0) == 'c'
    assert data(editor.model, 2, 0) == 'd'
    assert data(editor.model, 3, 0) == 'e'
def test_filter_rows(qtbot):
    """Test rows filtering."""
    df = pandas.DataFrame(['foo', 'bar'])
    editor = CollectionsEditorTableView(None, {'dfa': df, 'dfb': df})
    # Wire up a finder widget so setText() drives the model's filter regex.
    editor.finder = NamespacesBrowserFinder(editor,
                                            editor.set_regex)
    qtbot.addWidget(editor)
    # Initially two rows
    assert editor.model.rowCount() == 2
    # Match two rows by name
    editor.finder.setText("df")
    assert editor.model.rowCount() == 2
    # Match two rows by type
    editor.finder.setText("DataFrame")
    assert editor.model.rowCount() == 2
    # Only one match
    editor.finder.setText("dfb")
    assert editor.model.rowCount() == 1
    # No match
    editor.finder.setText("dfbc")
    assert editor.model.rowCount() == 0
def test_create_dataframeeditor_with_correct_format(qtbot, monkeypatch):
    """Test that the dataframe editor is created with the configured format."""
    MockDataFrameEditor = Mock()
    mockDataFrameEditor_instance = MockDataFrameEditor()
    monkeypatch.setattr('spyder.plugins.variableexplorer.widgets.collectionsdelegate.DataFrameEditor',
                        MockDataFrameEditor)
    df = pandas.DataFrame(['foo', 'bar'])
    editor = CollectionsEditorTableView(None, {'df': df})
    qtbot.addWidget(editor)
    editor.set_dataframe_format('%10d')
    # Opening the value editor must propagate the format to its data model.
    editor.delegate.createEditor(None, None, editor.model.index(0, 3))
    mockDataFrameEditor_instance.dataModel.set_format.assert_called_once_with('%10d')
def test_accept_sig_option_changed_from_dataframeeditor(qtbot, monkeypatch):
    """Test that format changes in a child DataFrameEditor propagate back."""
    df = pandas.DataFrame(['foo', 'bar'])
    editor = CollectionsEditorTableView(None, {'df': df})
    qtbot.addWidget(editor)
    editor.set_dataframe_format('%10d')
    assert editor.source_model.dataframe_format == '%10d'
    editor.delegate.createEditor(None, None, editor.model.index(0, 3))
    dataframe_editor = next(iter(editor.delegate._editors.values()))['editor']
    qtbot.addWidget(dataframe_editor)
    # Emitting the option-changed signal must update the parent's format.
    dataframe_editor.sig_option_changed.emit('dataframe_format', '%5f')
    assert editor.source_model.dataframe_format == '%5f'
def test_collectionsmodel_with_two_ints():
    """Test the collections model over a plain dict of two ints."""
    coll = {'x': 1, 'y': 2}
    cm = CollectionsModel(None, coll)
    assert cm.rowCount() == 2
    assert cm.columnCount() == 5
    # dict is unordered, so first row might be x or y
    assert data(cm, 0, 0) in {'x',
                              'y'}
    if data(cm, 0, 0) == 'x':
        row_with_x = 0
        row_with_y = 1
    else:
        row_with_x = 1
        row_with_y = 0
    # Columns checked below: 1 = type, 2 = size, 3 = value repr.
    assert data(cm, row_with_x, 1) == 'int'
    assert data(cm, row_with_x, 2) == 1
    assert data(cm, row_with_x, 3) == '1'
    assert data(cm, row_with_y, 0) == 'y'
    assert data(cm, row_with_y, 1) == 'int'
    assert data(cm, row_with_y, 2) == 1
    assert data(cm, row_with_y, 3) == '2'
def test_collectionsmodel_with_index():
    """Test the collections model over every kind of pandas Index."""
    # Regression test for spyder-ide/spyder#3380,
    # modified for spyder-ide/spyder#3758.
    for rng_name, rng in generate_pandas_indexes().items():
        coll = {'rng': rng}
        cm = CollectionsModel(None, coll)
        assert data(cm, 0, 0) == 'rng'
        assert data(cm, 0, 1) == rng_name
        # '(20L,)' is the Python 2 rendering of the same shape.
        assert data(cm, 0, 2) == '(20,)' or data(cm, 0, 2) == '(20L,)'
        try:
            assert data(cm, 0, 3) == rng._summary()
        except AttributeError:
            # Older pandas versions expose summary() publicly instead.
            assert data(cm, 0, 3) == rng.summary()
def test_shows_dataframeeditor_when_editing_index(qtbot, monkeypatch):
    """Test that editing a pandas Index value opens a DataFrameEditor."""
    for rng_name, rng in generate_pandas_indexes().items():
        MockDataFrameEditor = Mock()
        mockDataFrameEditor_instance = MockDataFrameEditor()
        monkeypatch.setattr('spyder.plugins.variableexplorer.widgets.collectionsdelegate.DataFrameEditor',
                            MockDataFrameEditor)
        coll = {'rng': rng}
        editor = CollectionsEditorTableView(None, coll)
        editor.delegate.createEditor(None, None,
                                     editor.model.index(0, 3))
        # The (mocked) DataFrameEditor instance must have been shown once.
        mockDataFrameEditor_instance.show.assert_called_once_with()
@pytest.mark.skipif(os.name == 'nt' and PY2, reason='Fails on Win and py2')
def test_sort_collectionsmodel():
    """Test sorting the model by the index, type, size and value columns."""
    var_list1 = [0, 1, 2]
    var_list2 = [3, 4, 5, 6]
    var_dataframe1 = pandas.DataFrame([[1, 2, 3], [20, 30, 40], [2, 2, 2]])
    var_dataframe2 = pandas.DataFrame([[1, 2, 3], [20, 30, 40]])
    var_series1 = pandas.Series(var_list1)
    var_series2 = pandas.Series(var_list2)
    coll = [1, 3, 2]
    cm = CollectionsModel(None, coll)
    assert cm.rowCount() == 3
    assert cm.columnCount() == 5
    cm.sort(0) # sort by index
    assert data_table(cm, 3, 4) == [[0, 1, 2],
                                    ['int', 'int', 'int'],
                                    [1, 1, 1],
                                    ['1', '3', '2']]
    cm.sort(3) # sort by value
    assert data_table(cm, 3, 4) == [[0, 2, 1],
                                    ['int', 'int', 'int'],
                                    [1, 1, 1],
                                    ['1', '2', '3']]
    coll = [1, var_list1, var_list2, var_dataframe1, var_dataframe2,
            var_series1, var_series2]
    cm = CollectionsModel(None, coll)
    assert cm.rowCount() == 7
    assert cm.columnCount() == 5
    cm.sort(1) # sort by type
    assert data_table(cm, 7, 4) == [
        [3, 4, 5, 6, 0, 1, 2],
        ['DataFrame', 'DataFrame', 'Series', 'Series', 'int', 'list', 'list'],
        ['(3, 3)', '(2, 3)', '(3,)', '(4,)', 1, 3, 4],
        ['Column names: 0, 1, 2',
         'Column names: 0, 1, 2',
         'Series object of pandas.core.series module',
         'Series object of pandas.core.series module',
         '1',
         '[0, 1, 2]',
         '[3, 4, 5, 6]']]
    cm.sort(2) # sort by size
    # Two orderings are accepted: mixed int/shape-string "sizes" compare
    # differently across Python/pandas versions.
    assert data_table(cm, 7, 4) == [
        [3, 4, 5, 6, 0, 1, 2],
        ['DataFrame', 'DataFrame', 'Series', 'Series', 'int', 'list', 'list'],
        ['(2, 3)', '(3,)', '(3, 3)', '(4,)', 1, 3, 4],
        ['Column names: 0, 1, 2',
         'Column names: 0, 1, 2',
         'Series object of pandas.core.series module',
         'Series object of pandas.core.series module',
         '1',
         '[0, 1, 2]',
         '[3, 4, 5, 6]']] or data_table(cm, 7, 4) == [
        [0, 1, 2, 4, 5, 3, 6],
        [u'int', u'list', u'list', u'DataFrame', u'Series', u'DataFrame',
         u'Series'],
        [1, 3, 4, u'(2, 3)', u'(3,)', u'(3, 3)', u'(4,)'],
        ['1',
         '[0, 1, 2]',
         '[3, 4, 5, 6]',
         'Column names: 0, 1, 2',
         'Series object of pandas.core.series module',
         'Column names: 0, 1, 2',
         'Series object of pandas.core.series module',
         ]]
def test_sort_collectionsmodel_with_many_rows():
    """Test sorting a lazily-loaded model (more rows than ROWS_TO_LOAD)."""
    coll = list(range(2*LARGE_NROWS))
    cm = CollectionsModel(None, coll)
    assert cm.rowCount() == cm.rows_loaded == ROWS_TO_LOAD
    assert cm.columnCount() == 5
    cm.sort(1) # This was causing an issue (#5232)
    cm.fetchMore()
    assert cm.rowCount() == 2 * ROWS_TO_LOAD
    for _ in range(3):
        cm.fetchMore()
    # After enough fetches the full collection is loaded.
    assert cm.rowCount() == len(coll)
def test_rename_and_duplicate_item_in_collection_editor():
    """Test that rename/duplicate actions are enabled per collection type."""
    # Each value is (collection, rename_enabled, duplicate_enabled).
    collections = {'list': ([1, 2, 3], False, True),
                   'tuple': ((1, 2, 3), False, False),
                   'dict': ({'a': 1, 'b': 2}, True, True)}
    for coll, rename_enabled, duplicate_enabled in collections.values():
        coll_copy = copy.copy(coll)
        editor = CollectionsEditorTableView(None, coll)
        # Actions start enabled until a selection refreshes the menu.
        assert editor.rename_action.isEnabled()
        assert editor.duplicate_action.isEnabled()
        editor.setCurrentIndex(editor.model.index(0, 0))
        editor.refresh_menu()
        assert editor.rename_action.isEnabled() == rename_enabled
        assert editor.duplicate_action.isEnabled() == duplicate_enabled
        if isinstance(coll, list):
            editor.duplicate_item()
            assert editor.source_model.get_data() == coll_copy + [coll_copy[0]]
def test_edit_mutable_and_immutable_types(monkeypatch):
    """
    Test that mutable objs/vals are editable in VarExp; immutable ones aren't.
    Regression test for spyder-ide/spyder#5991.
    """
    # Mock every editor widget the delegate can open so no real GUI appears.
    MockQLineEdit = Mock()
    attr_to_patch_qlineedit = ('spyder.plugins.variableexplorer.widgets.' +
                               'collectionsdelegate.QLineEdit')
    monkeypatch.setattr(attr_to_patch_qlineedit, MockQLineEdit)
    MockTextEditor = Mock()
    attr_to_patch_textedit = ('spyder.plugins.variableexplorer.widgets.' +
                              'collectionsdelegate.TextEditor')
    monkeypatch.setattr(attr_to_patch_textedit, MockTextEditor)
    MockQDateTimeEdit = Mock()
    attr_to_patch_qdatetimeedit = ('spyder.plugins.variableexplorer.widgets.' +
                                   'collectionsdelegate.QDateTimeEdit')
    monkeypatch.setattr(attr_to_patch_qdatetimeedit, MockQDateTimeEdit)
    MockCollectionsEditor = Mock()
    mockCollectionsEditor_instance = MockCollectionsEditor()
    attr_to_patch_coledit = ('spyder.plugins.variableexplorer.widgets.' +
                             'collectionseditor.CollectionsEditor')
    monkeypatch.setattr(attr_to_patch_coledit, MockCollectionsEditor)
    # One element per editor type: scalar, long text, datetime, list, tuple.
    list_test = [1, "012345678901234567901234567890123456789012",
                 datetime.datetime(2017, 12, 24, 7, 9), [1, 2, 3], (2, "eggs")]
    tup_test = tuple(list_test)
    # Tests for mutable type (list) #
    editor_list = CollectionsEditorTableView(None, list_test)
    # Directly editable values inside list
    editor_list_value = editor_list.delegate.createEditor(
        None, None, editor_list.model.index(0, 3))
    assert editor_list_value is not None
    assert MockQLineEdit.call_count == 1
    # Text Editor for long text inside list
    editor_list.delegate.createEditor(None, None,
                                      editor_list.model.index(1, 3))
    assert MockTextEditor.call_count == 2
    assert not MockTextEditor.call_args[1]["readonly"]
    # Datetime inside list
    editor_list_datetime = editor_list.delegate.createEditor(
        None, None, editor_list.model.index(2, 3))
    assert editor_list_datetime is not None
    assert MockQDateTimeEdit.call_count == 1
    # List inside list
    editor_list.delegate.createEditor(None, None,
                                      editor_list.model.index(3, 3))
    assert mockCollectionsEditor_instance.show.call_count == 1
    assert not mockCollectionsEditor_instance.setup.call_args[1]["readonly"]
    # Tuple inside list
    editor_list.delegate.createEditor(None, None,
                                      editor_list.model.index(4, 3))
    assert mockCollectionsEditor_instance.show.call_count == 2
    assert mockCollectionsEditor_instance.setup.call_args[1]["readonly"]
    # Tests for immutable type (tuple) #
    editor_tup = CollectionsEditorTableView(None, tup_test)
    # Directly editable values inside tuple
    editor_tup_value = editor_tup.delegate.createEditor(
        None, None, editor_tup.model.index(0, 3))
    assert editor_tup_value is None
    assert MockQLineEdit.call_count == 1
    # Text Editor for long text inside tuple
    editor_tup.delegate.createEditor(None, None,
                                     editor_tup.model.index(1, 3))
    assert MockTextEditor.call_count == 4
    assert MockTextEditor.call_args[1]["readonly"]
    # Datetime inside tuple
    editor_tup_datetime = editor_tup.delegate.createEditor(
        None, None, editor_tup.model.index(2, 3))
    assert editor_tup_datetime is None
    assert MockQDateTimeEdit.call_count == 1
    # List inside tuple
    editor_tup.delegate.createEditor(None, None,
                                     editor_tup.model.index(3, 3))
    assert mockCollectionsEditor_instance.show.call_count == 3
    assert mockCollectionsEditor_instance.setup.call_args[1]["readonly"]
    # Tuple inside tuple
    editor_tup.delegate.createEditor(None, None,
                                     editor_tup.model.index(4, 3))
    assert mockCollectionsEditor_instance.show.call_count == 4
    assert mockCollectionsEditor_instance.setup.call_args[1]["readonly"]
@flaky(max_runs=3)
def test_view_module_in_coledit():
    """
    Test that modules don't produce an error when opening in Variable Explorer.
    Also check that they are set as readonly. Regression test for
    spyder-ide/spyder#6080.
    """
    editor = CollectionsEditor()
    editor.setup(os, "module_test", readonly=False)
    # Modules must be forced to readonly even when editable was requested.
    assert editor.widget.editor.readonly
def test_notimplementederror_multiindex():
    """
    Test that the NotImplementedError when scrolling a MultiIndex is handled.
    Regression test for spyder-ide/spyder#6284.
    """
    time_deltas = [pandas.Timedelta(minutes=minute)
                   for minute in range(5, 35, 5)]
    time_delta_multiindex = pandas.MultiIndex.from_product([[0, 1, 2, 3, 4],
                                                            time_deltas])
    col_model = CollectionsModel(None, time_delta_multiindex)
    assert col_model.rowCount() == col_model.rows_loaded == ROWS_TO_LOAD
    assert col_model.columnCount() == 5
    # Each fetchMore() must load another ROWS_TO_LOAD rows without raising.
    col_model.fetchMore()
    assert col_model.rowCount() == 2 * ROWS_TO_LOAD
    for _ in range(3):
        col_model.fetchMore()
    assert col_model.rowCount() == 5 * ROWS_TO_LOAD
def test_editor_parent_set(monkeypatch):
    """
    Test that editors have their parent set so they close with Spyder.
    Regression test for spyder-ide/spyder#5696.
    """
    # Mocking and setup
    test_parent = QWidget()
    MockCollectionsEditor = Mock()
    attr_to_patch_coledit = ('spyder.plugins.variableexplorer.widgets.' +
                             'collectionseditor.CollectionsEditor')
    monkeypatch.setattr(attr_to_patch_coledit, MockCollectionsEditor)
    MockArrayEditor = Mock()
    attr_to_patch_arredit = ('spyder.plugins.variableexplorer.widgets.' +
                             'collectionsdelegate.ArrayEditor')
    monkeypatch.setattr(attr_to_patch_arredit, MockArrayEditor)
    MockDataFrameEditor = Mock()
    attr_to_patch_dfedit = ('spyder.plugins.variableexplorer.widgets.' +
                            'collectionsdelegate.DataFrameEditor')
    monkeypatch.setattr(attr_to_patch_dfedit, MockDataFrameEditor)
    MockTextEditor = Mock()
    attr_to_patch_textedit = ('spyder.plugins.variableexplorer.widgets.' +
                              'collectionsdelegate.TextEditor')
    monkeypatch.setattr(attr_to_patch_textedit, MockTextEditor)
    MockObjectExplorer = Mock()
    attr_to_patch_objectexplorer = ('spyder.plugins.variableexplorer.widgets.'
                                    + 'objectexplorer.ObjectExplorer')
    monkeypatch.setattr(attr_to_patch_objectexplorer, MockObjectExplorer)
    # One value per editor type: collection, array, dataframe, module, text.
    editor_data = [[0, 1, 2, 3, 4],
                   numpy.array([1.0, 42.0, 1337.0]),
                   pandas.DataFrame([[1, 2, 3], [20, 30, 40]]),
                   os,
                   "012345678901234567890123456789012345678901234567890123456"]
    col_editor = CollectionsEditorTableView(test_parent, editor_data)
    assert col_editor.parent() is test_parent
    for idx, mock_class in enumerate([MockCollectionsEditor,
                                      MockArrayEditor,
                                      MockDataFrameEditor,
                                      MockObjectExplorer,
                                      MockTextEditor]):
        col_editor.delegate.createEditor(col_editor.parent(), None,
                                         col_editor.model.index(idx, 3))
        # MockCollectionsEditor is also used for the final (tuple) entry,
        # hence the idx // 4 correction to its expected call count.
        assert mock_class.call_count == 1 + (idx // 4)
        assert mock_class.call_args[1]["parent"] is test_parent
def test_xml_dom_element_view():
    """
    Test that XML DOM ``Element``s are able to be viewed in CollectionsEditor.
    Regression test for spyder-ide/spyder#5642.
    """
    xml_path = path.join(LOCATION, 'dom_element_test.xml')
    with open(xml_path) as xml_file:
        xml_data = xml_file.read()
    xml_content = parseString(xml_data)
    xml_element = xml_content.getElementsByTagName("note")[0]
    col_editor = CollectionsEditor(None)
    col_editor.setup(xml_element)
    col_editor.show()
    # The editor must yield a truthy value instead of raising.
    assert col_editor.get_value()
    col_editor.accept()
def test_pandas_dateoffset_view():
    """
    Test that pandas ``DateOffset`` objs can be viewed in CollectionsEditor.
    Regression test for spyder-ide/spyder#6729.
    """
    test_dateoffset = pandas.DateOffset()
    col_editor = CollectionsEditor(None)
    col_editor.setup(test_dateoffset)
    col_editor.show()
    # The editor must yield a truthy value instead of raising.
    assert col_editor.get_value()
    col_editor.accept()
def test_set_nonsettable_objects(nonsettable_objects_data):
    """
    Test that errors trying to set attributes in ColEdit are handled properly.
    Unit regression test for issues spyder-ide/spyder#6727 and
    spyder-ide/spyder#6728.
    """
    for test_obj, expected_obj, keys in nonsettable_objects_data:
        col_model = CollectionsModel(None, test_obj)
        indicies = [col_model.get_index_from_key(key) for key in keys]
        for idx in indicies:
            # Setting must fail gracefully (falsy return, no exception).
            assert not col_model.set_value(idx, "2")
        # Due to numpy's deliberate breakage of __eq__ comparison
        assert all([key == "_typ" or
                    (getattr(col_model.get_data().__obj__, key)
                     == getattr(expected_obj, key)) for key in keys])
@flaky(max_runs=3)
@pytest.mark.no_xvfb
def test_edit_nonsettable_objects(qtbot, nonsettable_objects_data):
    """
    Test that errors trying to edit attributes in ColEdit are handled properly.
    Integration regression test for issues spyder-ide/spyder#6727 and
    spyder-ide/spyder#6728.
    """
    for test_obj, expected_obj, keys in nonsettable_objects_data:
        col_editor = CollectionsEditor(None)
        col_editor.setup(test_obj)
        col_editor.show()
        qtbot.waitForWindowShown(col_editor)
        view = col_editor.widget.editor
        indicies = [view.source_model.get_index_from_key(key) for key in keys]
        for _ in range(3):
            qtbot.keyClick(view, Qt.Key_Right)
        last_row = -1
        rows_to_test = [index.row() for index in indicies]
        # Simulate a user editing each target row through the keyboard.
        for row in rows_to_test:
            for _ in range(row - last_row - 1):
                qtbot.keyClick(view, Qt.Key_Down)
            qtbot.keyClick(view, Qt.Key_Space)
            qtbot.keyClick(view.focusWidget(), Qt.Key_Backspace)
            qtbot.keyClicks(view.focusWidget(), "2")
            qtbot.keyClick(view.focusWidget(), Qt.Key_Down)
            last_row = row
        qtbot.wait(100)
        # Due to numpy's deliberate breakage of __eq__ comparison
        assert all([key == "_typ" or (getattr(col_editor.get_value(), key)
                    == getattr(expected_obj, key)) for key in keys])
        col_editor.accept()
        qtbot.wait(200)
        # Same reason as above
        assert all([key == "_typ" or (getattr(col_editor.get_value(), key)
                    == getattr(expected_obj, key)) for key in keys])
        assert all([getattr(test_obj, key)
                    == getattr(expected_obj, key) for key in keys])
def test_collectionseditor_with_class_having_buggy_copy(qtbot):
    """
    Test that editor for object whose .copy() returns a different type is
    readonly; cf. spyder-ide/spyder#6936.
    """
    class MyDictWithBuggyCopy(dict):
        # Inherited dict.copy() returns a plain dict, not this subclass.
        pass
    md = MyDictWithBuggyCopy({1: 2})
    editor = CollectionsEditor()
    editor.setup(md)
    assert editor.widget.editor.readonly
def test_collectionseditor_with_class_having_correct_copy(qtbot):
    """
    Test that editor for object whose .copy() returns the same type is not
    readonly; cf. spyder-ide/spyder#6936.
    """
    class MyDictWithCorrectCopy(dict):
        def copy(self):
            # Preserve the subclass type so the editor can edit a safe copy.
            return MyDictWithCorrectCopy(self)
    md = MyDictWithCorrectCopy({1: 2})
    editor = CollectionsEditor()
    editor.setup(md)
    assert not editor.widget.editor.readonly
# Allow running this test module directly with python.
if __name__ == "__main__":
    pytest.main()
|
|
'''An implementation of the GLYMMR algorithm using some of the pre-existing CCARL framework.
GLYMMR Algorithm (from Cholleti et al, 2012)
1. Initialize each unique node among all the binding glycans as a subtree of size 1.
Let this set be S.
2. For each subtree in S:
- Calculate the number of binding glycans containing the subtree.
- If the subtree occurs in more than Tb (threshold parameter) binding glycans then:
- add it to a set of expandable subtrees (ES), and also to a set of possible motifs (PM).
3. If the set ES is not empty:
- Create an empty set NewS.
    - For each subtree in ES
- Expand the subtree by adding a node such that the new subtree exists in at least one of the binding glycans and add these subtrees to a set NewS.
- S = NewS
- Go to step 2
4. For each subtree in the set of possible motifs PM:
- Count the number of binding glycans (ns) and non-binding glycans (nw) containing the subtree.
- If ns > Tb and nw < Tn, where Tb and Tn are threshold parameters, add the subtree to a set of motifs, M. Monosaccharides are eliminated from this set.
5. Sort the set of motifs, M, in descending order of the number of binding glycans (ns) containing M and in ascending order of the number of non-binding glycans (nw) containing M.
6. From the sorted set M, add the motifs that do not exist in any non-binders to the output list, L; and add the top m motifs that exist in binders and non-binders to L.
The value “m” is a numeric parameter that can be set by user. If the filtering parameter is set to True, then remove motif sub-structures from L that exist in the same number
of referenced binding glycans compared to a larger motif.
Notes on GLYMMR Algorithm:
1. Steps 1-3 appear to be equivalent to frequent subtree mining from binding glycans. The Tb parameter is equivalent to the minimum support threshold.
2. Step 4 enforces that subtrees are mostly absent in negative binding glycans through a threshold parameter `Tn`. Monosaccharides are removed at this stage.
3. Default settings from original paper are m=3 (the number of motifs that exist in both binders and non-binders), filtering off (do not remove substructures).
Original paper set Tb = 4, for CFG 4.0. We can convert this to a minimum support value by dividing by the number of glycans in the positive set.
'''
from ccarl.glycan_graph_methods import generate_digraph_from_glycan_string
from ccarl.frequent_subtrees import get_frequent_subtrees
from ccarl.glycan_features import generate_features_from_subtrees
import numpy as np
def run_glymmr(glycans, binding, threshold_binders=4, minimum_support_threshold=None,
               threshold_negative=None, m=3, filt=False):
    '''Run GLYMMR algorithm on set of glycans.

    Args:
        glycans (list): A list of glycan strings in CFG format.
        binding (list/np.array): A list/array of binding scores, where 1 indicates positive binding
            and 0 indicates no binding.
        threshold_binders (int): The minimum number of glycans that should contain a subtree/motif.
        minimum_support_threshold (float): A minimum support threshold (between 0 and 1) to be used instead
            of `threshold_binders`. If this is set, then the threshold_binders parameter is ignored.
        threshold_negative (int): The maximum number of negative binders that a motif can be found in for it
            to be included.
        m (int): The number of motifs to return that are found in at least 1 negative binder.
        filt (bool): Remove motifs that are a substructure of a larger motif, and are perfectly correlated
            with the presence of the larger motif (not currently implemented!)
    Returns:
        list: A list of motifs as networkx.DiGraph objects.
    '''
    if filt:
        raise NotImplementedError("Filtering substructures is not implemented yet!")
    # Accept plain lists for `binding`: the boolean masks below (binding == 0)
    # only work on numpy arrays, not Python lists.
    binding = np.asarray(binding)
    glycan_graphs = [generate_digraph_from_glycan_string(x) for x in glycans]
    positive_glycans = [glycan for glycan, score in zip(glycan_graphs, binding) if score == 1]
    if minimum_support_threshold is None:
        # Convert the absolute Tb threshold into a relative support value.
        support = threshold_binders / len(positive_glycans)
    else:
        support = minimum_support_threshold
    mining_results = get_frequent_subtrees(positive_glycans, support=support)
    subtrees = [x['subtree'] for x in mining_results]
    # Remove single node subtrees (monosaccharides are excluded from motifs).
    subtrees = [x for x in subtrees if len(x.nodes()) > 1]
    subtree_features = [generate_features_from_subtrees(subtrees, glycan) for glycan in glycan_graphs]
    feature_array = np.array(subtree_features)
    neg_set_count = np.sum(feature_array[binding == 0], axis=0)
    pos_set_count = np.sum(feature_array[binding == 1], axis=0)
    in_positive_and_not_negative = np.logical_and(np.logical_not(neg_set_count), pos_set_count)
    # Step 4: apply the Tn threshold -- exclude subtrees present in too many
    # non-binding glycans. (Previously this mask was computed but never used.)
    if threshold_negative is not None:
        allowed = neg_set_count < threshold_negative
    else:
        allowed = np.ones(len(neg_set_count), dtype=bool)
    # Sort primarily on number of hits in negative set, then on number of hits in positive set to break ties.
    # The original algorithm description was ambiguous, and the sorting appears to be unpredictable in the published paper.
    # Doing what seems to be the most reasonable interpretation of the original GLYMMR paper.
    sorted_motifs = sorted(
        [(neg_set_count[i], pos_set_count[i], i) for i in range(len(neg_set_count)) if allowed[i]],
        key=lambda x: (x[0], -x[1]))
    # Keep every motif absent from all negatives, plus the top m that also
    # appear in negatives.
    number_of_motifs = np.sum(in_positive_and_not_negative) + m
    final_motifs = [subtrees[index] for _neg, _pos, index in sorted_motifs[0:number_of_motifs]]
    return final_motifs
|
|
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
import xlrd
#----------------funções auxiliares ------------------
def inverteDicionario(dicionario):
    """Return a new dict with the same key/value pairs in reversed insertion order.

    Only the ordering of the items is reversed; every key keeps its own value.
    Relies on Python 3.7+ dicts preserving insertion order.

    Args:
        dicionario (dict): mapping whose item order should be reversed.

    Returns:
        dict: a new dictionary with the items in reverse order.
    """
    return dict(reversed(list(dicionario.items())))
def geraPlot(arquivo, comMedia, comMediannaBarra,comMediaBarra):
    """Build and show a horizontal box plot from an Excel (.xls) workbook.

    Args:
        arquivo: path to the .xls workbook (data is read from sheet index 0).
        comMedia: if True, draw a dotted vertical line at the mean of the
            reference family ("REFERENCIA"/"REF"/"REFERENCE"/...).
        comMediannaBarra: if True, draw the median line inside each box.
        comMediaBarra: if True, show mean lines and annotate each box with
            its mean value.

    Returns:
        bool: True when the number of column names matches the number of data
        series and the plot was shown; False otherwise.
    """
    # ------------- Default definitions --------------------
    # text sizes used for tick labels / titles
    tamanho_texto_super_pequeno="xx-small"
    tamanho_texto_pequeno="small"
    tamanho_texto_normal="large"
    tamanho_texto_grande="x-large"
    # green used for the mean
    verde="#008000"
    # colour used for the median (named "vermelho"/red, actually a purple tone)
    vermelho='#950070'
    # colour cycle assigned to the non-reference families
    cor=['yellow','orange','pink','lightgreen','green','lightblue','blue','purple', 'brown','gray','black']
    #cor=['yellow','pink','lightblue','red','gray', 'brown','lightgreen','black','purple']
    # example workbook path (kept from the original source)
    #arquivo= r'C:\Users\ericaugustin\Documents\Prototipos\2021\Box-Plot\boxplot.xls'
    # ------------- Program --------------------
    # load the spreadsheet
    w = xlrd.open_workbook(arquivo)
    # sheet the data is extracted from
    sheet = w.sheet_by_index(0) # index 0 (original comment said "second sheet" -- TODO confirm)
    nomes = sheet.row_values(0) # all values of the first row (series names)
    # chart attributes live in column index 1
    atributos_grafico = sheet.col_values(1)
    #print(atributos_grafico)
    titulo=atributos_grafico[0]
    eixoX= atributos_grafico[1]
    eixoY = atributos_grafico[2]
    numero_ensaios= int(atributos_grafico[3])+1
    # number of populated columns in the sheet
    #cols = sheet.ncols # old approach
    cols=0
    for i in nomes:
        if (i!=""):
            cols+=1
    # pick a tick-label size based on how many columns there are
    if (cols>20):
        tamanho_texto=tamanho_texto_super_pequeno
    elif (cols>10):
        tamanho_texto=tamanho_texto_pequeno
    else:
        tamanho_texto=tamanho_texto_normal
    # family of each test series, read from row 59 (skipping the 3 label cols)
    familias = sheet.row_values(59)[3:]
    # build the family -> colour mapping; the reference family is always red
    corGrafico={}
    j=0;
    for i in familias:
        if i not in corGrafico:
            if (str(i).upper()=="REFERENCIA"or str(i).upper()=="REFERÊNCIA" or str(i).upper()=="REF" or str(i).upper()=="REFERENCE"):
                # remember the column index of the reference series
                # NOTE(review): only bound when a reference family exists;
                # comMedia=True without one raises NameError -- TODO confirm inputs.
                numero_linha_media_referencia=j
                corGrafico[i]='red'
            else:
                corGrafico[i]=cor[j];
        j=j+1
    # invert the dict so the reference ends up first in the legend
    corGrafico=inverteDicionario(corGrafico)
    # number of data rows (test runs)
    rows=numero_ensaios
    # row/column data excluding the header row and the first 3 columns
    col_data = [sheet.row_values(i, 3, cols) for i in range(rows)]
    row_data = [sheet.col_values(i, 1, rows) for i in range(2,cols)]
    # cleaned data matrix and per-column means
    clean = []
    media=[]
    # compute the mean of each column, dropping trailing cells that are empty,
    # strings, or otherwise not averageable
    for i in range(1,cols-2):
        c = row_data[i]
        erro=True
        # assume there is an error and loop until averaging succeeds
        while(erro):
            try:
                # if an error is detected the offending value is popped in except
                media.append(np.average(c))
                # no error found: leave the loop
                erro=False
            except:
                # drop the last value until no more errors occur
                c.pop(len(c)-1)
        # append the cleaned column to the matrix
        clean.append(c)
    # create the plotting area
    fig1, ax1 = plt.subplots()
    # group the data columns into [dataset] and the column names into [nomes]
    #dataset=np.array(clean,float) # no longer used after refactoring
    dataset=clean
    #nomes= np.array(["Nome1","Nome2","Nome3"]) # no longer used after refactoring
    #nomes=nomes[3:numero_ensaios+2]
    nomes=nomes[3:]
    if (len(nomes)==len(dataset)):
        print("Numero de nomes:",len(nomes))
        print("numero de dados:",len(dataset))
        # line properties for the median and the mean
        if comMediannaBarra:
            propriedades_medianas={'color':vermelho,'linewidth':1.5}
        else:
            # zero linewidth hides the median line
            propriedades_medianas={'color':vermelho,'linewidth':0}
        if comMediaBarra:
            propriedades_medias={"linestyle":"-","color":verde}
        else:
            # NOTE(review): both branches are identical -- possibly meant to differ.
            propriedades_medias={"linestyle":"-","color":verde}
        # box plot: horizontal (vert=False), means shown/lined when comMediaBarra
        #graf=ax1.boxplot(dataset,labels=nomes,vert=False,showmeans=True,meanline=True,medianprops=propriedades_medianas,meanprops=propriedades_medias,flierprops={"marker":"+"},patch_artist=True,showfliers=False)
        # same call but with outliers shown
        graf=ax1.boxplot(dataset,labels=nomes,vert=False,showmeans=comMediaBarra,meanline=comMediaBarra,medianprops=propriedades_medianas,meanprops=propriedades_medias,flierprops={"marker":"+"},patch_artist=True)
        # annotate each box with the value of its mean
        if comMediaBarra:
            # vertical offset of the text above each box
            if len(media)<3:
                offset=1.1
            elif len(media)<4:
                offset=1.2
            else:
                offset=1.3
            for i in range(len(media)):
                # format precision depends on the magnitude of the mean
                if media[i]>1000000:
                    # NOTE(review): "{:d}" raises ValueError for float means -- TODO confirm.
                    ax1.text(media[i],i+offset,"{:d}".format(media[i]),size=tamanho_texto,color=verde,horizontalalignment ='center')
                elif media [i]>1000:
                    ax1.text(media[i],i+offset,"{:.1f}".format(media[i]),size=tamanho_texto,color=verde,horizontalalignment ='center')
                elif media [i]>1:
                    ax1.text(media[i],i+offset,"{:.2f}".format(media[i]),size=tamanho_texto,color=verde,horizontalalignment ='center')
                elif media [i]>0.0001:
                    ax1.text(media[i],i+offset,"{:.4f}".format(media[i]),size=tamanho_texto,color=verde,horizontalalignment ='center')
                else:
                    ax1.text(media[i],i+offset,"{}".format(media[i]),size=tamanho_texto,color=verde,horizontalalignment ='center')
        # chart title and axis labels
        ax1.set_title(titulo, fontsize=tamanho_texto_grande,fontweight="bold")
        ax1.set_xlabel(eixoX,fontsize=tamanho_texto_normal,fontweight="bold")
        ax1.set_ylabel(eixoY,fontsize=tamanho_texto_normal,fontweight="bold")
        # layout adjustments for better framing
        fig1.subplots_adjust(left=0.17,right=0.98,top=0.96,bottom=0.07)
        # draw the reference-mean line when comMedia=True
        print(comMedia)
        if comMedia:
            ax1.axvline(media[numero_linha_media_referencia], ymin=0, ymax=len(media),linewidth=1, color=verde,linestyle=':')
        # grid lines (disabled)
        #ax1.yaxis.grid(True)
        #ax1.xaxis.grid(True)
        # paint each box with the colour of its family
        legenda=[]
        # build the legend entries
        for patch, color in zip(graf['boxes'], familias):
            patch.set_facecolor(corGrafico[color])
        # one legend patch (colour + name) per family
        #print(corGrafico)
        for f in corGrafico:
            legenda.append(mpatches.Patch(corGrafico[f],facecolor=corGrafico[f], label=f))
        # attach the legend (draggable)
        ax1.legend(handles=legenda).set_draggable(True)
        # show the chart
        plt.show()
        return True
    else:
        return False
|
|
#!/usr/bin/env python
import yaml
import numpy as np
from os.path import join
import matplotlib, os
try: os.environ['DISPLAY']
except KeyError: matplotlib.use('Agg')
from matplotlib import font_manager
import pylab as plt
from ugali.utils.shell import mkdir
import ugali.analysis.loglike
from ugali.utils.projector import cel2gal, gal2cel
import ugali.utils.plotting
from ugali.utils.config import Config
from ugali.analysis.kernel import Disk
from ugali.isochrone import Padova
import ugali.analysis.source
from dsphs.like.lnlfn import ProfileLimit
from mpl_toolkits.axes_grid1 import make_axes_locatable
from matplotlib.ticker import MaxNLocator
from matplotlib import patches
import mpl_toolkits.axes_grid1.axes_divider as axes_divider
from collections import OrderedDict as odict
#def scan(loglike,xdict,ydict):
# xpar,xvals = xdict.items()[0]
# ypar,yvals = ydict.items()[0]
# nx,ny = len(xvals),len(yvals)
#
# lnl,rich = [],[]
# for i in range(ny):
# print i,yvals[i]
# loglike.value(**{ypar:yvals[i]})
# for j in range(nx):
# loglike.value(**{xpar:xvals[j]})
# l,r,junk = loglike.fit_richness()
# rich.append(r)
# lnl.append(l)
# return np.array(lnl).reshape(ny,nx),np.array(rich).reshape(ny,nx)
def scan(loglike, xdict, ydict, zdict):
    """Brute-force the log-likelihood over a 3-D grid of parameter values.

    Each of xdict/ydict/zdict is a single-item mapping {parameter_name: values}.
    The z parameter is the slowest (outermost) loop, x the fastest (innermost).

    Returns a tuple of:
      - array of (z, y, x) grid points, shape (nz*ny*nx, 3);
      - log-likelihood values reshaped to (nz, ny, nx);
      - fitted richness values reshaped to (nz, ny, nx).
    """
    xpar, xvals = next(iter(xdict.items()))
    ypar, yvals = next(iter(ydict.items()))
    zpar, zvals = next(iter(zdict.items()))
    grid_points, loglikes, richnesses = [], [], []
    for iz, zv in enumerate(zvals):
        print(iz, "%s: %.2f" % (zpar, zv))
        loglike.set_params(**{zpar: zv})
        for yv in yvals:
            loglike.set_params(**{ypar: yv})
            for xv in xvals:
                loglike.set_params(**{xpar: xv})
                lnlval, richval, _ = loglike.fit_richness()
                richnesses.append(richval)
                loglikes.append(lnlval)
                grid_points.append((zv, yv, xv))
    shape = (len(zvals), len(yvals), len(xvals))
    return (np.array(grid_points),
            np.array(loglikes).reshape(shape),
            np.array(richnesses).reshape(shape))
def plot(lnl):
    """Placeholder for plotting the scanned log-likelihood grid (not implemented)."""
    pass
# Per-parameter bound factories: each lambda maps the current best-fit value
# to the [lo, hi] interval scanned around it. odict keeps the parameter order
# stable for downstream iteration.
bounds = odict([
    ('richness',lambda r: [r-np.sqrt(r),r+np.sqrt(r)]),
    #('lon',lambda l: [l-0.03,l+0.03]),
    #('lat',lambda b: [b-.03,b+0.03]),
    ('lon',lambda l: [l-0.05,l+0.05]),
    ('lat',lambda b: [b-0.05,b+0.05]),
    ('extension',lambda e: [0.1*e,10.*e]),
    ('ellipticity',lambda e: [0.0001,0.8]),
    ('position_angle',lambda e: [0,180]),
    ('distance_modulus',lambda m: [m-1,m+1]),
    ('age',lambda a: [8,13.5]),
    ('metallicity',lambda z: [1e-4,1e-3]),
])
if __name__ == "__main__":
    # Command-line driver: scan the likelihood over a 3-D parameter grid for
    # each named target and profile each parameter for MLE + confidence limits.
    from ugali.utils.parser import Parser
    parser = Parser(description="Plot fit diagnostics")
    parser.add_coords(radius=True,targets=True)
    parser.add_config(default='config_y2q1_mcmc.yaml',nargs='?')
    parser.add_force()
    parser.add_argument('-n','--name',default=None)
    parser.add_argument('--xpar',default='extension',help="Fast parameter")
    parser.add_argument('--xbins',default=10)
    parser.add_argument('--ypar',default='distance_modulus',help="Slow parameter")
    parser.add_argument('--ybins',default=10)
    parser.add_argument('--zpar',default='age',help="Slowest parameter")
    parser.add_argument('--zbins',default=10)
    parser.add_argument('--alpha',default=0.1)
    opts = parser.parse_args()

    alpha = opts.alpha
    config = opts.config
    dirname = 'mcmc_v01'
    srcmdl = 'srcmdl.yaml'
    if opts.name: names = [opts.name]
    else: names = opts.names
    outdir = mkdir('plots')

    # default age / metallicity (z is re-bound to the scan grid below)
    a = 13.5
    z = 0.0001
    for name in names:
        if opts.name is not None:
            if name.lower() != opts.name.lower(): continue
        print(name)
        #ra,dec = params['ra'],params['dec']
        #lon,lat = cel2gal(ra,dec)
        #params['lon'],params['lat'] = lon,lat
        #params['age'] = a
        #params['metallicity'] = z
        #srcmdl = join(dirname,'%s_mcmc.yaml'%name)
        # build the source and its log-likelihood from the model file
        source = ugali.analysis.source.Source()
        source.load(srcmdl,name)
        loglike = ugali.analysis.loglike.createLoglike(config,source)
        params = source.params
        xpar = opts.xpar
        ypar = opts.ypar
        zpar = opts.zpar
        xval = params[xpar]
        yval = params[ypar]
        zval = params[zpar]
        fmt = '%s = %.5g [+%.2g,-%.2g]'
        #loglike = ugali.analysis.loglike.createLoglike(config,lon,lat)
        ##loglike.models['color'] = Padova(age=12.5,z=0.0002,hb_spread=0)
        #loglike.value(**params)
        # tighten the scan bounds around the current best fit for x and y;
        # the z parameter keeps the bounds it already carries
        for p in [xpar,ypar]:
            b = bounds[p]
            v = source.params[p].value
            source.params[p].set_bounds(b(v))
        xmin,xmax = source.params[xpar].bounds
        ymin,ymax = source.params[ypar].bounds
        zmin,zmax = source.params[zpar].bounds
        x = np.linspace(xmin,xmax,opts.xbins)
        y = np.linspace(ymin,ymax,opts.ybins)
        z = np.linspace(zmin,zmax,opts.zbins)
        nx,ny,nz = len(x),len(y),len(z)
        # brute-force the likelihood over the grid; lnl has shape (nz, ny, nx)
        val,lnl,rich = scan(loglike,{xpar:x},{ypar:y},{zpar:z})
        #lnl,richness = scan(loglike,{xpar:x},{ypar:y},{zpar:z})
        ts = 2*lnl
        xx,yy,zz = np.meshgrid(x,y,z)
        maxlnl = np.max(lnl)
        idx =np.argmax(lnl)
        zidx,yidx,xidx = np.unravel_index(idx,lnl.shape)
        print(list(zip([xpar,ypar,zpar],[x[xidx],y[yidx],z[zidx]])))

        # Probably a better way to do the profile with more variables...
        #stackoverflow.com/q/30589211
        #lnlike = np.max(lnl.shape(nz,-1),axis=1)
        # Profile each parameter in turn by maximizing over the other two axes.
        results =dict()
        for i,(p,v) in enumerate(zip([xpar,ypar,zpar],[x,y,z])):
            # lnl axes are ordered (z, y, x): parameter i lives on axis 2-i.
            # BUGFIX: the original
            #   np.max(np.max(np.swapaxes(lnl,i,0),axis=0),axis=0)
            # kept the x axis for i=1 (swapping axes 1 and 0 leaves x last),
            # so the ypar "profile" was actually the x profile. Reduce every
            # axis except 2-i instead.
            lnlike = lnl.max(axis=tuple(ax for ax in range(lnl.ndim) if ax != 2 - i))
            lnlfn = ProfileLimit(v,lnlike)
            lnlfn._mle = v[np.argmax(lnlike)]
            lnlfn._fmax = np.max(lnlike)
            mle = lnlfn._mle
            try:
                lo = lnlfn.getLowerLimit(alpha/2)
            except ValueError:
                # limit falls outside the scanned range
                lo = np.nan
            try:
                hi = lnlfn.getUpperLimit(alpha/2)
            except ValueError:
                hi = np.nan
            results[p] = [lnlfn, mle, [lo,hi]]

        ##richness = np.array(richness).reshape(xx.shape)
        #like = np.exp(lnl-lnl.max())
        #maxlike = np.max(like)
        #idx =np.argmax(like)
        #yidx,xidx = np.unravel_index(idx,lnl.shape)
        #print x[xidx],y[yidx]
        #loglike.value(**{xpar:x[xidx],ypar:y[yidx]})
        #richs = np.logspace(np.log10(richness.flat[idx])-1,np.log10(richness.flat[idx])+1,100)
        #rich_lnlfn = ProfileLimit(richs,np.array([loglike.value(richness=r) for r in richs]))
        # The two triple-quoted blocks below are disabled plotting code kept
        # as string literals (some of it is Python 2); left untouched.
        """
        # Plotting... lot's of plotting
        lnlmax = np.max(lnl)
        data = lnl - lnlmax
        im_kwargs = dict(extent = [xmin,xmax,ymin,ymax],aspect='auto',
                         interpolation='none',origin='lower')
        mle_kwargs = dict(color='black',ls='-')
        err_kwargs = dict(color='black',ls='--')
        cs_kwargs = dict(colors='0.66',ls='-',**im_kwargs)
        fig,ax = plt.subplots()
        ax.imshow(data,**im_kwargs)
        levels = odict([
            (-1.0/2. ,'68%'),
            (-1.64/2.,'80%'),
            (-2.71/2.,'90%'),
            (-3.84/2.,'95%'),
        ])
        cs = plt.contour(data,levels=levels.keys(),**cs_kwargs)
        plt.clabel(cs,fmt=levels,inline=1,fontsize=8)
        plt.plot(x[xidx],y[yidx],'x',ms=10,mew=5,**mle_kwargs)
        #plt.plot(results[0][0],results[1][0],'bx',ms=10,mew=5)
        plt.plot(x,y[np.argmax(like,axis=0)],lw=1.5,**err_kwargs)
        #ax2 = ugali.utils.plotting.draw_sum_slices(data)
        ax2 = ugali.utils.plotting.draw_max_slices(data)
        ann_kwargs = dict(xycoords='axes fraction',fontsize=8,
                          bbox={'facecolor':'w'})
        for i, r in enumerate(results):
            attr = 'axvline' if i==0 else 'axhline'
            par = xpar if i==0 else ypar
            getattr(ax,attr)(r[0],**mle_kwargs)
            getattr(ax2[i],attr)(r[0],**mle_kwargs)
            for l in r[1]:
                getattr(ax,attr)(l,**err_kwargs)
                getattr(ax2[i],attr)(l,**err_kwargs)
            ax.annotate(fmt%(par,r[0],r[1][1]-r[0],r[0]-r[1][0]),
                        xy=(0.05,.9-0.05*i),**ann_kwargs)
        ax.annotate('TS = %.0f'%(2*lnlmax),
                    xy=(0.75,.9),**ann_kwargs)
        ax.set_xlabel(xpar)
        ax.set_ylabel(ypar)
        outfile = os.path.join(outdir,'%s_%s_%s.png'%(name.lower(),xpar,ypar))
        plt.savefig(outfile,bbox_inches='tight')
        """
        plt.ion()
        """
        rich_mle = rich_lnlfn._mle
        rich_lo = rich_lnlfn.getLowerLimit(alpha/2)
        rich_hi = rich_lnlfn.getUpperLimit(alpha/2)
        fig,axes = plt.subplots(1,3,figsize=(12,4))
        log_exts = np.log10(exts)
        levels = [ts.max(),ts.max()-2.71/2, ts.max() - 3.83/2]
        ax = axes[2]
        im = ax.imshow(ts,**kwargs)
        #draw_slices(ts)
        #ax.set_xlim(extent[0],extent[1])
        #ax.set_ylim(extent[2],extent[3])
        cb = plt.colorbar(im)
        cb.set_label('TS')
        plt.contour(ts,**kwargs)
        plt.plot(np.log10(xx).flat[np.argmax(ts)],yy.flat[np.argmax(ts)],'kx',ms=10,mew=5)
        ann_kwargs = dict(xycoords='axes fraction',fontsize=8,bbox={'facecolor':'w'})
        outfile = os.path.join(lnldir,'%s_ext_mod_tsmap.png'%name.lower())
        plt.savefig(outfile,bbox_inches='tight')
        print 'Max TS:',ts.max()
        print 'Richness MLE:',rich_mle,[rich_lo,rich_hi]
        print 'Extension MLE:',ext_mle,[ext_lo,ext_hi]
        print 'Distance Modulus MLE:', mod_mle,[mod_lo,mod_hi]
        """
|
|
# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
import warnings
from datetime import datetime
import numpy as np
from numpy import cov
from scipy import linalg
def time_format():
    """Return the current local timestamp formatted as a log-line prefix."""
    now = datetime.now()
    return "{}|> ".format(now)
def calculate_frechet_distance(mu1, sigma1, mu2, sigma2, eps=1e-6):
    """Numpy implementation of the Frechet Distance.

    For two multivariate Gaussians X_1 ~ N(mu_1, C_1) and X_2 ~ N(mu_2, C_2)
    the squared Frechet distance is

        d^2 = ||mu_1 - mu_2||^2 + Tr(C_1 + C_2 - 2*sqrt(C_1*C_2)).

    Stable version by Dougal J. Sutherland.

    Params:
    -- mu1   : mean vector of the generated-sample activations.
    -- sigma1: covariance matrix of the generated-sample activations.
    -- mu2   : mean vector of the reference-set activations.
    -- sigma2: covariance matrix of the reference-set activations.
    -- eps   : diagonal jitter added when the covariance product is singular.

    Returns:
    -- The Frechet Distance (float).
    """
    mu1, mu2 = np.atleast_1d(mu1), np.atleast_1d(mu2)
    sigma1, sigma2 = np.atleast_2d(sigma1), np.atleast_2d(sigma2)

    assert (
        mu1.shape == mu2.shape
    ), "Training and test mean vectors have different lengths"
    assert (
        sigma1.shape == sigma2.shape
    ), "Training and test covariances have different dimensions"

    mean_diff = mu1 - mu2

    # The covariance product may be nearly singular; disp=False returns the
    # matrix square root plus an error estimate instead of printing.
    covmean, _ = linalg.sqrtm(sigma1.dot(sigma2), disp=False)
    if not np.isfinite(covmean).all():
        # Regularize: add a small jitter to both diagonals and retry.
        warnings.warn(
            "fid calculation produces singular product; adding %s to diagonal of cov estimates"
            % eps
        )
        jitter = np.eye(sigma1.shape[0]) * eps
        covmean = linalg.sqrtm((sigma1 + jitter).dot(sigma2 + jitter))

    # Numerical error can leave a tiny imaginary component; drop it when small.
    if np.iscomplexobj(covmean):
        if not np.allclose(np.diagonal(covmean).imag, 0, atol=1e-3):
            worst = np.max(np.abs(covmean.imag))
            raise ValueError("Imaginary component {}".format(worst))
        covmean = covmean.real

    return (
        mean_diff.dot(mean_diff)
        + np.trace(sigma1)
        + np.trace(sigma2)
        - 2 * np.trace(covmean)
    )
def calculate_fid(act1, act2):
    """Compute the FID between two activation matrices (rows = samples).

    Each input is an (n_samples, n_features) array; the per-set Gaussian
    statistics (mean vector and feature covariance) are fed to
    calculate_frechet_distance.
    """
    stats = [(acts.mean(axis=0), cov(acts, rowvar=False)) for acts in (act1, act2)]
    (m1, s1), (m2, s2) = stats
    return calculate_frechet_distance(m1, s1, m2, s2)
|
|
import os
import subprocess
import sys
import shutil
import pandas as pd
import argparse
import numpy as np
import boto3
from datetime import datetime
# from botocore.exceptions import ClientError
from botocore.config import Config
from boto3.dynamodb.conditions import Key
# boto3 retry policy shared by the DynamoDB resource below
config = Config(
    retries = {
        'max_attempts': 10,
        'mode': 'standard'
    }
)
##############################################
# Step 1. Create resources
##############################################
# Obtain the configuration for this run from the environment
dateString = os.getenv('DATE_PARTITION')
seqFile = os.getenv('SEQ_BATCH_FILE') # Path to the EFS file that we wish to process
seqConsensusFile = os.getenv('SEQ_CONSENSUS_BATCH_FILE') # Path to the EFS file that we wish to process
keyFile = os.getenv('SEQ_KEY_FILE') # Path to the file that contains the sequence hash and id
bucketName = os.getenv('HERON_SAMPLES_BUCKET')
heronSequencesTableName = os.getenv("HERON_SEQUENCES_TABLE")
# the batch UUID is encoded in the batch file name: sequences_<uuid>.<ext>
batchUUID = os.path.splitext(os.path.basename(seqFile))[0].replace("sequences_", "")
print(f"Processing seqBatchFile: {seqConsensusFile}")
# Create the AWS resources: S3Bucket, dynamoDB Table, etc...
s3 = boto3.resource('s3', region_name='eu-west-1')
bucket = s3.Bucket(bucketName)
dynamodb = boto3.resource('dynamodb', region_name="eu-west-1", config=config)
sequencesTable = dynamodb.Table(heronSequencesTableName)
# Print the pango version
print(f"Pangolin D Version")
command = ["pangolin", "-dv"]
subprocess.run(command)
print(f"Pangolin Version")
command = ["pangolin", "-v"]
subprocess.run(command)
print(f"PangoLearn Version")
command = ["pangolin", "-pv"]
subprocess.run(command)
# Run pangolin in usher mode over the consensus batch, writing a CSV report
command = ["pangolin", "--verbose", "--usher", seqConsensusFile, "--outfile", "/tmp/output.csv", "--alignment"]
print(f"Running Command: {command}")
subprocess.run(command)
# Upload the raw lineage report to S3
S3Key = f"pangolin/testOutput.csv"
bucket.upload_file("/tmp/output.csv", S3Key)
# Join the lineage calls back to seqHash via the key file; key file ids are
# fasta headers, so prefix each taxon with '>' to match seqId
lineageDf = pd.read_csv("/tmp/output.csv")
lineageDf['taxon'] = [f">{f}" for f in lineageDf['taxon']]
keyFileDf = pd.read_json(keyFile, orient="records")
joinedDf = pd.merge(lineageDf, keyFileDf, left_on="taxon", right_on="seqId", how="inner")
# call date = midnight today as a unix timestamp
# NOTE(review): datetime.now() is local time, not UTC -- confirm intended.
callDate = int(datetime(datetime.now().year, datetime.now().month, datetime.now().day, 0, 0, 0).timestamp())
updateCount = 0
for index, row in joinedDf.iterrows():
    seqHash = row["seqHash"]
    lineage = row["lineage"]
    seqId = row['seqId']
    # Create query for dynamoDB
    # NOTE(review): the table handle is re-created on every iteration although
    # one already exists above; appears redundant.
    sequencesTable = dynamodb.Table(heronSequencesTableName)
    response = sequencesTable.query(KeyConditionExpression=Key('seqHash').eq(seqHash))
    if 'Items' in response:
        # only update when the hash resolves to exactly one record
        if len(response['Items']) == 1:
            item = response['Items'][0]
            print(f"Updating: {seqHash}")
            ret = sequencesTable.update_item(
                Key={'seqHash': seqHash},
                UpdateExpression="set pangoUsherLineage=:l, pangoUsherCallDate=:d, pangoCalled=:p",
                ExpressionAttributeValues={
                    ':l': lineage,
                    ':d': callDate,
                    ':p': 'true'
                }
            )
            updateCount += 1
print(f"Updated {updateCount} out of {len(joinedDf)}")
print(f"keyFileDf length: {len(keyFileDf)}")
print(f"lineageDf length: {len(lineageDf)}")
print(f"JoinedDf length: {len(joinedDf)}")
|
import scipy.ndimage as ndimg
import numpy as np
from imagepy.core.engine import Filter, Simple
from geonumpy.pretreat import degap
class GapRepair(Simple):
    """ImagePy plugin that repairs 'wild' (out-of-range) pixels.

    Pixels below (or above, depending on 'dark') the 'wild' threshold are
    masked and repaired with radius 'r'.
    """
    title = 'Gap Repair'
    note = ['all', 'preview']
    # wild: threshold value; r: repair radius; dark: mask values below the
    # threshold; every: rebuild the mask per slice; slice: apply to the stack
    para = {'wild':0, 'r':0, 'dark':True, 'every':True, 'slice':False}
    view = [(float, 'wild', (-65536, 65536), 0, 'wild', 'value'),
            (int, 'r', (0,1024), 0, 'radius', 'pix'),
            (bool, 'dark', 'dark'),
            (bool, 'every', 'count msk for every slice'),
            (bool, 'slice', 'slice')]

    def load(self, ips):
        # Remember the display range and LUT so preview/cancel can restore them.
        self.arange = ips.range
        self.lut = ips.lut
        ips.lut = self.lut.copy()
        return True

    def preview(self, ips, para):
        # Highlight in red the pixels that would be treated as gaps.
        ips.lut[:] = self.lut
        # map the threshold from the data range onto the 0-255 LUT index range
        thr = int((para['wild']-self.arange[0])*(
            255.0/max(1e-10, self.arange[1]-self.arange[0])))
        if para['dark']: ips.lut[:thr] = [255,0,0]
        else: ips.lut[thr:] = [255,0,0]
        ips.update()

    def cancel(self, ips):
        # Restore the original LUT when the dialog is cancelled.
        ips.lut = self.lut
        ips.update()

    def run(self, ips, imgs, para = None):
        if not para['slice']:
            ips.snapshot()
            imgs = [ips.img]
        if para['every']:
            # build a fresh mask for every slice
            for i in range(len(imgs)):
                img = imgs[i]
                self.progress(i+1, len(imgs))
                msk = img<para['wild'] if para['dark'] else img>=para['wild']
                # NOTE(review): 'gap_repair' is not defined in this module; the
                # import above binds 'degap' from geonumpy.pretreat -- this
                # likely should call degap(...) (or the import should alias
                # gap_repair). TODO confirm against geonumpy's API.
                gap_repair(img, msk, para['r'])
        else:
            # one mask (from the current image) applied to the whole stack
            msk = ips.img<para['wild'] if para['dark'] else ips.img>=para['wild']
            # NOTE(review): 'gap_repair' appears undefined here as well.
            gap_repair(imgs, msk, para['r'])
        ips.lut = self.lut
class ROIRepairMC(Simple):
    """ImagePy plugin that repairs the region inside the current ROI across a stack."""
    title = 'ROI Repair Channels'
    note = ['all', 'stack']
    # r: repair radius; slice: apply to the whole stack
    para = {'r':0, 'slice':True}
    view = [(int, 'r', (0, 1024), 0, 'radius', 'pix'),
            (bool, 'slice', 'slice')]

    def run(self, ips, imgs, para = None):
        if not(para['slice']):
            ips.snapshot()
            imgs = [ips.img]
        # mask of pixels inside the ROI
        msk = ips.get_msk('in')
        # NOTE(review): 'gap_repair' is not defined in this module; the import
        # binds 'degap' from geonumpy.pretreat -- TODO confirm the intended callable.
        gap_repair(imgs, msk, para['r'])

# plugins exported to ImagePy
plgs = [GapRepair, ROIRepairMC]
|
|
import itertools,math
import numpy as np
from scipy.stats import binom_test
try:
from pybedtools import BedTool
except:
print("Pybedtools not imported")
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
def plot_styler():
    """Apply the shared minimal style to a fresh axes: hide the top/right
    spines and keep ticks on the left/bottom only."""
    axis = plt.subplot(111)
    for side in ('right', 'top'):
        axis.spines[side].set_visible(False)
    axis.yaxis.set_ticks_position('left')
    axis.xaxis.set_ticks_position('bottom')
    return
def barplot_gen(strand1, strand2, name1, name2, output):
    """Save a two-bar plot comparing the occurrence counts of the two
    strand orientations.

    This should be an option for the user if he wants to generate
    visualizations too.
    """
    plot_styler()
    positions = range(1, 3)
    plt.bar(positions, [strand1, strand2], align="center")
    plt.xticks(positions, [name1, name2])
    plt.ylabel("Occurrences")
    plt.xlabel("Strand Orientation")
    plt.tight_layout()
    plt.savefig(output)
    plt.close()
    return
def barplot_pair_lists_gen(x_tickL, List1, List2, name1, name2, x_label, title_legend, output):
    """Save a grouped bar plot of two series drawn side by side
    (every third x position, series offset by one).

    This should be an option for the user if he wants to generate
    visualizations too.
    """
    plot_styler()
    first_positions = range(1, len(List1) * 3 + 1, 3)
    second_positions = range(2, len(List2) * 3 + 1, 3)
    plt.bar(first_positions, List1, label=name1, align="center")
    plt.bar(second_positions, List2, label=name2, align="center")
    plt.xticks(first_positions, x_tickL, rotation=90, fontsize=9)
    plt.ylabel("Occurrences")
    plt.xlabel(x_label)
    plt.legend(frameon=False, title=title_legend)
    plt.tight_layout()
    plt.savefig(output)
    plt.close()
    return
def barplot_single_gen(List1,List1_names,y_label,x_label,output):
    """Save a single-series bar plot of List1 to `output`.

    This should be an option for the user if he wants to generate
    visualizations too.
    """
    plot_styler()
    plt.bar(range(1,len(List1)*1+1,1),List1,align="center")
    # NOTE(review): tick positions span range(1, len(List1)*3+1) while the bars
    # sit at 1..len(List1), and only every 3rd name is used -- this looks
    # inconsistent with the paired-bar helper above; confirm intended behaviour.
    plt.xticks(range(1,len(List1)*3+1),[List1_names[k] for k in range(0,len(List1_names)*1,3)],rotation=90)
    plt.ylabel(y_label)
    plt.xlabel(x_label)
    plt.tight_layout()
    plt.savefig(output)
    plt.close()
    return
def heatmap_gen(DataLL, DataLL_control, BinsL, output):
    """Save a heatmap of log10 enrichment (observed / control) per distance bin.

    Args:
        DataLL (list): per-bin dicts mapping consecutive-occurrence count ->
            frequency (empty bins may be passed as [] and are normalised to {}).
        DataLL_control (list): same structure for the control set.
        BinsL (list): row labels (distance bins) for the heatmap.
        output (str): path the figure is saved to.

    Cells where either set lacks the count, or either frequency is zero, are NaN
    and masked out of the heatmap. Returns early if there is no observed data.
    """
    try:
        import seaborn as sns
    except:
        print("seaborn not imported")
    try:
        import pandas as pd
    except:
        print("pandas not imported")
    # normalise empty-list placeholders to empty dicts
    for k in range(len(DataLL)):
        if DataLL[k] == []:
            DataLL[k] = {}
    for k in range(len(DataLL_control)):
        if DataLL_control[k] == []:
            DataLL_control[k] = {}
    # largest consecutive-occurrence count seen anywhere in the observed data
    all_cons = [max(k.keys()) for k in DataLL if k != {}]
    if all_cons == []:
        # nothing observed -> nothing to plot
        return
    max_cons = max(all_cons)
    RatioLL = []
    for i in range(len(DataLL)):
        RatioL = []
        for k in range(1, max_cons + 1):
            # BUGFIX: also require k in DataLL[i]; the original indexed
            # DataLL[i][k] unguarded and raised KeyError whenever a count was
            # present in the control but absent from the observed data.
            if k in DataLL_control[i] and k in DataLL[i]:
                if float(DataLL_control[i][k]) != 0 and float(DataLL[i][k]) != 0:
                    RatioL.append(math.log10(DataLL[i][k] / float(DataLL_control[i][k])))
                else:
                    RatioL.append(np.nan)
            else:
                RatioL.append(np.nan)
        RatioLL.append(RatioL)
    df = pd.DataFrame(np.array(RatioLL), index=BinsL)
    # mask NaN cells so they render blank instead of as zeros
    mask = df.isnull()
    sns.heatmap(df, cbar_kws={'label': 'log(Enrichment)'}, mask=mask)
    plt.xlabel("Consecutive occurrences")
    plt.ylabel("Distance bins")
    plt.tight_layout()
    plt.savefig(output)
    plt.close()
    return
def distribution_gen(occsL, occsL_control, output):
    """Plot the distribution of gaps between consecutive pattern occurrences
    for the real data ("Observed") and the control ("Expected"), saving the
    figure to `output`.
    """
    from collections import Counter
    plot_styler()
    gaps_observed = [b - a for a, b in zip(occsL, occsL[1:])]
    gaps_expected = [b - a for a, b in zip(occsL_control, occsL_control[1:])]
    counts_observed = Counter(gaps_observed).most_common()
    counts_expected = Counter(gaps_expected).most_common()
    plt.plot([d for d, _ in counts_observed],
             [c for _, c in counts_observed],
             "o", markersize=2, label="Observed")
    plt.plot([d for d, _ in counts_expected],
             [c for _, c in counts_expected],
             "o", markersize=2, label="Expected")
    plt.xlabel("Distance of consecutive")
    plt.ylabel("Occurrences")
    plt.legend(frameon=False)
    plt.savefig(output)
    plt.close()
    return
def distnace_distribution_gen(same_strandL_distance, opposite_strandL_distance, name1, name2, min_dist, max_dist, output):
    """Save overlaid 50-bin step histograms of pair distances for the two
    strand orientations, limited to [min_dist, max_dist] on the x axis.

    Args:
        same_strandL_distance (list): distances for same-orientation pairs.
        opposite_strandL_distance (list): distances for opposite-orientation pairs.
        name1, name2 (str): legend labels for the two series.
        min_dist, max_dist: x-axis limits.
        output (str): path the figure is saved to.

    NOTE: the function name keeps its historical "distnace" typo because
    callers depend on it. (Removed an unused function-local seaborn import.)
    """
    plot_styler()
    plt.hist(same_strandL_distance, 50, histtype='step', label=name1)
    plt.hist(opposite_strandL_distance, 50, histtype='step', label=name2)
    plt.xlabel("Distance")
    plt.ylabel("Occurrences")
    plt.xlim(min_dist, max_dist)
    plt.legend(frameon=False, title="Orientation")
    plt.savefig(output)
    plt.close()
|
|
"""
created matt_dumont
on: 15/02/22
"""
import flopy
import numpy as np
from ci_framework import FlopyTestSetup, base_test_dir
import platform
base_dir = base_test_dir(__file__, rel_path="temp", verbose=True)

# grid dimensions shared by both test models
nrow = 3
ncol = 4
nlay = 2
nper = 1

# layer-1 ibound: constant heads (-1) around the edge, two active cells inside
l1_ibound = np.array([[[-1, -1, -1, -1],
                       [-1, 1, 1, -1],
                       [-1, -1, -1, -1]]])
# layer-2 ibound: fully active for mf1; one inactive (0) corner cell for mf2
l2_ibound = np.ones((1, nrow, ncol))
l2_ibound_alt = np.ones((1, nrow, ncol))
l2_ibound_alt[0, 0, 0] = 0
ibound = {
    'mf1': np.concatenate((l1_ibound, l2_ibound), axis=0),  # constant heads around model on top row
    'mf2': np.concatenate((l1_ibound, l2_ibound_alt), axis=0),  # constant heads around model on top row
}
# layer type (laytyp) per model: mf1 has a convertible bottom layer
laytype = {
    'mf1': [0, 1],
    'mf2': [0, 0]
}
# sentinel heads for no-flow and dry cells
hnoflow = -888
hdry = -777
# layer geometry: top at 10, layer bottoms at 6 and 4
top = np.zeros((1, nrow, ncol)) + 10
bt1 = np.ones((1, nrow, ncol)) + 5
bt2 = np.ones((1, nrow, ncol)) + 3
botm = np.concatenate((bt1, bt2), axis=0)
# cell-by-cell budget unit number
ipakcb = 740
names = ['mf1', 'mf2']
# only run the models if all executables are on the path
exe_names = {"mf2005": "mf2005", "mf6": "mf6", "mp7": "mp7"}
run = True
for key in exe_names.keys():
    v = flopy.which(exe_names[key])
    if v is None:
        run = False
        break
mf2005_exe = "mf2005"
# NOTE(review): `platform.system() in "Windows"` is a substring test, not an
# equality check; it works for "Windows" but `== "Windows"` would be clearer.
if platform.system() in "Windows":
    mf2005_exe += ".exe"
mf2005_exe = flopy.which(mf2005_exe)
mp6_exe = "mp6"
if platform.system() in "Windows":
    mp6_exe += ".exe"
mp6_exe = flopy.which(mp6_exe)
def _setup_modflow_model(nm, ws):
    """Build, write, and (when executables are available) run one of the two
    MF2005 test models.

    Parameters
    ----------
    nm : str
        Model key, 'mf1' or 'mf2'; selects the ibound/laytyp variant defined
        at module level.
    ws : str
        Workspace directory the model files are written to.

    Returns
    -------
    flopy.modflow.Modflow
        The constructed (and possibly run) model.
    """
    m = flopy.modflow.Modflow(
        modelname=f"modflowtest_{nm}",
        namefile_ext="nam",
        version="mf2005",
        exe_name=mf2005_exe,
        model_ws=ws,
    )
    # dis
    dis = flopy.modflow.ModflowDis(
        model=m,
        nlay=nlay,
        nrow=nrow,
        ncol=ncol,
        nper=nper,
        delr=1.0,
        delc=1.0,
        laycbd=0,
        top=top,
        botm=botm,
        perlen=1,
        nstp=1,
        tsmult=1,
        steady=True,
    )
    # bas
    bas = flopy.modflow.ModflowBas(
        model=m,
        ibound=ibound[nm],
        strt=10,
        ifrefm=True,
        ixsec=False,
        ichflg=False,
        stoper=None,
        hnoflo=hnoflow,
        extension="bas",
        unitnumber=None,
        filenames=None,
    )
    # lpf
    lpf = flopy.modflow.ModflowLpf(
        model=m,
        ipakcb=ipakcb,
        laytyp=laytype[nm],
        hk=10,
        vka=10,
        hdry=hdry
    )
    # well: single extraction well in the active interior
    wel = flopy.modflow.ModflowWel(
        model=m,
        ipakcb=ipakcb,
        stress_period_data={0: [[1, 1, 1, -5.]]},
    )
    # pcg solver
    flopy.modflow.ModflowPcg(m, hclose=0.001, rclose=0.001,
                             mxiter=150, iter1=30,
                             )
    # output control: save head and budget every stress period
    ocspd = {}
    for p in range(nper):
        ocspd[(p, 0)] = ['save head', 'save budget']
    ocspd[(0, 0)] = ['save head', 'save budget'] # pretty sure it just uses the last for everything
    flopy.modflow.ModflowOc(m, stress_period_data=ocspd)
    m.write_input()
    if run:
        success, buff = m.run_model()
        assert success
    return m
def test_data_pass_no_modflow():
    """
    Test that the user can create a Modpath6 model from explicit dis/head/budget
    file names alone, without an accompanying modflow model object.

    Returns
    -------
    """
    ws = f"{base_dir}_test_mp_no_modflow"
    test_setup = FlopyTestSetup(verbose=True, test_dirs=ws)
    # output files produced by the mf1 model built below
    dis_file = f"modflowtest_mf1.dis"
    bud_file = f"modflowtest_mf1.cbc"
    hd_file = f"modflowtest_mf1.hds"
    m1 = _setup_modflow_model('mf1', ws)
    mp = flopy.modpath.Modpath6(
        modelname="modpathtest",
        simfile_ext="mpsim",
        namefile_ext="mpnam",
        version="modpath",
        exe_name=mp6_exe,
        modflowmodel=None,  # do not pass modflow model
        dis_file=dis_file,
        head_file=hd_file,
        budget_file=bud_file,
        model_ws=ws,
        external_path=None,
        verbose=False,
        load=True,
        listunit=7,
    )
    # the explicit file names must be used verbatim
    assert mp.head_file == hd_file
    assert mp.budget_file == bud_file
    assert mp.dis_file == dis_file
    assert mp.nrow_ncol_nlay_nper == (nrow, ncol, nlay, nper)
    mpbas = flopy.modpath.Modpath6Bas(
        mp,
        hnoflo=hnoflow,
        hdry=hdry,
        def_face_ct=0,
        bud_label=None,
        def_iface=None,
        laytyp=laytype['mf1'],
        ibound=ibound['mf1'],
        prsity=0.30,
        prsityCB=0.30,
        extension="mpbas",
        unitnumber=86,
    )
    # test layertype is created correctly
    assert np.isclose(mpbas.laytyp.array, laytype['mf1']).all()
    # test ibound is pulled from modflow model
    assert np.isclose(mpbas.ibound.array, ibound['mf1']).all()
    sim = flopy.modpath.Modpath6Sim(model=mp)
    # two starting particles; particle 1 is placed explicitly
    stl = flopy.modpath.mp6sim.StartingLocationsFile(model=mp)
    stldata = stl.get_empty_starting_locations_data(npt=2)
    stldata["label"] = ["p1", "p2"]
    stldata[1]["k0"] = 0
    stldata[1]["i0"] = 0
    stldata[1]["j0"] = 0
    stldata[1]["xloc0"] = 0.1
    stldata[1]["yloc0"] = 0.2
    stl.data = stldata
    mp.write_input()
    if run:
        success, buff = mp.run_model()
        assert success
def test_data_pass_with_modflow():
    """
    Test that user-specified head/budget/dis files are preferred over the
    files that would come from the attached modflow model (mf1 files are
    passed while the mf2 model is attached).

    Returns
    -------
    """
    ws = f"{base_dir}_test_mp_with_modflow"
    test_setup = FlopyTestSetup(verbose=True, test_dirs=ws)
    # explicit files point at the mf1 model, NOT the attached mf2 model
    dis_file = f"modflowtest_mf1.dis"
    bud_file = f"modflowtest_mf1.cbc"
    hd_file = f"modflowtest_mf1.hds"
    m1 = _setup_modflow_model('mf1', ws)
    m2 = _setup_modflow_model('mf2', ws)
    mp = flopy.modpath.Modpath6(
        modelname="modpathtest",
        simfile_ext="mpsim",
        namefile_ext="mpnam",
        version="modpath",
        exe_name=mp6_exe,
        modflowmodel=m2,  # attach the mf2 model; the explicit mf1 files above should win
        dis_file=dis_file,
        head_file=hd_file,
        budget_file=bud_file,
        model_ws=ws,
        external_path=None,
        verbose=False,
        load=False,
        listunit=7,
    )
    # the explicit file names must take precedence over the attached model's
    assert mp.head_file == hd_file
    assert mp.budget_file == bud_file
    assert mp.dis_file == dis_file
    assert mp.nrow_ncol_nlay_nper == (nrow, ncol, nlay, nper)
    mpbas = flopy.modpath.Modpath6Bas(
        mp,
        hnoflo=hnoflow,
        hdry=hdry,
        def_face_ct=0,
        bud_label=None,
        def_iface=None,
        laytyp=laytype['mf1'],
        ibound=ibound['mf1'],
        prsity=0.30,
        prsityCB=0.30,
        extension="mpbas",
        unitnumber=86,
    )
    # test layertype is created correctly!
    assert np.isclose(mpbas.laytyp.array, laytype['mf1']).all()
    # test ibound is pulled from modflow model
    assert np.isclose(mpbas.ibound.array, ibound['mf1']).all()
    sim = flopy.modpath.Modpath6Sim(model=mp)
    # two starting particles; particle 1 is placed explicitly
    stl = flopy.modpath.mp6sim.StartingLocationsFile(model=mp)
    stldata = stl.get_empty_starting_locations_data(npt=2)
    stldata["label"] = ["p1", "p2"]
    stldata[1]["k0"] = 0
    stldata[1]["i0"] = 0
    stldata[1]["j0"] = 0
    stldata[1]["xloc0"] = 0.1
    stldata[1]["yloc0"] = 0.2
    stl.data = stldata
    mp.write_input()
    if run:
        success, buff = mp.run_model()
        assert success
def test_just_from_model():
    """
    test that dis/head/budget files and package data are taken from the
    modflow model when the user does not specify them explicitly
    Returns
    -------
    """
    ws = f"{base_dir}_test_mp_only_modflow"
    test_setup = FlopyTestSetup(verbose=True, test_dirs=ws)
    # expected file names: these belong to the 'mf2' model passed below
    dis_file = f"modflowtest_mf2.dis"
    bud_file = f"modflowtest_mf2.cbc"
    hd_file = f"modflowtest_mf2.hds"
    # m1 is set up only so its files also exist in the workspace;
    # the assertions below verify that m2 (not m1) values are picked up
    m1 = _setup_modflow_model('mf1', ws)
    m2 = _setup_modflow_model('mf2', ws)
    mp = flopy.modpath.Modpath6(
        modelname="modpathtest",
        simfile_ext="mpsim",
        namefile_ext="mpnam",
        version="modpath",
        exe_name=mp6_exe,
        modflowmodel=m2,  # pass modflow model; file names left as None must be derived from m2
        dis_file=None,
        head_file=None,
        budget_file=None,
        model_ws=ws,
        external_path=None,
        verbose=False,
        load=False,
        listunit=7,
    )
    # file names must come from the m2 modflow model
    assert mp.head_file == hd_file
    assert mp.budget_file == bud_file
    assert mp.dis_file == dis_file
    assert mp.nrow_ncol_nlay_nper == (nrow, ncol, nlay, nper)
    mpbas = flopy.modpath.Modpath6Bas(
        mp,
        hnoflo=hnoflow,
        hdry=hdry,
        def_face_ct=0,
        bud_label=None,
        def_iface=None,
        laytyp=None,
        ibound=None,
        prsity=0.30,
        prsityCB=0.30,
        extension="mpbas",
        unitnumber=86,
    )
    # test layertype is created correctly (pulled from the m2 modflow model)
    assert np.isclose(mpbas.laytyp.array, laytype['mf2']).all()
    # test ibound is pulled from modflow model
    assert np.isclose(mpbas.ibound.array, ibound['mf2']).all()
    sim = flopy.modpath.Modpath6Sim(model=mp)
    stl = flopy.modpath.mp6sim.StartingLocationsFile(model=mp)
    stldata = stl.get_empty_starting_locations_data(npt=2)
    stldata["label"] = ["p1", "p2"]
    # place the second particle explicitly; the first keeps default values
    stldata[1]["k0"] = 0
    stldata[1]["i0"] = 0
    stldata[1]["j0"] = 0
    stldata[1]["xloc0"] = 0.1
    stldata[1]["yloc0"] = 0.2
    stl.data = stldata
    mp.write_input()
    if run:
        success, buff = mp.run_model()
        assert success
if __name__ == '__main__':
    # run the no-modflow variant directly when executed as a script
    # (redundant `pass` before the call removed)
    test_data_pass_no_modflow()
|
|
import numpy as np
from scipy.optimize import minimize
import networkx as nx
from code.miscellaneous.utils import flatten_listlist
from scipy.sparse.csgraph import connected_components
from code.Modality.DensityEstKNN import DensityEstKNN
from code.NoiseRemoval.ClusterGMM import gmm_cut
from code.Graph.extract_neighbors import neighboring_modes
from code.Graph.GabrielGraph import gabriel_graph_adjacency
from code.NoiseRemoval.OptimalVelocity import optimize_velocity, transform_velocity, transform_velocity_diff
def remove_noise(data, cluster_bool_arr, G, pos_cols, labels, density, nb_neigh_denstiy,
                 data_full, ra_col, dec_col, plx_col, pmra_col, pmdec_col, rv_col, rv_err_col,
                 uvw_cols=None, radius=20
                 ):
    """Remove noise for a given cluster

    :param data: full data set
    :param cluster_bool_arr: bool array highlighting the cluster
    :param G: the MST graph describing the modes and their connection via saddle points
    :param pos_cols: position columns (needed for combination of new feature space)
    :param labels: labels for each initial mode appearing in the data set
    :param density: point density estimate, usually via KNN density estimation
    :param nb_neigh_denstiy: number of neighbors to use for density estimation
    :param data_full: data frame holding the astrometric columns referenced below
    :param ra_col: column name of right ascension in data_full
    :param dec_col: column name of declination in data_full
    :param plx_col: column name of parallax in data_full
    :param pmra_col: column name of proper motion in RA in data_full
    :param pmdec_col: column name of proper motion in Dec in data_full
    :param rv_col: column name of radial velocity in data_full
    :param rv_err_col: column name of radial velocity error in data_full
    :param uvw_cols: optional column names of precomputed UVW velocities,
        used as initial guess for the velocity optimization
    :param radius: maximal allowed distance from the optimal UVW velocity
        (presumably km/s -- TODO confirm units with OptimalVelocity)
    :return: integer array over data_full rows counting in how many of the
        20 XYZ scaling steps each point was assigned to the cluster
    """
    data_idx = np.arange(data.shape[0])
    # Get densest components in the given cluster
    _, cluster_labels, _ = gmm_cut(density[cluster_bool_arr], n_components=2)
    # get labels of local cluster mode containing the peak
    cluster_modes_dense = np.unique(labels[data_idx[cluster_bool_arr][cluster_labels]])
    # extract connected components from cluster_modes_dense (via G)
    nbs_saddle = np.array(flatten_listlist([list(int(n) for n in G.neighbors(cmd)) for cmd in cluster_modes_dense]))
    nodes_to_search = np.union1d(cluster_modes_dense, nbs_saddle)
    dense_subgraph = G.subgraph(nodes_to_search)
    # keep only the largest connected component of the dense modes
    largest_cc = np.array(list(max(nx.connected_components(dense_subgraph), key=len)), dtype=int)
    cluster_modes_dense = np.intersect1d(largest_cc, labels)
    # Get modes surrounding the dense cluster core
    nbs_modes = neighboring_modes(cluster_modes_dense, G, nb_neighbors=1)
    # Remove neighboring nodes that are not in the cluster
    nbs_modes = np.intersect1d(nbs_modes, np.unique(labels[cluster_bool_arr]))
    cut_filter = np.isin(labels, nbs_modes)  # filtered points: modal and surrounding regions
    rho_fitlered = density[cut_filter]  # get density of filtered points
    _, cluster_labels_filter, _ = gmm_cut(rho_fitlered, n_components=2)  # dense core points of this region
    cut_dense_core = data_idx[cut_filter][cluster_labels_filter]  # translate bool arr to data index
    # Compute gabriel graph of modal and surrounding regions
    ajm = gabriel_graph_adjacency(data.loc[cut_filter])
    # ---- Compute "optimal" cartesian velocity ----
    # Prepare data
    cols = [ra_col, dec_col, plx_col, pmra_col, pmdec_col, rv_col, rv_err_col]
    ra, dec, plx, pmra, pmdec, rv, rv_err = data_full.loc[cut_dense_core, cols].values.T
    # Prepare initial guess
    mean_uvw = np.zeros(3)
    if uvw_cols is not None:
        mean_uvw = np.mean(data_full.loc[cut_dense_core, uvw_cols], axis=0)
    # Compute optimal velocity
    sol = optimize_velocity(ra, dec, plx, pmra, pmdec, rv, rv_err, init_guess=mean_uvw, do_minimize=True)
    optimal_vel = sol.x
    # Compute propermotions under given optimal 3D velocity of full sample
    ra, dec, plx, pmra, pmdec, rv, rv_err = data_full.loc[
        cut_filter, [ra_col, dec_col, plx_col, pmra_col, pmdec_col, rv_col, rv_err_col]].values.T
    # Find best fitting rvs for given data
    # calculate rv for cases without rv estimations or very large errors
    idx_arr = np.arange(rv.size)
    rv_isnan_or_large_err = np.isnan(rv) | (np.abs(rv / rv_err) < 2)  # for large errors find better suited rvs
    list_op_rvs = []
    for i in idx_arr[rv_isnan_or_large_err]:
        # per-star 1D optimization: find the rv minimizing the difference
        # to the cluster's optimal 3D velocity
        opt_rv = minimize(fun=transform_velocity_diff, x0=0.,
                          args=(ra[i], dec[i], plx[i], pmra[i], pmdec[i], optimal_vel))
        list_op_rvs.append(opt_rv.x[0])
    # Set optimal rv's
    rv_computed = np.copy(rv)
    rv_computed[rv_isnan_or_large_err] = np.array(list_op_rvs)
    # Transform to uvw
    uvw_computed = transform_velocity(ra, dec, plx, pmra, pmdec, rv_computed)
    # only care about velocities near the optimal velocity -> others have too different space velocity
    uvw_calc_diff = np.linalg.norm(uvw_computed - optimal_vel, axis=1)
    # differences larger than radius (default=20) are very likely not part of stellar system
    cut_uvw_diff = uvw_calc_diff < radius
    # Prepare bool array for data
    # NOTE: data_idx is rebound here to index data_full (not data) from now on
    data_idx = np.arange(data_full.shape[0])
    cluster_member_arr = np.zeros(data_full.shape[0], dtype=int)
    # Scale XYZ:
    # scales range from ~2-10 assuming the density in velocity is constant
    # while the space density can vary from a dense core to a less dense corona
    for scale in np.linspace(2, 10, 20):
        xyzuvw = np.c_[data_full.loc[cut_filter, pos_cols].values / scale, uvw_computed]
        # Compute densities
        duvw = DensityEstKNN(xyzuvw, nb_neigh_denstiy)
        rho_uvw = duvw.knn_density(nb_neigh_denstiy)
        # Predict membership via GMM with 2 components
        _, cut_gmm_xyzuvw, _ = gmm_cut(rho_uvw[cut_uvw_diff])
        # Extract connected component from dense component
        # NOTE(review): cut_gmm_xyzuvw is defined over the cut_uvw_diff subset
        # while ajm spans all cut_filter points -- confirm shapes agree when
        # cut_uvw_diff is not all True
        _, cc_idx = connected_components(ajm[cut_gmm_xyzuvw, :][:, cut_gmm_xyzuvw])
        # Combine CCs data points with originally defined dense core (to not miss out on potentially dropped points)
        cluster_indices = data_idx[cut_filter][cut_uvw_diff][cut_gmm_xyzuvw][cc_idx == np.argmax(np.bincount(cc_idx))]
        cluster_member_arr[cluster_indices] += 1
    return cluster_member_arr
def remove_noise_simple(data, cluster_bool_arr, G, labels, density):
    """Remove noise with only gmms

    :param data: full data set (positions used for the Gabriel graph)
    :param cluster_bool_arr: bool array highlighting the cluster
    :param G: the MST graph describing the modes and their connection via saddle points
    :param labels: labels for each initial mode appearing in the data set
    :param density: point density estimate, usually via KNN density estimation
    :return: bool array over data rows; True for points kept as cluster members
    """
    data_idx = np.arange(data.shape[0])
    # Get densest components in the given cluster
    _, cluster_labels, _ = gmm_cut(density[cluster_bool_arr], n_components=2)
    # get labels of local cluster mode containing the peak
    cluster_modes_dense = np.unique(labels[data_idx[cluster_bool_arr][cluster_labels]])
    # extract connected components from cluster_modes_dense (via G)
    nbs_saddle = np.array(flatten_listlist([list(int(n) for n in G.neighbors(cmd)) for cmd in cluster_modes_dense]))
    nodes_to_search = np.union1d(cluster_modes_dense, nbs_saddle)
    dense_subgraph = G.subgraph(nodes_to_search)
    # keep only the largest connected component of the dense modes
    largest_cc = np.array(list(max(nx.connected_components(dense_subgraph), key=len)), dtype=int)
    cluster_modes_dense = np.intersect1d(largest_cc, labels)
    # Get modes surrounding the dense cluster core
    # (wider search than remove_noise: 2 neighbor hops instead of 1)
    nbs_modes = neighboring_modes(cluster_modes_dense, G, nb_neighbors=2)
    # Remove neighboring nodes that are not in the cluster
    nbs_modes = np.intersect1d(nbs_modes, np.unique(labels[cluster_bool_arr]))
    cut_filter = np.isin(labels, nbs_modes)  # filtered points: modal and surrounding regions
    rho_fitlered = density[cut_filter]  # get density of filtered points
    _, cluster_labels_filter, _ = gmm_cut(rho_fitlered, n_components=2)  # dense core points of this region
    cut_dense_core = data_idx[cut_filter][cluster_labels_filter]  # translate bool arr to data index
    # Compute gabriel graph of modal and surrounding regions
    ajm = gabriel_graph_adjacency(data.loc[cut_filter])
    # restrict adjacency to the dense core and find its connected components
    _, cc_idx = connected_components(ajm[cluster_labels_filter, :][:, cluster_labels_filter])
    # Combine CCs data points with originally defined dense core (to not miss out on potentially dropped points)
    cluster_indices = data_idx[cut_filter][cluster_labels_filter][cc_idx == np.argmax(np.bincount(cc_idx))]
    return np.isin(data_idx, cluster_indices)
|
|
#!/usr/bin/env python
u"""
MPI_reduce_ICESat2_ATL11_RGI.py
Written by Tyler Sutterley (10/2021)
Create masks for reducing ICESat-2 data to the Randolph Glacier Inventory
https://www.glims.org/RGI/rgi60_dl.html
COMMAND LINE OPTIONS:
-D X, --directory X: Working Data Directory
-R X, --region X: region of Randolph Glacier Inventory to run
1: Alaska
2: Western Canada and USA
3: Arctic Canada North
4: Arctic Canada South
5: Greenland Periphery
6: Iceland
7: Svalbard
8: Scandinavia
9: Russian Arctic
10: North Asia
11: Central Europe
12: Caucasus, Middle East
13: Central Asia
14: South Asia West
15: South Asia East
16: Low Latitudes
17: Southern Andes
18: New Zealand
19: Antarctic, Subantarctic
-V, --verbose: Output information about each created file
-M X, --mode X: Permission mode of directories and files created
REQUIRES MPI PROGRAM
MPI: standardized and portable message-passing system
https://www.open-mpi.org/
http://mpitutorial.com/
PYTHON DEPENDENCIES:
numpy: Scientific Computing Tools For Python
https://numpy.org
https://numpy.org/doc/stable/user/numpy-for-matlab-users.html
mpi4py: MPI for Python
http://pythonhosted.org/mpi4py/
http://mpi4py.readthedocs.org/en/stable/
    h5py: Python interface for Hierarchical Data Format 5 (HDF5)
https://h5py.org
http://docs.h5py.org/en/stable/mpi.html
shapely: PostGIS-ish operations outside a database context for Python
http://toblerity.org/shapely/index.html
pyshp: Python read/write support for ESRI Shapefile format
https://github.com/GeospatialPython/pyshp
pyproj: Python interface to PROJ library
https://pypi.org/project/pyproj/
PROGRAM DEPENDENCIES:
convert_delta_time.py: converts from delta time into Julian and year-decimal
time.py: Utilities for calculating time operations
utilities.py: download and management utilities for syncing files
UPDATE HISTORY:
Updated 10/2021: using python logging for handling verbose output
added parsing for converting file lines to arguments
Updated 05/2021: print full path of output filename
Updated 02/2021: replaced numpy bool/int to prevent deprecation warnings
Updated 01/2021: time utilities for converting times from JD and to decimal
Written 12/2020
"""
from __future__ import print_function
import sys
import os
import re
import io
import h5py
import logging
import zipfile
import datetime
import argparse
import shapefile
import numpy as np
import collections
from mpi4py import MPI
from shapely.geometry import MultiPoint, Polygon
from icesat2_toolkit.convert_delta_time import convert_delta_time
import icesat2_toolkit.time
import icesat2_toolkit.utilities
#-- PURPOSE: keep track of MPI threads
def info(rank, size):
    """Log the MPI rank/size and process identifiers for this worker.

    Arguments
    ---------
    rank: zero-based MPI rank of this process
    size: total number of MPI processes in the communicator
    """
    #-- use lazy %-style arguments so strings are only rendered when emitted
    logging.info('Rank %d of %d', rank+1, size)
    logging.info('module name: %s', __name__)
    #-- os.getppid is unavailable on some platforms
    if hasattr(os, 'getppid'):
        logging.info('parent process: %d', os.getppid())
    logging.info('process id: %d', os.getpid())
#-- PURPOSE: load zip file containing Randolph Glacier Inventory shapefiles
def load_glacier_inventory(RGI_DIRECTORY,RGI_REGION):
    """Read RGI 6.0 shapefiles for a region from their distribution zip file.

    Arguments
    ---------
    RGI_DIRECTORY: directory containing the RGI zip files
    RGI_REGION: RGI region number (1-19)

    Returns
    -------
    tuple of (dictionary mapping RGI identifier -> shapely Polygon,
    basename of the regional RGI file)
    """
    #-- list of Randolph Glacier Inventory files (index = region number - 1)
    RGI_files = [
        '01_rgi60_Alaska',
        '02_rgi60_WesternCanadaUS',
        '03_rgi60_ArcticCanadaNorth',
        '04_rgi60_ArcticCanadaSouth',
        '05_rgi60_GreenlandPeriphery',
        '06_rgi60_Iceland',
        '07_rgi60_Svalbard',
        '08_rgi60_Scandinavia',
        '09_rgi60_RussianArctic',
        '10_rgi60_NorthAsia',
        '11_rgi60_CentralEurope',
        '12_rgi60_CaucasusMiddleEast',
        '13_rgi60_CentralAsia',
        '14_rgi60_SouthAsiaWest',
        '15_rgi60_SouthAsiaEast',
        '16_rgi60_LowLatitudes',
        '17_rgi60_SouthernAndes',
        '18_rgi60_NewZealand',
        '19_rgi60_AntarcticSubantarctic',
    ]
    #-- read input zipfile containing RGI shapefiles
    #-- (context manager guarantees the archive is closed even on error)
    with zipfile.ZipFile(os.path.join(RGI_DIRECTORY,
        '{0}.zip'.format(RGI_files[RGI_REGION-1]))) as zs:
        #-- sorted() yields the components in dbf, prj, shp, shx order
        dbf,prj,shp,shx = [io.BytesIO(zs.read(s)) for s in sorted(zs.namelist())
            if re.match(r'(.*?)\.(dbf|prj|shp|shx)$',s)]
    #-- read the shapefile and extract entities
    shape_input = shapefile.Reader(dbf=dbf, prj=prj, shp=shp, shx=shx,
        encodingErrors='ignore')
    shape_entities = shape_input.shapes()
    shape_attributes = shape_input.records()
    #-- extract the RGI entities
    poly_dict = {}
    for i,att in enumerate(shape_attributes):
        #-- extract latitude and longitude coordinates for entity
        points = np.array(shape_entities[i].points)
        #-- entities can have multiple parts
        parts = shape_entities[i].parts
        parts.append(len(points))
        #-- list object for coordinates (exterior and holes)
        poly_list = []
        #-- add each part to list
        for p1,p2 in zip(parts[:-1],parts[1:]):
            poly_list.append(list(zip(points[p1:p2,0],points[p1:p2,1])))
        #-- convert poly_list into Polygon object with holes
        poly_obj = Polygon(poly_list[0],poly_list[1:])
        #-- Valid Polygon may not possess overlapping exterior or interior rings
        if (not poly_obj.is_valid):
            poly_obj = poly_obj.buffer(0)
        #-- add to dictionary based on RGI identifier
        poly_dict[att[0]] = poly_obj
    #-- return the dictionary of polygon objects and the input file
    return (poly_dict, RGI_files[RGI_REGION-1])
#-- PURPOSE: read ICESat-2 annual land ice height data (ATL11) from NSIDC
#-- reduce to the Randolph Glacier Inventory
def main():
    """Read an ATL11 file with parallel h5py, intersect each beam pair with
    RGI polygons across MPI ranks, and write masks to HDF5 on rank 0.
    """
    #-- start MPI communicator
    comm = MPI.COMM_WORLD
    #-- Read the system arguments listed after the program
    parser = argparse.ArgumentParser(
        description="""Create masks for reducing ICESat-2 ATL11 annual land
        ice height data to the Randolph Glacier Inventory (RGI)
        """,
        fromfile_prefix_chars="@"
    )
    parser.convert_arg_line_to_args = \
        icesat2_toolkit.utilities.convert_arg_line_to_args
    #-- command line parameters
    parser.add_argument('file',
        type=lambda p: os.path.abspath(os.path.expanduser(p)),
        help='ICESat-2 ATL11 file to run')
    #-- working data directory for location of RGI files
    parser.add_argument('--directory','-D',
        type=lambda p: os.path.abspath(os.path.expanduser(p)),
        default=os.getcwd(),
        help='Working data directory')
    #-- region of Randolph Glacier Inventory to run
    #-- NOTE(review): module header advertises '-R' but the short flag is '-r'
    parser.add_argument('--region','-r',
        metavar='RGI', type=int, choices=range(1,20),
        help='region of Randolph Glacier Inventory to run')
    #-- verbosity settings
    #-- verbose will output information about each output file
    parser.add_argument('--verbose','-V',
        default=False, action='store_true',
        help='Verbose output of run')
    #-- permissions mode of the local files (number in octal)
    parser.add_argument('--mode','-M',
        type=lambda x: int(x,base=8), default=0o775,
        help='permissions mode of output files')
    args,_ = parser.parse_known_args()
    #-- create logger
    loglevel = logging.INFO if args.verbose else logging.CRITICAL
    logging.basicConfig(level=loglevel)
    #-- output module information for process
    info(comm.rank,comm.size)
    if (comm.rank == 0):
        logging.info('{0} -->'.format(args.file))
    #-- Open the HDF5 file for reading (collective MPI-IO driver)
    fileID = h5py.File(args.file, 'r', driver='mpio', comm=comm)
    DIRECTORY = os.path.dirname(args.file)
    #-- extract parameters from ICESat-2 ATLAS HDF5 file name
    rx = re.compile(r'(processed_)?(ATL\d{2})_(\d{4})(\d{2})_(\d{2})(\d{2})_'
        r'(\d{3})_(\d{2})(.*?).h5$')
    SUB,PRD,TRK,GRAN,SCYC,ECYC,RL,VERS,AUX = rx.findall(args.file).pop()
    #-- read data on rank 0
    if (comm.rank == 0):
        #-- read RGI for region and create shapely polygon objects
        poly_dict,RGI_file = load_glacier_inventory(args.directory,args.region)
    else:
        #-- create empty object for list of shapely objects
        poly_dict = None
        RGI_file = None
    #-- Broadcast Shapely polygon objects
    poly_dict = comm.bcast(poly_dict, root=0)
    RGI_file = comm.bcast(RGI_file, root=0)
    #-- RGI version and name
    RGI_VERSION,RGI_NAME = re.findall(r'\d_rgi(\d+)_(.*?)$',RGI_file).pop()
    #-- combined validity check for all beam pairs
    valid_check = False
    #-- read each input beam pair within the file
    IS2_atl11_pairs = []
    for ptx in [k for k in fileID.keys() if bool(re.match(r'pt\d',k))]:
        #-- check if subsetted beam contains reference points
        try:
            fileID[ptx]['ref_pt']
        except KeyError:
            pass
        else:
            IS2_atl11_pairs.append(ptx)
    #-- copy variables for outputting to HDF5 file
    IS2_atl11_mask = {}
    IS2_atl11_fill = {}
    IS2_atl11_dims = {}
    IS2_atl11_mask_attrs = {}
    #-- number of GPS seconds between the GPS epoch (1980-01-06T00:00:00Z UTC)
    #-- and ATLAS Standard Data Product (SDP) epoch (2018-01-01T00:00:00Z UTC)
    #-- Add this value to delta time parameters to compute full gps_seconds
    IS2_atl11_mask['ancillary_data'] = {}
    IS2_atl11_mask_attrs['ancillary_data'] = {}
    for key in ['atlas_sdp_gps_epoch']:
        #-- get each HDF5 variable
        IS2_atl11_mask['ancillary_data'][key] = fileID['ancillary_data'][key][:]
        #-- Getting attributes of group and included variables
        IS2_atl11_mask_attrs['ancillary_data'][key] = {}
        for att_name,att_val in fileID['ancillary_data'][key].attrs.items():
            IS2_atl11_mask_attrs['ancillary_data'][key][att_name] = att_val
    #-- for each input beam pair within the file
    for ptx in sorted(IS2_atl11_pairs):
        #-- output data dictionaries for beam pair
        IS2_atl11_mask[ptx] = dict(subsetting=collections.OrderedDict())
        IS2_atl11_fill[ptx] = dict(subsetting={})
        IS2_atl11_dims[ptx] = dict(subsetting={})
        IS2_atl11_mask_attrs[ptx] = dict(subsetting={})
        #-- number of average segments and number of included cycles
        delta_time = fileID[ptx]['delta_time'][:].copy()
        n_points,n_cycles = np.shape(delta_time)
        #-- check if there are less segments than processes
        if (n_points < comm.Get_size()):
            continue
        #-- define indices to run for specific process
        #-- (strided decomposition: rank r takes points r, r+size, r+2*size, ...)
        ind = np.arange(comm.Get_rank(),n_points,comm.Get_size(),dtype=int)
        #-- convert reduced lat/lon to shapely multipoint object
        longitude = fileID[ptx]['longitude'][:].copy()
        latitude = fileID[ptx]['latitude'][:].copy()
        xy_point = MultiPoint(list(zip(longitude[ind],latitude[ind])))
        #-- create distributed intersection map for calculation
        distributed_map = np.zeros((n_points),dtype=bool)
        distributed_RGIId = np.zeros((n_points),dtype='|S14')
        #-- create empty intersection map array for receiving
        associated_map = np.zeros((n_points),dtype=bool)
        associated_RGIId = np.zeros((n_points),dtype='|S14')
        for key,poly_obj in poly_dict.items():
            #-- finds if points are encapsulated (within RGI polygon)
            int_test = poly_obj.intersects(xy_point)
            if int_test:
                #-- extract intersected points
                #-- NOTE(review): iterating a MultiPoint directly was removed in
                #-- shapely 2.x (use .geoms) -- assumes shapely 1.x here
                int_map = list(map(poly_obj.intersects,xy_point))
                int_indices, = np.nonzero(int_map)
                #-- set distributed_map indices to True for intersected points
                distributed_map[ind[int_indices]] = True
                distributed_RGIId[ind[int_indices]] = key
        #-- communicate output MPI matrices between ranks
        #-- operation is a logical "or" across the elements.
        comm.Allreduce(sendbuf=[distributed_map, MPI.BOOL], \
            recvbuf=[associated_map, MPI.BOOL], op=MPI.LOR)
        #-- operation is a element summation.
        #-- NOTE(review): summing CHAR buffers works here only because each
        #-- index is written by exactly one rank (others contribute zeros)
        comm.Allreduce(sendbuf=[distributed_RGIId, MPI.CHAR], \
            recvbuf=[associated_RGIId, MPI.CHAR], op=MPI.SUM)
        distributed_map = None
        distributed_RGIId = None
        #-- wait for all processes to finish calculation
        comm.Barrier()
        #-- add to validity check
        valid_check |= np.any(associated_map)
        #-- group attributes for beam pair
        IS2_atl11_mask_attrs[ptx]['description'] = ('Contains the primary science parameters for this '
            'data set')
        IS2_atl11_mask_attrs[ptx]['beam_pair'] = fileID[ptx].attrs['beam_pair']
        IS2_atl11_mask_attrs[ptx]['ReferenceGroundTrack'] = fileID[ptx].attrs['ReferenceGroundTrack']
        IS2_atl11_mask_attrs[ptx]['first_cycle'] = fileID[ptx].attrs['first_cycle']
        IS2_atl11_mask_attrs[ptx]['last_cycle'] = fileID[ptx].attrs['last_cycle']
        IS2_atl11_mask_attrs[ptx]['equatorial_radius'] = fileID[ptx].attrs['equatorial_radius']
        IS2_atl11_mask_attrs[ptx]['polar_radius'] = fileID[ptx].attrs['polar_radius']
        #-- geolocation, time and reference point
        #-- reference point
        IS2_atl11_mask[ptx]['ref_pt'] = fileID[ptx]['ref_pt'][:].copy()
        IS2_atl11_fill[ptx]['ref_pt'] = None
        IS2_atl11_dims[ptx]['ref_pt'] = None
        IS2_atl11_mask_attrs[ptx]['ref_pt'] = collections.OrderedDict()
        IS2_atl11_mask_attrs[ptx]['ref_pt']['units'] = "1"
        IS2_atl11_mask_attrs[ptx]['ref_pt']['contentType'] = "referenceInformation"
        IS2_atl11_mask_attrs[ptx]['ref_pt']['long_name'] = "Reference point number"
        IS2_atl11_mask_attrs[ptx]['ref_pt']['source'] = "ATL06"
        IS2_atl11_mask_attrs[ptx]['ref_pt']['description'] = ("The reference point is the 7 "
            "digit segment_id number corresponding to the center of the ATL06 data used for "
            "each ATL11 point. These are sequential, starting with 1 for the first segment "
            "after an ascending equatorial crossing node.")
        IS2_atl11_mask_attrs[ptx]['ref_pt']['coordinates'] = \
            "delta_time latitude longitude"
        #-- cycle_number
        IS2_atl11_mask[ptx]['cycle_number'] = fileID[ptx]['cycle_number'][:].copy()
        IS2_atl11_fill[ptx]['cycle_number'] = None
        IS2_atl11_dims[ptx]['cycle_number'] = None
        IS2_atl11_mask_attrs[ptx]['cycle_number'] = collections.OrderedDict()
        IS2_atl11_mask_attrs[ptx]['cycle_number']['units'] = "1"
        IS2_atl11_mask_attrs[ptx]['cycle_number']['long_name'] = "Orbital cycle number"
        IS2_atl11_mask_attrs[ptx]['cycle_number']['source'] = "ATL06"
        IS2_atl11_mask_attrs[ptx]['cycle_number']['description'] = ("Number of 91-day periods "
            "that have elapsed since ICESat-2 entered the science orbit. Each of the 1,387 "
            "reference ground track (RGTs) is targeted in the polar regions once "
            "every 91 days.")
        #-- delta time
        IS2_atl11_mask[ptx]['delta_time'] = fileID[ptx]['delta_time'][:].copy()
        IS2_atl11_fill[ptx]['delta_time'] = fileID[ptx]['delta_time'].attrs['_FillValue']
        IS2_atl11_dims[ptx]['delta_time'] = ['ref_pt','cycle_number']
        IS2_atl11_mask_attrs[ptx]['delta_time'] = collections.OrderedDict()
        IS2_atl11_mask_attrs[ptx]['delta_time']['units'] = "seconds since 2018-01-01"
        IS2_atl11_mask_attrs[ptx]['delta_time']['long_name'] = "Elapsed GPS seconds"
        IS2_atl11_mask_attrs[ptx]['delta_time']['standard_name'] = "time"
        IS2_atl11_mask_attrs[ptx]['delta_time']['calendar'] = "standard"
        IS2_atl11_mask_attrs[ptx]['delta_time']['source'] = "ATL06"
        IS2_atl11_mask_attrs[ptx]['delta_time']['description'] = ("Number of GPS "
            "seconds since the ATLAS SDP epoch. The ATLAS Standard Data Products (SDP) epoch offset "
            "is defined within /ancillary_data/atlas_sdp_gps_epoch as the number of GPS seconds "
            "between the GPS epoch (1980-01-06T00:00:00.000000Z UTC) and the ATLAS SDP epoch. By "
            "adding the offset contained within atlas_sdp_gps_epoch to delta time parameters, the "
            "time in gps_seconds relative to the GPS epoch can be computed.")
        IS2_atl11_mask_attrs[ptx]['delta_time']['coordinates'] = \
            "ref_pt cycle_number latitude longitude"
        #-- latitude
        IS2_atl11_mask[ptx]['latitude'] = fileID[ptx]['latitude'][:].copy()
        IS2_atl11_fill[ptx]['latitude'] = fileID[ptx]['latitude'].attrs['_FillValue']
        IS2_atl11_dims[ptx]['latitude'] = ['ref_pt']
        IS2_atl11_mask_attrs[ptx]['latitude'] = collections.OrderedDict()
        IS2_atl11_mask_attrs[ptx]['latitude']['units'] = "degrees_north"
        IS2_atl11_mask_attrs[ptx]['latitude']['contentType'] = "physicalMeasurement"
        IS2_atl11_mask_attrs[ptx]['latitude']['long_name'] = "Latitude"
        IS2_atl11_mask_attrs[ptx]['latitude']['standard_name'] = "latitude"
        IS2_atl11_mask_attrs[ptx]['latitude']['source'] = "ATL06"
        IS2_atl11_mask_attrs[ptx]['latitude']['description'] = ("Center latitude of "
            "selected segments")
        IS2_atl11_mask_attrs[ptx]['latitude']['valid_min'] = -90.0
        IS2_atl11_mask_attrs[ptx]['latitude']['valid_max'] = 90.0
        IS2_atl11_mask_attrs[ptx]['latitude']['coordinates'] = \
            "ref_pt delta_time longitude"
        #-- longitude
        IS2_atl11_mask[ptx]['longitude'] = fileID[ptx]['longitude'][:].copy()
        IS2_atl11_fill[ptx]['longitude'] = fileID[ptx]['longitude'].attrs['_FillValue']
        IS2_atl11_dims[ptx]['longitude'] = ['ref_pt']
        IS2_atl11_mask_attrs[ptx]['longitude'] = collections.OrderedDict()
        IS2_atl11_mask_attrs[ptx]['longitude']['units'] = "degrees_east"
        IS2_atl11_mask_attrs[ptx]['longitude']['contentType'] = "physicalMeasurement"
        IS2_atl11_mask_attrs[ptx]['longitude']['long_name'] = "Longitude"
        IS2_atl11_mask_attrs[ptx]['longitude']['standard_name'] = "longitude"
        IS2_atl11_mask_attrs[ptx]['longitude']['source'] = "ATL06"
        IS2_atl11_mask_attrs[ptx]['longitude']['description'] = ("Center longitude of "
            "selected segments")
        IS2_atl11_mask_attrs[ptx]['longitude']['valid_min'] = -180.0
        IS2_atl11_mask_attrs[ptx]['longitude']['valid_max'] = 180.0
        IS2_atl11_mask_attrs[ptx]['longitude']['coordinates'] = \
            "ref_pt delta_time latitude"
        #-- subsetting variables
        IS2_atl11_mask_attrs[ptx]['subsetting']['Description'] = ("The subsetting group "
            "contains parameters used to reduce annual land ice height segments to specific "
            "regions of interest.")
        IS2_atl11_mask_attrs[ptx]['subsetting']['data_rate'] = ("Data within this group "
            "are stored at the average segment rate.")
        #-- output mask to HDF5
        key = RGI_NAME.replace('_',' ')
        IS2_atl11_mask[ptx]['subsetting'][RGI_NAME] = associated_map
        IS2_atl11_fill[ptx]['subsetting'][RGI_NAME] = None
        IS2_atl11_dims[ptx]['subsetting'][RGI_NAME] = ['ref_pt']
        IS2_atl11_mask_attrs[ptx]['subsetting'][RGI_NAME] = collections.OrderedDict()
        IS2_atl11_mask_attrs[ptx]['subsetting'][RGI_NAME]['contentType'] = "referenceInformation"
        IS2_atl11_mask_attrs[ptx]['subsetting'][RGI_NAME]['long_name'] = '{0} Mask'.format(key)
        IS2_atl11_mask_attrs[ptx]['subsetting'][RGI_NAME]['description'] = ('Mask calculated '
            'using the {0} region from the Randolph Glacier Inventory.').format(key)
        IS2_atl11_mask_attrs[ptx]['subsetting'][RGI_NAME]['source'] = \
            'RGIv{0}'.format(RGI_VERSION)
        IS2_atl11_mask_attrs[ptx]['subsetting'][RGI_NAME]['reference'] = \
            'https://www.glims.org/RGI/'
        IS2_atl11_mask_attrs[ptx]['subsetting'][RGI_NAME]['coordinates'] = \
            "../ref_pt ../delta_time ../latitude ../longitude"
        #-- output RGI identifier
        IS2_atl11_mask[ptx]['subsetting']['RGIId'] = associated_RGIId
        IS2_atl11_fill[ptx]['subsetting']['RGIId'] = None
        IS2_atl11_dims[ptx]['subsetting']['RGIId'] = ['ref_pt']
        IS2_atl11_mask_attrs[ptx]['subsetting']['RGIId'] = collections.OrderedDict()
        IS2_atl11_mask_attrs[ptx]['subsetting']['RGIId']['contentType'] = "referenceInformation"
        IS2_atl11_mask_attrs[ptx]['subsetting']['RGIId']['long_name'] = "RGI Identifier"
        IS2_atl11_mask_attrs[ptx]['subsetting']['RGIId']['description'] = ('Identification '
            'code within version {0} of the Randolph Glacier Inventory (RGI).').format(RGI_VERSION)
        IS2_atl11_mask_attrs[ptx]['subsetting']['RGIId']['source'] = \
            'RGIv{0}'.format(RGI_VERSION)
        IS2_atl11_mask_attrs[ptx]['subsetting']['RGIId']['reference'] = \
            'https://www.glims.org/RGI/'
        IS2_atl11_mask_attrs[ptx]['subsetting']['RGIId']['coordinates'] = \
            "../ref_pt ../delta_time ../latitude ../longitude"
    #-- wait for all processes to finish calculation
    comm.Barrier()
    #-- parallel h5py I/O does not support compression filters at this time
    if (comm.rank == 0) and valid_check:
        #-- output HDF5 file with RGI masks
        fargs = (PRD,RGI_VERSION,RGI_NAME,TRK,GRAN,SCYC,ECYC,RL,VERS,AUX)
        file_format = '{0}_RGI{1}_{2}_{3}{4}_{5}{6}_{7}_{8}{9}.h5'
        output_file = os.path.join(DIRECTORY,file_format.format(*fargs))
        #-- print file information
        logging.info('\t{0}'.format(output_file))
        #-- write to output HDF5 file
        HDF5_ATL11_mask_write(IS2_atl11_mask, IS2_atl11_mask_attrs,
            CLOBBER=True, INPUT=os.path.basename(args.file),
            FILL_VALUE=IS2_atl11_fill, DIMENSIONS=IS2_atl11_dims,
            FILENAME=output_file)
        #-- change the permissions mode
        os.chmod(output_file, args.mode)
    #-- close the input file
    fileID.close()
#-- PURPOSE: outputting the masks for ICESat-2 data to HDF5
def HDF5_ATL11_mask_write(IS2_atl11_mask, IS2_atl11_attrs, INPUT=None,
    FILENAME='', FILL_VALUE=None, DIMENSIONS=None, CLOBBER=True):
    """Write the RGI subsetting masks and supporting variables to HDF5.

    Arguments
    ---------
    IS2_atl11_mask: dictionary of output variables per beam pair
    IS2_atl11_attrs: dictionary of attributes for each output variable
    INPUT: name (or iterable of names) of the input ATL11 file(s)
    FILENAME: full path of the output HDF5 file
    FILL_VALUE: dictionary of _FillValue attributes for each variable
    DIMENSIONS: dictionary of dimension scales for each variable
    CLOBBER: overwrite an existing output file
    """
    #-- setting HDF5 clobber attribute
    if CLOBBER:
        clobber = 'w'
    else:
        clobber = 'w-'
    #-- open output HDF5 file
    fileID = h5py.File(os.path.expanduser(FILENAME), clobber)
    #-- create HDF5 records
    h5 = {}
    #-- number of GPS seconds between the GPS epoch (1980-01-06T00:00:00Z UTC)
    #-- and ATLAS Standard Data Product (SDP) epoch (2018-01-01T00:00:00Z UTC)
    h5['ancillary_data'] = {}
    for k,v in IS2_atl11_mask['ancillary_data'].items():
        #-- Defining the HDF5 dataset variables
        val = 'ancillary_data/{0}'.format(k)
        h5['ancillary_data'][k] = fileID.create_dataset(val, np.shape(v), data=v,
            dtype=v.dtype, compression='gzip')
        #-- add HDF5 variable attributes
        for att_name,att_val in IS2_atl11_attrs['ancillary_data'][k].items():
            h5['ancillary_data'][k].attrs[att_name] = att_val
    #-- write each output beam pair
    pairs = [k for k in IS2_atl11_mask.keys() if bool(re.match(r'pt\d',k))]
    for ptx in pairs:
        fileID.create_group(ptx)
        h5[ptx] = {}
        #-- add HDF5 group attributes for beam pair
        for att_name in ['description','beam_pair','ReferenceGroundTrack',
            'first_cycle','last_cycle','equatorial_radius','polar_radius']:
            fileID[ptx].attrs[att_name] = IS2_atl11_attrs[ptx][att_name]
        #-- ref_pt, cycle number, geolocation and delta_time variables
        for k in ['ref_pt','cycle_number','delta_time','latitude','longitude']:
            #-- values and attributes
            v = IS2_atl11_mask[ptx][k]
            attrs = IS2_atl11_attrs[ptx][k]
            fillvalue = FILL_VALUE[ptx][k]
            #-- Defining the HDF5 dataset variables
            val = '{0}/{1}'.format(ptx,k)
            #-- compare against None so a legitimate fill value of 0 is honored
            if fillvalue is not None:
                h5[ptx][k] = fileID.create_dataset(val, np.shape(v), data=v,
                    dtype=v.dtype, fillvalue=fillvalue, compression='gzip')
            else:
                h5[ptx][k] = fileID.create_dataset(val, np.shape(v), data=v,
                    dtype=v.dtype, compression='gzip')
            #-- create or attach dimensions for HDF5 variable
            if DIMENSIONS[ptx][k]:
                #-- attach dimensions
                for i,dim in enumerate(DIMENSIONS[ptx][k]):
                    h5[ptx][k].dims[i].attach_scale(h5[ptx][dim])
            else:
                #-- make dimension
                h5[ptx][k].make_scale(k)
            #-- add HDF5 variable attributes
            for att_name,att_val in attrs.items():
                h5[ptx][k].attrs[att_name] = att_val
        #-- add to subsetting variables
        fileID[ptx].create_group('subsetting')
        h5[ptx]['subsetting'] = {}
        for att_name in ['Description','data_rate']:
            att_val=IS2_atl11_attrs[ptx]['subsetting'][att_name]
            fileID[ptx]['subsetting'].attrs[att_name] = att_val
        for k,v in IS2_atl11_mask[ptx]['subsetting'].items():
            #-- attributes
            attrs = IS2_atl11_attrs[ptx]['subsetting'][k]
            fillvalue = FILL_VALUE[ptx]['subsetting'][k]
            #-- Defining the HDF5 dataset variables
            val = '{0}/{1}/{2}'.format(ptx,'subsetting',k)
            if fillvalue is not None:
                h5[ptx]['subsetting'][k] = fileID.create_dataset(val,
                    np.shape(v), data=v, dtype=v.dtype, fillvalue=fillvalue,
                    compression='gzip')
            else:
                h5[ptx]['subsetting'][k] = fileID.create_dataset(val,
                    np.shape(v), data=v, dtype=v.dtype, compression='gzip')
            #-- attach dimensions
            for i,dim in enumerate(DIMENSIONS[ptx]['subsetting'][k]):
                h5[ptx]['subsetting'][k].dims[i].attach_scale(h5[ptx][dim])
            #-- add HDF5 variable attributes
            for att_name,att_val in attrs.items():
                h5[ptx]['subsetting'][k].attrs[att_name] = att_val
    #-- HDF5 file title
    fileID.attrs['featureType'] = 'trajectory'
    fileID.attrs['title'] = 'ATLAS/ICESat-2 Land Ice Height'
    fileID.attrs['summary'] = ('Subsetting masks and geophysical parameters '
        'for land ice segments needed to interpret and assess the quality '
        'of the height estimates.')
    fileID.attrs['description'] = ('Land ice parameters for each beam pair. '
        'All parameters are calculated for the same along-track increments '
        'for each beam pair and repeat.')
    date_created = datetime.datetime.today()
    fileID.attrs['date_created'] = date_created.isoformat()
    project = 'ICESat-2 > Ice, Cloud, and land Elevation Satellite-2'
    fileID.attrs['project'] = project
    platform = 'ICESat-2 > Ice, Cloud, and land Elevation Satellite-2'
    #-- BUG FIX: previously re-assigned the 'project' attribute; the platform
    #-- string belongs in the 'platform' attribute
    fileID.attrs['platform'] = platform
    #-- add attribute for elevation instrument and designated processing level
    instrument = 'ATLAS > Advanced Topographic Laser Altimeter System'
    fileID.attrs['instrument'] = instrument
    fileID.attrs['source'] = 'Spacecraft'
    fileID.attrs['references'] = 'https://nsidc.org/data/icesat-2'
    fileID.attrs['processing_level'] = '4'
    #-- add attributes for input ATL11 files
    #-- accept either a single filename or an iterable of filenames
    #-- (a bare string would otherwise be iterated character-by-character)
    if INPUT is None:
        INPUT = []
    elif isinstance(INPUT, str):
        INPUT = [INPUT]
    fileID.attrs['input_files'] = ','.join([os.path.basename(i) for i in INPUT])
    #-- find geospatial and temporal ranges
    lnmn,lnmx,ltmn,ltmx,tmn,tmx = (np.inf,-np.inf,np.inf,-np.inf,np.inf,-np.inf)
    for ptx in pairs:
        lon = IS2_atl11_mask[ptx]['longitude']
        lat = IS2_atl11_mask[ptx]['latitude']
        delta_time = IS2_atl11_mask[ptx]['delta_time']
        valid = np.nonzero(delta_time != FILL_VALUE[ptx]['delta_time'])
        #-- setting the geospatial and temporal ranges
        lnmn = lon.min() if (lon.min() < lnmn) else lnmn
        lnmx = lon.max() if (lon.max() > lnmx) else lnmx
        ltmn = lat.min() if (lat.min() < ltmn) else ltmn
        ltmx = lat.max() if (lat.max() > ltmx) else ltmx
        tmn = delta_time[valid].min() if (delta_time[valid].min() < tmn) else tmn
        tmx = delta_time[valid].max() if (delta_time[valid].max() > tmx) else tmx
    #-- add geospatial and temporal attributes
    fileID.attrs['geospatial_lat_min'] = ltmn
    fileID.attrs['geospatial_lat_max'] = ltmx
    fileID.attrs['geospatial_lon_min'] = lnmn
    fileID.attrs['geospatial_lon_max'] = lnmx
    fileID.attrs['geospatial_lat_units'] = "degrees_north"
    fileID.attrs['geospatial_lon_units'] = "degrees_east"
    fileID.attrs['geospatial_ellipsoid'] = "WGS84"
    fileID.attrs['date_type'] = 'UTC'
    fileID.attrs['time_type'] = 'CCSDS UTC-A'
    #-- convert start and end time from ATLAS SDP seconds into UTC time
    time_utc = convert_delta_time(np.array([tmn,tmx]))
    #-- convert to calendar date
    YY,MM,DD,HH,MN,SS = icesat2_toolkit.time.convert_julian(time_utc['julian'],
        FORMAT='tuple')
    #-- add attributes with measurement date start, end and duration
    tcs = datetime.datetime(int(YY[0]), int(MM[0]), int(DD[0]),
        int(HH[0]), int(MN[0]), int(SS[0]), int(1e6*(SS[0] % 1)))
    fileID.attrs['time_coverage_start'] = tcs.isoformat()
    tce = datetime.datetime(int(YY[1]), int(MM[1]), int(DD[1]),
        int(HH[1]), int(MN[1]), int(SS[1]), int(1e6*(SS[1] % 1)))
    fileID.attrs['time_coverage_end'] = tce.isoformat()
    fileID.attrs['time_coverage_duration'] = '{0:0.0f}'.format(tmx-tmn)
    #-- Closing the HDF5 file
    fileID.close()
#-- run main program
#-- (guard prevents execution when the module is imported, e.g. under mpi4py)
if __name__ == '__main__':
    main()
|
|
import numpy as NP
from astropy.io import fits
from astropy.io import ascii
import scipy.constants as FCNST
import matplotlib.pyplot as PLT
import matplotlib.animation as MOV
import geometry as GEOM
import interferometry as RI
import catalog as CTLG
import constants as CNST
import my_DSP_modules as DSP
# Load the MWACS point-source catalog (CSV with RA/Dec and 150 MHz flux fits).
catalog_file = '/data3/t_nithyanandan/project_MWA/mwacs_b1_131016.csv'
# catalog_file = '/Users/t_nithyanandan/Downloads/mwacs_b1_131016.csv'
catdata = ascii.read(catalog_file, data_start=1, delimiter=',')
dec_deg = catdata['DEJ2000']     # declination [deg]
ra_deg = catdata['RAJ2000']      # right ascension [deg]
fpeak = catdata['S150_fit']      # fitted flux density (presumably at 150 MHz -- column name)
ferr = catdata['e_S150_fit']     # uncertainty on the flux fit (unused below)
spindex = catdata['Sp+Index']    # spectral index (unused below)
freq_catalog = 0.150 # in GHz
freq_resolution = 40.0 # in kHz
nchan = 512
# Channel centre frequencies in GHz: nchan channels of width freq_resolution
# centred on the catalog frequency.
chans = freq_catalog + (NP.arange(nchan) - 0.5 * nchan) * freq_resolution * 1e3 / 1e9
# Flat unit bandpass to start with; shaped by the window below.
bpass = 1.0*NP.ones(nchan)
# Do the following next few lines only for MWA
# Channels per 1.28 MHz coarse band. NOTE(review): only referenced by the
# commented-out notch lines below, so currently unused.
notch_interval = NP.round(1.28e6 / (freq_resolution * 1e3))
# bpass[::notch_interval] = 0.0
# bpass[1::notch_interval] = 0.0
# bpass[2::notch_interval] = 0.0
oversampling_factor = 1.0
# window = DSP.shaping(nchan, 1/oversampling_factor*CNST.rect_bnw_ratio, shape='bnw', peak=1.0)
# Rectangular spectral window applied to the bandpass.
window = DSP.shaping(nchan, 1/oversampling_factor, shape='rect', peak=1.0)
bpass *= window
# Build the sky model: catalog object from stacked (RA, Dec) columns and fluxes.
ctlgobj = CTLG.Catalog(freq_catalog, NP.hstack((ra_deg.reshape(-1,1), dec_deg.reshape(-1,1))), fpeak)
skymod = CTLG.SkyModel(ctlgobj)
# Effective collecting area at the catalog frequency; the factor 16 matches an
# MWA tile's dipole count -- TODO confirm that interpretation.
A_eff = 16.0 * (0.5 * FCNST.c / (freq_catalog * 1e9))**2
# Single east-west 1 km baseline interferometer at the MWA latitude.
intrfrmtr = RI.Interferometer('B1', [1000.0, 0.0, 0.0], chans, telescope='mwa',
                              latitude=-26.701, A_eff=A_eff, freq_scale='GHz')
Tsys = 440.0 # in Kelvin
t_snap = 40 * 60.0 # in seconds
# ha_range = 15.0*NP.arange(-1.0, t_snap/3.6e3, 1.0)
n_snaps = 16
# LST of each snapshot in degrees (15 deg per hour of elapsed snapshot time).
lst_obs = (0.0 + (t_snap / 3.6e3) * NP.arange(n_snaps)) * 15.0 # in degrees
# Drift scan: hour angle 0 and declination = site latitude for every snapshot.
ha_obs = NP.zeros(n_snaps)
dec_obs = intrfrmtr.latitude + NP.zeros(n_snaps)
# NOTE(review): `xrange` means this script targets Python 2.
for i in xrange(n_snaps):
    intrfrmtr.observe(str(lst_obs[i]), Tsys, bpass, [ha_obs[i], dec_obs[i]], skymod, t_snap, roi_radius=30.0, lst=lst_obs[i])
# Transform accumulated visibilities to delay space once, after all snapshots.
intrfrmtr.delay_transform()
lags = intrfrmtr.lags
vis_lag = intrfrmtr.vis_lag
# When oversampled, downsample the delay axis back to the critical rate.
if oversampling_factor > 1.0:
    lags = DSP.downsampler(intrfrmtr.lags, oversampling_factor)
    vis_lag = DSP.downsampler(intrfrmtr.vis_lag, oversampling_factor)
noise_info = intrfrmtr.band_averaged_noise_estimate(filter_method='hpf')
fig = PLT.figure(figsize=(14,14))
ax1 = fig.add_subplot(211)   # top panel: delay spectrum amplitude
# fig, (ax1, ax2) = PLT.subplots(2,1,figsize=(14,12))
ax1.set_xlabel(r'$\eta$ [$\mu$s]', fontsize=18)
ax1.set_ylabel('Amplitude [Jy Hz]', fontsize=18)
ax1.set_title('Delay Spectrum', fontsize=18, weight='semibold')
ax1.set_yscale('log')
# Fixed axis limits spanning all snapshots so animation frames are comparable.
ax1.set_xlim(1e6*NP.amin(lags)-1.0, 1e6*NP.amax(lags)+1.0)
ax1.set_ylim(0.5*NP.amin(NP.abs(intrfrmtr.vis_lag)),2.0*NP.amax(NP.abs(intrfrmtr.vis_lag)))
l1, = ax1.plot([], [], 'g+', markersize=10)   # data filled in per frame by update()
ax1.tick_params(which='major', length=12, labelsize=18)
ax1.tick_params(which='minor', length=6)
# ax2 = fig.add_subplot(212)
# ax2.set_xlim(NP.min(skymod.catalog.location[:,0]), NP.max(skymod.catalog.location[:,0]))
# ax2.set_ylim(NP.min(skymod.catalog.location[:,1])-5.0, NP.max(skymod.catalog.location[:,1])+5.0)
ax2 = fig.add_subplot(212, projection='hammer')   # bottom panel: all-sky model
# NOTE(review): this rebinds `ra_deg` (previously the catalog table column) to
# a column slice of skymod.catalog.location; numpy column slices are views, so
# the in-place wrap below also modifies the sky model's stored coordinates.
ra_deg = skymod.catalog.location[:,0]
neg_ra = skymod.catalog.location[:,0] > 180.0
# Wrap RA from [0, 360) into [-180, 180) as the Hammer projection expects.
ra_deg[neg_ra] = ra_deg[neg_ra] - 360.0
ax2.set_xlabel(r'$\alpha$ [degrees]', fontsize=18)
ax2.set_ylabel(r'$\delta$ [degrees]', fontsize=18)
ax2.set_title('Sky Model', fontsize=18, weight='semibold')
# ax2.text(-2.0, -2.0, 'Sky Model', fontsize=18, va='bottom')
ax2.grid(True)
ax2.tick_params(which='major', length=12, labelsize=18)
ax2.tick_params(which='minor', length=6)
# l2init, = ax2.plot(skymod.catalog.location[:,0], skymod.catalog.location[:,1], 'k.', markersize=1)
# Static backdrop of every catalog source; projection axes take radians.
l2init, = ax2.plot(NP.radians(ra_deg), NP.radians(skymod.catalog.location[:,1]), 'k.', markersize=1)
l2, = ax2.plot([], [], 'g+', markersize=3)   # sources in the current snapshot
txt1 = ax1.text(0.05, 0.9, '', transform=ax1.transAxes, fontsize=18)
txt2 = ax2.text(0.25, 0.8, '', transform=ax2.transAxes, fontsize=18)
# def init():
#     l1.set_xdata([])
#     l1.set_ydata([])
#     l2.set_xdata(skymod.catalog.location[:,0])
#     l2.set_ydata(skymod.catalog.location[:,1])
#     l2.set_marker('.')
#     txt1.set_text('')
#     txt2.set_text('')
#     return l1, l2, txt1, txt2
def update(i, interferometer, eta, delay_spectra, line1, line2, t1, t2):
    """FuncAnimation callback for snapshot `i`.

    Draws the delay-spectrum amplitude for snapshot `i` on `line1` and
    highlights the catalog sources observed in that snapshot on the
    sky-model panel via `line2`. `t1` is annotated with the snapshot
    timestamp minus the pointing-centre RA, and the pointing Dec; `t2`
    is cleared. Uses the module-level `ra_deg` and `skymod`.
    """
    line1.set_xdata(1e6 * eta)  # delay axis in microseconds
    line1.set_ydata(NP.abs(delay_spectra[i, :]))
    observed = NP.asarray(interferometer.obs_catalog_indices[i])
    line2.set_xdata(NP.radians(ra_deg[observed]))
    line2.set_ydata(NP.radians(skymod.catalog.location[observed, 1]))
    label_str = r' $\alpha$ = {0:+.3f} deg, $\delta$ = {1:+.2f} deg'.format(float(interferometer.timestamp[i])-interferometer.pointing_center[i,0], interferometer.pointing_center[i,1])
    t1.set_text(label_str)
    t2.set_text('')
    return line1, line2, t1, t2
# One animation frame per snapshot, 400 ms apart; blitting disabled.
anim = MOV.FuncAnimation(fig, update, fargs=(intrfrmtr, lags, vis_lag, l1, l2, txt1, txt2), frames=vis_lag.shape[0], interval=400, blit=False)
PLT.show()
# # anim.save('/data3/t_nithyanandan/project_MWA/delay_spectrum_animation.gif', fps=2.5, writer='imagemagick')
# anim.save('/Users/t_nithyanandan/Downloads/delay_spectrum_animation_10MHz_1_notch_RECT.gif', fps=2.5, writer='imagemagick')
# # anim.save('/Users/t_nithyanandan/Downloads/delay_spectrum_animation.mp4', fps=2.5, writer='ffmpeg')
# anim.save('/Users/t_nithyanandan/Downloads/delay_spectrum_animation_10MHz_1_notch_RECT.mp4', fps=2.5, writer='ffmpeg')
|
|
from joerd.util import BoundingBox
from joerd.region import RegionTile
from joerd.mkdir_p import mkdir_p
from osgeo import osr, gdal
import logging
import os
import os.path
import errno
import sys
import joerd.composite as composite
import joerd.mercator as mercator
import numpy
import math
from geographiclib.geodesic import Geodesic
import bisect
# Generate a table of heights suitable for use as hypsometric tinting. These
# have only a little precision for bathymetry, and concentrate most of the
# rest in the 0-3000m range, which is where most of the world's population
# lives.
#
# It seemed better to have this as a function which returned the table rather
# than include the table verbatim, as this would be a big blob of unreadable
# numbers.
def _generate_mapping_table():
table = []
for i in range(0, 11):
table.append(-11000 + i * 1000)
table.append(-100)
table.append( -50)
table.append( -20)
table.append( -10)
table.append( -1)
for i in range(0, 150):
table.append(20 * i)
for i in range(0, 60):
table.append(3000 + 50 * i)
for i in range(0, 29):
table.append(6000 + 100 * i)
return table
# Make a constant version of the table for reference.
HEIGHT_TABLE = _generate_mapping_table()
# Function which returns the index of the maximum height in the height table
# which is lower than the input `h`. I.e: it rounds down. We then _flip_ the
# table "backwards" so that low heights have higher indices. This is so that
# when it's displayed on a regular computer, the lower values near sea level
# have high alpha, making them more opaque.
def _height_mapping_func(h):
    # bisect_left gives the insertion point of h in the sorted table;
    # subtracting from 255 flips it so low elevations map to high values.
    return 255 - bisect.bisect_left(HEIGHT_TABLE, h)
class NormalTile(mercator.MercatorTile):
    """A single 256x256 web-mercator tile of surface normals.

    The rendered PNG stores the x/y/z components of the unit surface
    normal in the RGB channels and an index into HEIGHT_TABLE (for
    hypsometric tinting) in the alpha channel.
    """

    def __init__(self, parent, z, x, y):
        # `parent` is the Normal output object; only its mercator helper and
        # output directory are used here.
        super(NormalTile, self).__init__(
            z, x, y, 256,
            parent.mercator.latlon_bbox(z, x, y),
            parent.mercator.mercator_bbox(z, x, y))
        self.output_dir = parent.output_dir

    def freeze_dry(self):
        # Serializable job description; see Normal.rehydrate for the inverse.
        return dict(type='normal', z=self.z, x=self.x, y=self.y)

    def render(self, tmp_dir):
        """Render this tile to `tmp_dir`/`output_dir`/<z>/<x>/<y>.png."""
        logger = logging.getLogger('normal')
        bbox = self._mercator_bbox
        mid_dir = os.path.join(tmp_dir, self.output_dir,
                               str(self.z), str(self.x))
        mkdir_p(mid_dir)
        tile = self.tile_name()
        tile_file = os.path.join(tmp_dir, self.output_dir,
                                 tile + ".png")
        logger.debug("Generating tile %r..." % tile)
        # Pixels of margin ("bleed") around the tile so the gradient filter
        # has valid neighbours at the tile edges.
        filter_size = 10
        # NOTE(review): `outfile` is assigned but never used below.
        outfile = tile_file
        dst_bbox = bbox.bounds
        dst_x_size = 256
        dst_y_size = 256
        # Output resolution in mercator meters per pixel.
        dst_x_res = float(dst_bbox[2] - dst_bbox[0]) / dst_x_size
        dst_y_res = float(dst_bbox[3] - dst_bbox[1]) / dst_y_size
        dst_srs = osr.SpatialReference()
        dst_srs.ImportFromEPSG(3857)
        # expand bbox & image to generate "bleed" for image filter
        mid_min_x = dst_bbox[0] - filter_size * dst_x_res
        mid_min_y = dst_bbox[1] - filter_size * dst_y_res
        mid_max_x = dst_bbox[2] + filter_size * dst_x_res
        mid_max_y = dst_bbox[3] + filter_size * dst_y_res
        filter_top_margin = filter_size
        filter_bot_margin = filter_size
        filter_lft_margin = filter_size
        filter_rgt_margin = filter_size
        # clip bounding box back to the edges of the world. GDAL can handle
        # wrapping around the world, but it doesn't give the results that
        # would be expected.
        if mid_min_x < -0.5 * mercator.MERCATOR_WORLD_SIZE:
            filter_lft_margin = 0
            mid_min_x = dst_bbox[0]
        if mid_min_y < -0.5 * mercator.MERCATOR_WORLD_SIZE:
            filter_bot_margin = 0
            mid_min_y = dst_bbox[1]
        if mid_max_x > 0.5 * mercator.MERCATOR_WORLD_SIZE:
            filter_rgt_margin = 0
            mid_max_x = dst_bbox[2]
        if mid_max_y > 0.5 * mercator.MERCATOR_WORLD_SIZE:
            filter_top_margin = 0
            mid_max_y = dst_bbox[3]
        mid_x_size = dst_x_size + filter_lft_margin + filter_rgt_margin
        mid_y_size = dst_y_size + filter_bot_margin + filter_top_margin
        mid_bbox = (mid_min_x, mid_min_y, mid_max_x, mid_max_y)
        # In-memory single-band float raster holding the expanded elevations.
        mid_drv = gdal.GetDriverByName("MEM")
        mid_ds = mid_drv.Create('', mid_x_size, mid_y_size, 1, gdal.GDT_Float32)
        mid_gt = (mid_bbox[0], dst_x_res, 0,
                  mid_bbox[3], 0, -dst_y_res)
        mid_ds.SetGeoTransform(mid_gt)
        mid_ds.SetProjection(dst_srs.ExportToWkt())
        mid_ds.GetRasterBand(1).SetNoDataValue(mercator.FLT_NODATA)
        # figure out what the approximate scale of the output image is in
        # lat/lon coordinates. this is used to select the appropriate filter.
        ll_bbox = self._latlon_bbox
        ll_x_res = float(ll_bbox.bounds[2] - ll_bbox.bounds[0]) / dst_x_size
        ll_y_res = float(ll_bbox.bounds[3] - ll_bbox.bounds[1]) / dst_y_size
        # calculate the resolution of a pixel in real meters for both x and y.
        # this will be used to scale the gradient so that it's consistent
        # across zoom levels.
        ll_mid_x = 0.5 * (ll_bbox.bounds[2] + ll_bbox.bounds[0])
        ll_spc_x = 0.5 * (ll_bbox.bounds[2] - ll_bbox.bounds[0]) / dst_x_size
        ll_mid_y = 0.5 * (ll_bbox.bounds[3] + ll_bbox.bounds[1])
        ll_spc_y = 0.5 * (ll_bbox.bounds[3] - ll_bbox.bounds[1]) / dst_y_size
        geod = Geodesic.WGS84
        # NOTE: in defiance of predictability and regularity, the geod methods
        # take input as (lat, lon) in that order, rather than (x, y) as would
        # be sensible.
        # NOTE: at low zooms, taking the width across the tile starts to break
        # down, so we take the width across a small portion of the interior of
        # the tile instead.
        geodesic_res_x = -1.0 / \
            geod.Inverse(ll_mid_y, ll_mid_x - ll_spc_x,
                         ll_mid_y, ll_mid_x + ll_spc_x)['s12']
        geodesic_res_y = 1.0 / \
            geod.Inverse(ll_mid_y - ll_spc_y, ll_mid_x,
                         ll_mid_y + ll_spc_y, ll_mid_x)['s12']
        # Composite source elevation data into the mid-size raster.
        composite.compose(self, mid_ds, logger, min(ll_x_res, ll_y_res))
        pixels = mid_ds.GetRasterBand(1).ReadAsArray(0, 0, mid_x_size, mid_y_size)
        # Height gradient (sample spacing 2), scaled by the real-world pixel
        # size; z component fixed at 1 before normalisation.
        ygrad, xgrad = numpy.gradient(pixels, 2)
        img = numpy.dstack((geodesic_res_x * xgrad, geodesic_res_y * ygrad,
                            numpy.ones((mid_y_size, mid_x_size))))
        # first, we normalise to unit vectors. this puts each element of img
        # in the range (-1, 1). the "einsum" stuff is serious black magic, but
        # what it (should be) saying is "for each i,j in the rows and columns,
        # the output is the sum of img[i,j,k]*img[i,j,k]" - i.e: the square.
        norm = numpy.sqrt(numpy.einsum('ijk,ijk->ij', img, img))
        # the norm is now the "wrong shape" according to numpy, so we need to
        # copy the norm value out into RGB components.
        norm_copy = norm[:, :, numpy.newaxis]
        # dividing the img by norm_copy should give us RGB components with
        # values between -1 and 1, but we need values between 0 and 255 for
        # PNG channels. so we move and scale the values to fit in that range.
        scaled = (128.0 * (img / norm_copy + 1.0))
        # and finally clip it to (0, 255) just in case
        img = numpy.clip(scaled, 0.0, 255.0)
        # Create output as a 4-channel RGBA image, each (byte) channel
        # corresponds to x, y, z, h where x, y and z are the respective
        # components of the normal, and h is an index into a hypsometric tint
        # table (see HEIGHT_TABLE).
        dst_ds = mid_drv.Create('', dst_x_size, dst_y_size, 4, gdal.GDT_Byte)
        dst_gt = (dst_bbox[0], dst_x_res, 0,
                  dst_bbox[3], 0, -dst_y_res)
        dst_ds.SetGeoTransform(dst_gt)
        dst_ds.SetProjection(dst_srs.ExportToWkt())
        # apply the height mapping function to get the table index.
        func = numpy.vectorize(_height_mapping_func)
        hyps = func(pixels).astype(numpy.uint8)
        # extract the area without the "bleed" margin.
        ext = img[filter_top_margin:(filter_top_margin+dst_y_size), \
                  filter_lft_margin:(filter_lft_margin+dst_x_size)]
        dst_ds.GetRasterBand(1).WriteArray(ext[...,0].astype(numpy.uint8))
        dst_ds.GetRasterBand(2).WriteArray(ext[...,1].astype(numpy.uint8))
        dst_ds.GetRasterBand(3).WriteArray(ext[...,2].astype(numpy.uint8))
        # add hypsometric tint index as alpha channel
        dst_ds.GetRasterBand(4).WriteArray(
            hyps[filter_top_margin:(filter_top_margin+dst_y_size),
                 filter_lft_margin:(filter_lft_margin+dst_x_size)])
        png_drv = gdal.GetDriverByName("PNG")
        png_ds = png_drv.CreateCopy(tile_file, dst_ds)
        # explicitly delete the datasources. the Python-GDAL docs suggest that
        # this is a good idea not only to dispose of memory buffers but also
        # to ensure that the backing file handles are closed.
        del png_ds
        del dst_ds
        del mid_ds
        assert os.path.isfile(tile_file)
        # NOTE(review): `self.sources` is not assigned in __init__ above --
        # confirm that MercatorTile (or the compositing step) provides it,
        # otherwise this line raises AttributeError.
        source_names = [type(s).__name__ for s in self.sources]
        logger.info("Done generating tile %r from %s"
                    % (tile, ", ".join(source_names)))
class Normal:
    """Output generator that enumerates normal-map tiles for a set of regions."""

    def __init__(self, regions, sources, options=None):
        # Fix: avoid the mutable-default-argument anti-pattern; behaviour is
        # unchanged for callers that pass a dict (as create() does).
        opts = {} if options is None else options
        self.regions = regions
        self.sources = sources
        self.output_dir = opts.get('output_dir', 'normal_tiles')
        self.enable_browser_png = opts.get('enable_browser_png', False)
        self.mercator = mercator.Mercator()

    def expand_tile(self, bbox, zoom_range):
        """Return RegionTiles covering lon/lat `bbox` at each zoom in `zoom_range`."""
        tiles = []
        for z in range(*zoom_range):
            lx, ly = self.mercator.lonlat_to_xy(z, bbox[0], bbox[1])
            ux, uy = self.mercator.lonlat_to_xy(z, bbox[2], bbox[3])
            ll = self.mercator.latlon_bbox(z, lx, ly).bounds
            ur = self.mercator.latlon_bbox(z, ux, uy).bounds
            # Worst-case per-pixel resolution across the two corner tiles.
            res = max((ll[2] - ll[0]) / 256.0,
                      (ur[2] - ur[0]) / 256.0)
            tiles.append(RegionTile((ll[0], ll[1], ur[2], ur[3]), res))
        return tiles

    def generate_tiles(self):
        """Yield a NormalTile for every tile of every region at its zoom levels."""
        logger = logging.getLogger('normal')
        for r in self.regions:
            rbox = r.bbox.bounds
            for zoom in range(*r.zoom_range):
                # Tile y grows downward, so the region's max latitude (rbox[3])
                # maps to the lower tile row index.
                lx, ly = self.mercator.lonlat_to_xy(zoom, rbox[0], rbox[3])
                ux, uy = self.mercator.lonlat_to_xy(zoom, rbox[2], rbox[1])
                logger.info("Generating %d tiles for region." % ((ux - lx + 1) * (uy - ly + 1),))
                for x in range(lx, ux + 1):
                    for y in range(ly, uy + 1):
                        # Fix: dropped a dead `bbox = self.latlon_bbox(...)`
                        # whose result was never used.
                        yield NormalTile(self, zoom, x, y)

    def latlon_bbox(self, z, x, y):
        """Lat/lon bounding box of tile (z, x, y)."""
        return self.mercator.latlon_bbox(z, x, y)

    def mercator_bbox(self, z, x, y):
        """Mercator bounding box of tile (z, x, y)."""
        return self.mercator.mercator_bbox(z, x, y)

    def rehydrate(self, data):
        """Reconstruct a NormalTile from a NormalTile.freeze_dry() dict."""
        typ = data.get('type')
        assert typ == 'normal', "Unable to rehydrate tile of type %r in " \
            "normal output. Job was: %r" % (typ, data)
        z = data['z']
        x = data['x']
        y = data['y']
        return NormalTile(self, z, x, y)
# Factory entry point used by the joerd output-plugin loader.
def create(regions, sources, options):
    return Normal(regions, sources, options)
|
|
# -*- coding: utf-8 -*-
import numpy as np
import scipy as sp
def moments_mvou(x_tnow, deltat_m, theta, mu, sig2):
    """Conditional moments of a multivariate Ornstein-Uhlenbeck process
    at the horizons in deltat_m. For details, see here.

    Parameters
    ----------
    x_tnow : array, shape(n_, )
        current value of the process
    deltat_m : array, shape(m_, )
        horizons; a scalar float or np.int64 is also accepted
    theta : array, shape(n_, n_)
        transition (mean-reversion) matrix
    mu : array, shape(n_, )
        drift vector
    sig2 : array, shape(n_, n_)
        instantaneous covariance of the shocks

    Returns
    -------
    mu_dt_m : array, shape(m_, n_)
        drift contribution of the shocks per horizon
    mu_deltat_m : array, shape(m_, n_)
        conditional mean of the process per horizon
    sig2_deltat_m : array, shape(m_, n_, n_)
        conditional covariance per horizon (symmetrized)
    """
    # NOTE(review): this guard reshapes *non*-1-D inputs into a column vector,
    # which stays 2-D; the broadcasting in the loop below appears to assume
    # 1-D x_tnow/mu -- confirm intended behaviour for 2-D inputs.
    if len(x_tnow.shape) != 1:
        x_tnow = x_tnow.reshape(-1, 1).copy()
    if len(mu.shape) != 1:
        mu = mu.reshape(-1, 1).copy()
    n_ = x_tnow.shape[0]
    # Promote a scalar horizon to a 1-element array.
    # NOTE(review): a plain Python int is not caught here and would fail on
    # len() below -- confirm callers pass float or np.int64.
    if isinstance(deltat_m, float) or isinstance(deltat_m, np.int64):
        m_ = 1
        deltat_m = np.array([deltat_m])
    else:
        m_ = len(deltat_m)
    mu_dt_m = np.zeros((m_, n_))
    mu_deltat_m = np.zeros((m_, n_))
    sig2_deltat_m = np.zeros((m_, n_, n_))
    # `m` is the 1-tuple index yielded by ndenumerate; the `[[m], :]`
    # indexing is MATLAB-style and relies on numpy broadcasting.
    for m, tm in np.ndenumerate(deltat_m):
        # Step 1: compute drift of shocks
        mu_dt_m[[m], :] = (np.eye(n_) - sp.linalg.expm(-theta * tm)) \
            @ (np.linalg.solve(theta, mu))
        # Step 2: compute drift of process
        mu_deltat_m[[m], :] = sp.linalg.expm(-theta * tm) @ x_tnow + \
            mu_dt_m[[m], :]
        # Step 3: compute covariance of process via the vectorized
        # (Kronecker-sum) Lyapunov-type formula
        th_sum_th = sp.linalg.kron(theta, np.eye(n_)) + \
            sp.linalg.kron(np.eye(n_), theta)
        vecsig2 = np.reshape(sig2, (n_ ** 2, 1), 'F')
        vecsig2_m = np.linalg.solve(th_sum_th, (np.eye(n_ ** 2) -
                                    sp.linalg.expm(-th_sum_th * tm))) @ vecsig2
        sig2_m = np.reshape(vecsig2_m, (n_, n_), 'F')
        # grant numerical symmetry
        sig2_deltat_m[[m], :, :] = (sig2_m + sig2_m.T) / 2
    # resize: drop singleton dimensions for multivariate processes
    if n_ != 1:
        mu_dt_m = mu_dt_m.squeeze()
        mu_deltat_m = mu_deltat_m.squeeze()
        sig2_deltat_m = np.atleast_2d(sig2_deltat_m.squeeze())
    return mu_dt_m, mu_deltat_m, sig2_deltat_m
|
|
import argparse
from scipy.optimize import differential_evolution
from sklearn.naive_bayes import MultinomialNB
from imblearn.metrics import geometric_mean_score
import numpy as np
import pickle
# Load the pre-split train/test feature matrices and label vectors that were
# pickled by an upstream preprocessing step.
with open('../X_train.pickle', 'rb') as f:
    X_train = pickle.load(f)
with open('../y_train.pickle', 'rb') as f:
    y_train = pickle.load(f)
with open('../X_test.pickle', 'rb') as f:
    X_test = pickle.load(f)
with open('../y_test.pickle', 'rb') as f:
    y_test = pickle.load(f)
__author__ = "Manolomon"
__license__ = "MIT"
__version__ = "1.0"
# NOTE(review): redundant -- pickle is already imported at the top of the file.
import pickle
def logger(xa, convergence):
    """Per-generation callback for differential_evolution.

    Prints the current best parameter vector on a single line; the
    `convergence` fraction supplied by scipy is ignored.
    """
    one_line = np.array_repr(xa).replace('\n', '')
    print(one_line)
def fitness_func(individual):  # Fitness Function
    """Evaluate one DE candidate.

    Decodes the candidate vector (index 0: alpha; index 1 >= 0.5 means
    fit_prior=True), trains a MultinomialNB on the module-level training
    split and returns the negated weighted geometric mean on the test
    split, since differential_evolution minimizes its objective.
    """
    global X_train
    global y_train
    global X_test
    global y_test
    use_prior = individual[1] >= 0.5
    model = MultinomialNB(alpha=individual[0], fit_prior=use_prior)
    model.fit(X_train, y_train)
    predictions = model.predict(X_test)
    g_mean = geometric_mean_score(y_test, predictions, average='weighted')
    del model
    return -1 * g_mean
# Search space for the two MultinomialNB hyperparameters being tuned.
bounds = [
    (0, 100), # alpha: float, default=1.0
    (0, 1), # fit_prior: bool, default=True (decoded via a 0.5 threshold)
]
if __name__ == "__main__":
    ap = argparse.ArgumentParser(
        description='MultinomialNB Hyperparameter tuning for software requirements categorization using Differential Evolution')
    ap.add_argument("-v", "--verbose",
                    help="increase output verbosity", action="store_true")
    ap.add_argument('--np', dest='np', type=int,
                    required=True, help='Population size')
    ap.add_argument('--max_gen', dest='max_gen', type=int,
                    required=True, help='Genarations')
    ap.add_argument('--f', dest='f', type=float,
                    required=True, help='Scale Factor')
    ap.add_argument('--cr', dest='cr', type=float,
                    required=True, help='Crossover percentage')
    ap.add_argument('--datfile', dest='datfile', type=str,
                    help='File where it will be save the score (result)')
    args = ap.parse_args()
    # Run rand/1/bin differential evolution; `logger` prints the best vector
    # each generation.
    result = differential_evolution(fitness_func, bounds, disp=True, popsize=args.np, maxiter=args.max_gen,
                                    mutation=args.f, recombination=args.cr, strategy='rand1bin', callback=logger)
    #print("Best individual: [alpha=%s, fit_prior=%s] f(x)=%s" % (result.x[0], False if result.x[1] < 0.5 else True, result.fun*(-100)))
    # fitness_func returns the negated g-mean, so -fun is the achieved score;
    # scaled by 100 when written to the results file.
    if args.datfile:
        with open(args.datfile, 'w') as f:
            f.write(str(result.fun*(-100)))
|
|
from tisane.family import SquarerootLink
from tisane.data import Dataset
from tisane.variable import AbstractVariable
from tisane.statistical_model import StatisticalModel
from tisane.random_effects import (
RandomIntercept,
RandomSlope,
CorrelatedRandomSlopeAndIntercept,
UncorrelatedRandomSlopeAndIntercept,
)
import os
from typing import List, Any, Tuple
import typing
import pandas as pd
import statsmodels.api as sm
import statsmodels.formula.api as smf
### GLOBALs
pymer4_preamble = """
# Tisane inferred the following statistical model based on this query: {}
import pandas as pd
from pymer4.models import Lmer # supports Generalized linear models with or without mixed effects
import matplotlib.pyplot as plt # for visualizing residual plots to diagnose model fit
"""
statsmodels_preamble = """
# Tisane inferred the following statistical model based on this query: {}
import pandas as pd
import statsmodels.api as sm
import statsmodels.formula.api as smf
import matplotlib.pyplot as plt # for visualizing residual plots to diagnose model fit
"""
model_function_wrapper = """
def fit_model():
"""
model_diagnostics_function_wrapper = """
# What should you look for in the plot?
# If there is systematic bias in how residuals are distributed, you may want to try a new link or family function. You may also want to reconsider your conceptual and statistical models.
# Read more here: https://sscc.wisc.edu/sscc/pubs/RegressionDiagnostics.html
def show_model_diagnostics(model):
"""
main_function = """
if __name__ == "__main__":
model = fit_model()
show_model_diagnostics(model)
"""
load_data_from_csv_template = """
df = pd.read_csv('{path}')
"""
load_data_from_dataframe_template = """
# Dataframe is stored in local file: data.csv
# You may want to replace the data path with an existing data file you already have.
# You may also set df equal to a pandas dataframe you are already working with.
df = pd.read_csv('{path}') # Make sure that the data path is correct
"""
load_data_no_data_source = """
# There was no data assigned to the Design. Add data below.
path = '' # Specify path to data if loading from a csv
df = pd.read_csv(path)
# If loading from a pandas Dataframe, alias dataframe with variable df
# df = <your pandas Dataframe>
"""
pymer4_model_template = """
model = Lmer(formula={formula}, family=\"{family_name}\", data=df)
print(model.fit())
return model
"""
statsmodels_model_template = """
model = smf.glm(formula={formula}, data=df, family=sm.families.{family_name}(sm.families.links.{link_obj}))
res = model.fit()
print(res.summary())
return model
"""
pymer4_model_diagnostics = """
plt.axhline(y=0, color='r', linestyle='-')
plt.scatter(model.fits, model.residuals)
plt.title("Fitted values vs. Residuals")
plt.xlabel("fitted values")
plt.ylabel("residuals")
plt.show()
"""
statsmodels_model_diagnostics = """
res = model.fit()
plt.clf()
plt.grid(True)
plt.axhline(y=0, color='r', linestyle='-')
plt.plot(res.predict(linear=True), res.resid_pearson, 'o')
plt.xlabel("Linear predictor")
plt.ylabel("Residual")
plt.show()
"""
# Template bundle consumed by the generate_pymer4_* functions.
pymer4_code_templates = {
    "preamble": pymer4_preamble,
    "model_function_wrapper": model_function_wrapper,
    "load_data_from_csv_template": load_data_from_csv_template,
    "load_data_from_dataframe_template": load_data_from_dataframe_template,
    "load_data_no_data_source": load_data_no_data_source,
    "model_template": pymer4_model_template,
    "model_diagnostics_function_wrapper": model_diagnostics_function_wrapper,
    "model_diagnostics": pymer4_model_diagnostics,
    "main_function": main_function,
}
# Template bundle consumed by the generate_statsmodels_* functions.
statsmodels_code_templates = {
    "preamble": statsmodels_preamble,
    "model_function_wrapper": model_function_wrapper,
    "load_data_from_csv_template": load_data_from_csv_template,
    "load_data_from_dataframe_template": load_data_from_dataframe_template,
    "load_data_no_data_source": load_data_no_data_source,
    "model_template": statsmodels_model_template,
    "model_diagnostics_function_wrapper": model_diagnostics_function_wrapper,
    "model_diagnostics": statsmodels_model_diagnostics,
    "main_function": main_function,
}
# Tisane family-class name -> pymer4/lme4 family string.
pymer4_family_name_to_functions = {
    "GaussianFamily": "gaussian",
    "InverseGaussianFamily": "inverse_gaussian",
    "GammaFamily": "gamma",
    # Not implemented in pymer4 or lme4
    # "TweedieFamily": "Tweedie",
    "PoissonFamily": "poisson",
    "BinomialFamily": "binomial",
    # Not implemented in pymer4 or lme4
    # "NegativeBinomialFamily": "NegativeBinomial",
}
# Lme4 implements defaults for the link functions based on the family functions
pymer4_link_name_to_functions = {}
# Reference from: https://www.statsmodels.org/stable/glm.html#families
statsmodels_family_name_to_functions = {
    "GaussianFamily": "Gaussian",
    "InverseGaussianFamily": "InverseGaussian",
    "GammaFamily": "Gamma",
    "TweedieFamily": "Tweedie",
    "PoissonFamily": "Poisson",
    "BinomialFamily": "Binomial",
    "NegativeBinomialFamily": "NegativeBinomial",
}
# Tisane link-class name -> statsmodels link constructor call (as source text).
statsmodels_link_name_to_functions = {
    "IdentityLink": "identity()",
    "InverseLink": "inverse_power()",
    "InverseSquaredLink": "inverse_squared()",
    "LogLink": "log()",
    "LogitLink": "logit()",
    "ProbitLink": "probit()",
    "CauchyLink": "cauchy()",
    "CLogLogLink": "cloglog()",
    "PowerLink": "Power()",
    "SquarerootLink": "Power(power=.5)",
    # Not currently implemented in statsmodels
    # "OPowerLink": "",
    "NegativeBinomialLink": "NegativeBinomial()",
    # Not currently implemented in statsmodels
    # "LogLogLink": "",
}
### HELPERS
def absolute_path(p: str) -> str:
    """Resolve *p* relative to the directory containing this module."""
    module_dir = os.path.dirname(os.path.abspath(__file__))
    return os.path.join(module_dir, p)
# Write data out to path
# Return path
def write_out_dataframe(data: Dataset) -> str:
    """Write *data* to "data.csv" in the current working directory.

    Parameters
    ----------
    data : Dataset
        Must already hold data (asserted below).

    Returns
    -------
    str
        Path of the CSV file that was written.

    Fix: the return annotation was ``os.path`` (a module object, not a
    type); it is now ``str``, which is what the function actually returns.
    """
    destination_dir = os.getcwd()
    output_filename = os.path.join(destination_dir, "data.csv")
    assert data.has_data()
    data.get_data().to_csv(output_filename)
    return output_filename
# @param target describes the backend for which to generate code
def generate_code(
    statistical_model: StatisticalModel, target: str = "PYTHON", **kwargs
):
    """Dispatch code generation to the backend named by *target*.

    Only the "PYTHON" target (case-insensitive) is currently supported;
    any other target returns None.
    """
    if target.upper() != "PYTHON":
        return None
    return generate_python_code(statistical_model=statistical_model, **kwargs)
def generate_python_code(statistical_model: StatisticalModel):
    """Choose the Python backend for *statistical_model*.

    Models with random effects go to pymer4 (mixed-effects support);
    everything else goes to statsmodels.
    """
    if statistical_model.has_random_effects():
        return generate_pymer4_code(statistical_model=statistical_model)
    return generate_statsmodels_code(statistical_model=statistical_model)
def generate_pymer4_code(statistical_model: StatisticalModel):
    """Assemble a complete pymer4 model-fitting script for *statistical_model*.

    The returned string contains the preamble imports, a fit_model()
    function (data loading + Lmer construction), a diagnostics function,
    and a __main__ block, concatenated in execution order.
    """
    templates = pymer4_code_templates
    # Data-loading snippet: no data source, a CSV path, or an in-memory
    # dataframe that is first written out to disk.
    if not statistical_model.has_data():
        data_code = templates["load_data_no_data_source"]
    else:
        data = statistical_model.get_data()
        if data.has_data_path():
            data_code = templates["load_data_from_csv_template"].format(
                path=str(data.data_path)
            )
        else:
            data_code = templates["load_data_from_dataframe_template"].format(
                path=write_out_dataframe(data)
            )
    model_code = generate_pymer4_model(statistical_model=statistical_model)
    # Stitch the script together; blank lines separate the sections.
    script_parts = [
        templates["preamble"],
        "\n",
        templates["model_function_wrapper"],
        data_code,
        "\n",
        model_code,
        "\n",
        templates["model_diagnostics_function_wrapper"],
        templates["model_diagnostics"],
        "\n",
        templates["main_function"],
    ]
    return "".join(script_parts)
def generate_pymer4_model(statistical_model: StatisticalModel):
    """Render the Lmer(...) construction/fitting snippet for the model.

    pymer4/lme4 derives the link function from the family, so only the
    formula and family name are substituted into the template.
    """
    return pymer4_code_templates["model_template"].format(
        formula=generate_pymer4_formula(statistical_model=statistical_model),
        family_name=generate_pymer4_family(statistical_model=statistical_model),
    )
def generate_pymer4_formula(statistical_model: StatisticalModel):
    """Build the quoted lme4-style model formula:
    dv ~ main effects + interaction effects + random-effect terms.

    Main and interaction effect names are each alphabetized so the
    generated formula is deterministic.
    """
    global pymer4_code_templates
    dv_code = "{dv} ~ "
    dv_code = dv_code.format(dv=statistical_model.dependent_variable.name)
    main_code = str()
    sm_main_effects_names = [var.name for var in statistical_model.main_effects]
    sm_main_effects_names.sort()  # Alphabetize
    for var_name in sm_main_effects_names:
        if len(main_code) == 0:
            main_code = f"{var_name}"
        else:
            main_code += f" + {var_name}"
    interaction_code = str()
    sm_interaction_effects_names = [
        var.name for var in statistical_model.interaction_effects
    ]
    sm_interaction_effects_names.sort()  # Alphabetize
    for var_name in sm_interaction_effects_names:
        if len(interaction_code) == 0:
            interaction_code = f"{var_name}"
        else:
            interaction_code += f" + {var_name}"
    # lme4 random-effect syntax reference:
    # https://bbolker.github.io/mixedmodels-misc/glmmFAQ.html#model-specification
    random_code = str()
    for rc in statistical_model.random_effects:
        # NOTE(review): branch order matters if any of these random-effect
        # classes subclass one another -- confirm they are disjoint.
        if isinstance(rc, RandomSlope):
            groups = rc.groups
            iv = rc.iv
            # "0+" suppresses the intercept: random slope only.
            rc_code = f"(0+{iv.name}|{groups.name})"
        elif isinstance(rc, RandomIntercept):
            groups = rc.groups
            rc_code = f"(1|{groups.name})"
        elif isinstance(rc, CorrelatedRandomSlopeAndIntercept):
            groups = rc.random_slope.groups
            assert groups == rc.random_intercept.groups
            iv = rc.random_slope.iv
            # Correlated slope and intercept share a single term.
            rc_code = f"(1+{iv.name}|{groups.name})"
        else:
            assert isinstance(rc, UncorrelatedRandomSlopeAndIntercept)
            groups = rc.random_slope.groups
            assert groups == rc.random_intercept.groups
            iv = rc.random_slope.iv
            # Uncorrelated: separate intercept and slope terms.
            rc_code = f"(1|{groups.name}) + (0+{iv.name}|{groups.name})"
        if len(random_code) == 0:
            random_code = rc_code
        else:
            random_code += " + " + rc_code
    # Do we have both main effects and interaction effects?
    post_main_connector = ""
    if len(main_code) > 0:
        if len(interaction_code) > 0 or len(random_code) > 0:
            post_main_connector = " + "
    post_interaction_connector = ""
    if len(interaction_code) > 0:
        if len(random_code) > 0:
            post_interaction_connector = " + "
    return (
        "'"
        + dv_code
        + main_code
        + post_main_connector
        + interaction_code
        + post_interaction_connector
        + random_code
        + "'"
    )
def generate_pymer4_family(statistical_model: StatisticalModel) -> str:
    """Map the model's family-function class name to pymer4's family string."""
    family_class_name = type(statistical_model.family_function).__name__
    return pymer4_family_name_to_functions[family_class_name]
# def generate_pymer4_link(statistical_model=StatisticalModel) -> str:
# return str()
def generate_statsmodels_code(statistical_model: StatisticalModel):
    """Assemble a complete statsmodels GLM fitting script for *statistical_model*.

    The returned string contains the preamble imports, a fit_model()
    function (data loading + smf.glm construction), a residual-diagnostics
    function, and a __main__ block.

    Fixes: uses Dataset.has_data_path() (matching generate_pymer4_code)
    instead of poking at data.data_path directly, and delegates the model
    snippet to generate_statsmodels_model() instead of duplicating it.
    """
    global statsmodels_code_templates
    ### Specify preamble
    preamble = statsmodels_code_templates["preamble"]
    ### Generate data code
    data_code = None
    if not statistical_model.has_data():
        data_code = statsmodels_code_templates["load_data_no_data_source"]
    else:
        data = statistical_model.get_data()
        if data.has_data_path():
            data_code = statsmodels_code_templates["load_data_from_csv_template"]
            data_code = data_code.format(path=str(data.data_path))
        else:
            # No path: persist the in-memory dataframe first.
            data_path = write_out_dataframe(data)
            data_code = statsmodels_code_templates[
                "load_data_from_dataframe_template"
            ].format(path=data_path)
    ### Generate model code (delegated; avoids duplicating formula/family/link
    ### assembly that generate_statsmodels_model already performs)
    model_code = generate_statsmodels_model(statistical_model=statistical_model)
    model_diagnostics_code = statsmodels_code_templates["model_diagnostics"]
    ### Put everything together
    model_function_wrapper = statsmodels_code_templates["model_function_wrapper"]
    model_diagnostics_function_wrapper = statsmodels_code_templates[
        "model_diagnostics_function_wrapper"
    ]
    main_function = statsmodels_code_templates["main_function"]
    assert data_code is not None
    # Return string to write out to script
    return (
        preamble
        + "\n"
        + model_function_wrapper
        + data_code
        + "\n"
        + model_code
        + "\n"
        + model_diagnostics_function_wrapper
        + model_diagnostics_code
        + "\n"
        + main_function
    )
def generate_statsmodels_model(statistical_model: StatisticalModel):
    """Render the smf.glm(...) construction/fitting snippet for the model,
    substituting the formula, family name and link constructor."""
    return statsmodels_code_templates["model_template"].format(
        formula=generate_statsmodels_formula(statistical_model=statistical_model),
        family_name=generate_statsmodels_family(statistical_model=statistical_model),
        link_obj=generate_statsmodels_link(statistical_model=statistical_model),
    )
def generate_statsmodels_formula(statistical_model: "StatisticalModel"):
    """Build a quoted patsy-style formula string for @statistical_model.

    Main effects and interaction effects are each alphabetized, then joined as
    "'<dv> ~ <main_1> + ... + <ixn_1> + ...'". The surrounding single quotes
    are part of the returned string (it is spliced into generated code).
    """
    dv_part = f"{statistical_model.dependent_variable.name} ~ "
    # Alphabetize both groups of terms for a deterministic formula.
    main_part = " + ".join(sorted(var.name for var in statistical_model.main_effects))
    interaction_part = " + ".join(
        sorted(var.name for var in statistical_model.interaction_effects)
    )
    # Join the two groups only when both are non-empty.
    connector = " + " if main_part and interaction_part else ""
    return "'" + dv_part + main_part + connector + interaction_part + "'"
# @returns string of family function in statsmodels corresponding to @param statistical_model's family function (of AbstractFamily type)
def generate_statsmodels_family(statistical_model: StatisticalModel) -> str:
    """Map the model's family function (an AbstractFamily instance) to the
    name of the corresponding statsmodels family constructor."""
    global statsmodels_family_name_to_functions
    family_class_name = type(statistical_model.family_function).__name__
    return statsmodels_family_name_to_functions[family_class_name]
def generate_statsmodels_link(statistical_model: StatisticalModel):
    """Map the model's link function to the corresponding statsmodels link name.

    Fix: the parameter was declared as ``statistical_model=StatisticalModel``,
    making the *class* a default argument value instead of a type annotation.
    Calling with the default would have crashed; the sibling generators all use
    an annotation, so this now matches them.
    """
    global statsmodels_link_name_to_functions
    sm_link = statistical_model.link_function
    sm_link_name = type(sm_link).__name__
    return statsmodels_link_name_to_functions[sm_link_name]
def generate_statsmodels_glm_code(statistical_model: "StatisticalModel", **kwargs) -> str:
    """Generate a Python snippet that fits @statistical_model as a statsmodels GLM.

    Returns a string of the form
    ``model = smf.glm(formula="y ~ x",data=df,family=sm.families.<Family>(<link>))``.

    Only models without random effects are supported here (mixed models are
    handled by generate_statsmodels_glmm_code).

    Optional kwargs:
        no_intercept: suppress the automatic intercept (TODO: not implemented).
        alpha: dispersion parameter forwarded to NegativeBinomial/nbinom.
        var_power: variance power forwarded to the Tweedie family.

    Raises:
        ValueError: if the model's family matches no supported family.
    """
    has_random = len(statistical_model.random_ivs) > 0
    assert has_random is False
    # Intercept added automatically with formula unless specified otherwise
    if "no_intercept" in kwargs:
        # TODO: not implemented — `sm` is not in scope here and `model_code`
        # is never assembled on this branch (pre-existing).
        model = sm.GLM
    # TODO: Might not need to get data again, just reuse existing code in "outer" code gen function
    else:
        ## Build FORMULA
        model = "model = smf.glm"
        y = statistical_model.dv
        y_code = f"{y.name}"
        xs_code = None
        for f in statistical_model.fixed_ivs:
            if xs_code is None:
                xs_code = f"{f.name}"
            else:
                xs_code += f" + {f.name}"
        for interaction in statistical_model.interactions:
            ixn_terms = list()
            for e in interaction:
                assert isinstance(e, AbstractVariable)
                ixn_terms.append(e.name)
            mult = "*"
            if xs_code is None:
                xs_code = f"{mult.join(ixn_terms)}"
            else:
                xs_code += f" + {mult.join(ixn_terms)}"
        formula_code = "formula=" + '"' + y_code + " ~ " + xs_code + '"'
        data_code = "data=df"
        model_code = model + "(" + formula_code + "," + data_code + ","
    ## LINK
    link = statistical_model.link_function.upper()
    link_code = "sm.families.links."
    if "IDENTITY" in link:
        link_code += "identity()"
    elif "LOG" in link and "LOGLOG" not in link:
        link_code += "log()"
    elif "CLOGLOG" in link:
        link_code += "cloglog()"
    elif "SQUAREROOT" in link:
        raise NotImplementedError
    elif "INVERSE" in link and "SQUARED" not in link:
        link_code += "inverse_power()"
    elif "INVERSESQUARED" in link:
        link_code += "inverse_squared()"
    elif "POWER" in link:
        link_code += "Power()"
    elif "CAUCHY" in link:
        link_code += "cauchy()"
    elif "LOGLOG" in link:
        # No statsmodels equivalent; expression is intentionally left bare.
        link_code += ""
    elif "PROBIT" in link:
        link_code += "probit()"
    elif "LOGIT" in link:
        # The default link for the Binomial family is the logit link. Available links are logit, probit, cauchy, log, and cloglog.
        link_code += "Logit()"
    elif "NEGATIVEBINOMIAL" in link:
        # Optional parameter to pass to underlying nbinom function.
        if "alpha" in kwargs:
            # Fix: `alpha` was referenced without being read from kwargs
            # (NameError), and "nbinom()" was then appended unconditionally,
            # producing "nbinom(a)nbinom()".
            alpha = kwargs["alpha"]
            link_code += f"nbinom({alpha})"
        else:
            link_code += "nbinom()"
    ## FAMILY
    family = statistical_model.family.upper()
    # BINOMIAL
    if "BINOMIAL" in family and "NEGATIVE" not in family:
        family_code = f"sm.families.Binomial({link_code})"
    # GAMMA
    elif "GAMMA" in family:
        family_code = f"sm.families.Gamma({link_code})"
    # GAUSSIAN
    elif "GAUSSIAN" in family and "INVERSE" not in family:
        family_code = f"sm.families.Gaussian({link_code})"
    # INVERSEGAUSSIAN
    elif "INVERSEGAUSSIAN" in family:
        family_code = f"sm.families.InverseGaussian({link_code})"
    # NEGATIVEBINOMIAL
    elif "NEGATIVEBINOMIAL" in family:
        # Optional parameter to pass to family function.
        if "alpha" in kwargs:
            # Fix: was `kawrgs["alpha"]` (NameError), and the default
            # family_code overwrote the alpha version unconditionally.
            alpha = kwargs["alpha"]
            family_code = f"sm.families.NegativeBinomial({link_code}, {alpha})"
        else:
            family_code = f"sm.families.NegativeBinomial({link_code})"
    # POISSON
    elif "POISSON" in family:
        family_code = f"sm.families.Poisson({link_code})"
    # TWEEDIE
    elif "TWEEDIE" in family:
        # statsmodels v0.12.2 doc: "If eql is True, var_power must be
        # between 1 and 2."
        if "var_power" in kwargs:
            # Fix: int() made the 1 < var_power < 2 test unsatisfiable, and
            # the default family_code overwrote the eql version unconditionally.
            var_power = float(kwargs["var_power"])
            if 1 < var_power < 2:
                family_code = f"sm.families.Tweedie({link_code}, {var_power}, eql=True)"
            else:
                family_code = f"sm.families.Tweedie({link_code})"
        else:
            family_code = f"sm.families.Tweedie({link_code})"
    else:
        # Fix: previously fell through with family_code unbound (NameError).
        raise ValueError(f"Unsupported family: {family}")
    ## Assemble and update model
    model_code += "family=" + family_code + ")"
    return model_code
def generate_statsmodels_glmm_code(statistical_model: StatisticalModel, **kwargs):
    """Generate a Python snippet that fits @statistical_model as a statsmodels mixed model.

    Picks a model constructor from the family (BinomialBayesMixedGLM,
    sm.MixedLM.from_formula, or PoissonBayesMixedGLM), then assembles a call
    string of the form
    ``model = <ctor>(formula="y ~ ...",vc_formula = {...},re_formula = "...",groups = ...,data=df)``.

    Optional kwargs:
        no_intercept: suppress the automatic intercept (TODO: not implemented).

    NOTE(review): the closing double quote of `re_formula` is only appended in
    the slope branches (via `f'{iv.name}"'`) — a model with only random
    intercepts appears to leave the string unterminated; confirm upstream
    guarantees before relying on this.
    """
    family = statistical_model.family
    link = statistical_model.link_function
    # Intercept added automatically with formula unless specified otherwise
    if "no_intercept" in kwargs:
        model = sm.GLM
    # TODO: Might not need to get data again, just reuse existing code in "outer" code gen function
    else:
        pass
    model = "model = "
    ## FAMILY — choose the constructor name to emit.
    # BINOMIAL
    if "BINOMIAL" in family.upper() and "NEGATIVE" not in family.upper():
        model += f"BinomialBayesMixedGLM"
    # GAMMA
    elif "GAMMA" in family.upper():
        raise NotImplementedError
    # GAUSSIAN
    elif "GAUSSIAN" in family.upper() and "INVERSE" not in family.upper():
        # model += f'smf.mixedlm'
        # TODO: CHECK THAT LINK IS IDENTITY AS WELL
        model += f"sm.MixedLM.from_formula"
    # INVERSEGAUSSIAN
    elif "INVERSEGAUSSIAN" in family.upper():
        raise NotImplementedError
    # NEGATIVEBINOMIAL
    elif "NEGATIVEBINOMIAL" in family.upper():
        raise NotImplementedError
    # POISSON
    elif "POISSON" in family.upper():
        model += f"PoissonBayesMixedGLM"
    ## Build FORMULA: "y ~ x1 + x2 + a*b + ..."
    y = statistical_model.dv
    y_code = f"{y.name}"
    xs_code = None
    for f in statistical_model.fixed_ivs:
        if xs_code is None:
            xs_code = f"{f.name}"
        else:
            xs_code += f" + {f.name}"
    for interaction in statistical_model.interactions:
        ixn_terms = list()
        for e in interaction:
            assert isinstance(e, AbstractVariable)
            ixn_terms.append(e.name)
        mult = "*"
        if xs_code is None:
            xs_code = f"{mult.join(ixn_terms)}"
        else:
            xs_code += f" + {mult.join(ixn_terms)}"
    # Ex: vc = {'classroom': '0 + C(classroom)'}
    vc = "vc_formula = {" # For storing the variance components or random intercepts and slopes
    vc_started = False  # whether at least one entry is in vc (controls the " , " separator)
    groups = "groups = " # For random slopes
    re_formula = 're_formula = "1'
    exactly_one_group = False  # toggled per slope; presumably a sanity flag — TODO confirm it is checked anywhere
    for re in statistical_model.random_ivs:
        if isinstance(re, RandomIntercept):
            # Random intercept: one variance-component entry, no groups/re_formula change.
            if vc_started:
                vc += " , "
            vc += f'"{re.groups.name}" : ' + f'"0 + C({re.groups.name})"'
            vc_started = True
        elif isinstance(re, CorrelatedRandomSlopeAndIntercept):
            # Correlated slope+intercept: vc entry, grouping variable, and the
            # slope term (with closing quote) added to re_formula.
            group = re.groups
            if vc_started:
                vc += " , "
            vc += f'"{re.groups.name}" : ' + f'"0 + C({re.groups.name})"'
            vc_started = True
            groups += f'"{group.name}"'
            exactly_one_group = not exactly_one_group
            iv = re.iv
            assert iv.name in xs_code # Make sure the iv is included as an IV/X already
            re_formula += " + " + f'{iv.name}"'
        elif isinstance(re, UncorrelatedRandomSlopeAndIntercept):
            # Uncorrelated slope+intercept: like the correlated case but the
            # slope is NOT added to re_formula.
            group = re.groups
            if vc_started:
                vc += " , "
            vc += f'"{re.groups.name}" : ' + f'"0 + C({re.groups.name})"'
            vc_started = True
            groups += f'"{group.name}"'
            exactly_one_group = not exactly_one_group
            iv = re.iv
            assert iv.name in xs_code # Make sure the iv is included as an IV/X already
        elif isinstance(re, RandomSlope):
            # Pure random slope: grouping variable plus slope term in re_formula.
            group = re.groups
            random_slope = f'"{group.name}"'
            groups += random_slope
            exactly_one_group = not exactly_one_group
            iv = re.iv
            # if iv.name not in xs_code:
            #     import pdb; pdb.set_trace()
            # assert(iv.name in xs_code) # Make sure the iv is included as an IV/X already
            re_formula += " + " + f'{iv.name}"'
        # print(re)
    vc += "}" # Add closing curly brace
    formula_code = "formula=" + '"' + y_code + " ~ " + xs_code + '"'
    data_code = "data=df"
    # Assemble the final constructor-call snippet.
    model_code = (
        model
        + "("
        + formula_code
        + ","
        + vc
        + ","
        + re_formula
        + ","
        + groups
        + ","
        + data_code
        + ")"
    )
    return model_code
# def generate_statsmodels_model_code(statistical_model: StatisticalModel, **kwargs):
# model_code = str()
# has_fixed = len(statistical_model.fixed_ivs) > 0
# has_interactions = len(statistical_model.interactions) > 0
# has_random = len(statistical_model.random_ivs) > 0
# has_data = statistical_model.data is not None # May not have data
# # Does the statistical model have random effects (slope or intercept) that we should take into consideration?
# if has_random:
# return generate_statsmodels_glmm_code(statistical_model=statistical_model)
# else:
# # GLM: Fixed, interactions, no random; Other family
# return generate_statsmodels_glm_code(statistical_model=statistical_model)
|
|
import cv2
import numpy as np
class Cartoonfy(object):
    """Load an image from disk and display three cartoon-style renderings of it."""

    def __init__(self, image_path):
        # Path of the image to cartoonify; read lazily in cartoonfy().
        self.image_path = image_path

    def cartoonfy(self):
        """Show threshold-, inverse-threshold- and Canny-edge-masked cartoons side by side."""
        img = cv2.imread(self.image_path)
        scaled = cv2.resize(img, (int(img.shape[1] * .4), int(img.shape[0] * .4)))
        gray = cv2.cvtColor(scaled, cv2.COLOR_BGR2GRAY)
        blurred = cv2.medianBlur(gray, ksize=5)
        mask_binary = cv2.adaptiveThreshold(blurred, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY, 9, 9)
        mask_inverse = cv2.adaptiveThreshold(blurred, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY_INV, 9, 9)
        mask_edges = cv2.Canny(blurred, 31, 39)
        # A bilateral filter is a non-linear, edge-preserving, and noise-reducing smoothing filter for images.
        smoothed = cv2.bilateralFilter(scaled, 9, 100, 100)
        cartoons = [
            cv2.bitwise_and(smoothed, smoothed, mask=mask)
            for mask in (mask_binary, mask_inverse, mask_edges)
        ]
        cv2.imshow("CARTOONIFY", np.hstack(cartoons))
        cv2.waitKey(0)
# NOTE(review): this instance is created at import time, not only when the
# file is run as a script (construction just stores the path, so it is cheap).
cartoonify = Cartoonfy('../images/me.jpg')
if __name__ == '__main__':
    cartoonify.cartoonfy()
|
|
# Utlity Imports
import pickle
import numpy as np
import pandas as pd
import os
import json
from tqdm import tqdm
from datetime import datetime, timedelta
import matplotlib.pyplot as plt
# %matplotlib inline
# Tensorflow and Keras imports
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
from tensorflow.keras.layers import Flatten
from tensorflow.keras.layers import Conv1D
from tensorflow.keras.layers import MaxPooling1D
from tensorflow.keras.callbacks import EarlyStopping
import tensorflow as tf
# Parameters of Time Series Forecasting
n_steps = 20 # alpha: number of past time steps fed to the model per window
n_features = 5 # Features: number of parallel series per dataset entry
n_output = 1 # Future: number of steps predicted per window
# time_series can be read like an array for more information look into Data_Util.zip
# NOTE(review): the file handle passed to pickle.load is never closed —
# consider a `with open(...)` block.
time_series = pickle.load(open("time_series.pkl", "rb"))
# Split the raw data into required data
# Split the raw data into required data
def split_sequences(sequences, n_steps, n_output, start, end):
    """Slice a multivariate series into supervised (X, y) windows.

    `sequences[0]` plus `sequences[1:-1]` are the per-feature 1-D arrays (the
    final entry of `sequences` is skipped — it holds metadata, not a series).
    For each position i in [start, end), X gets `n_steps` consecutive rows and
    y the following `n_output` rows; windows that would spill past `end` stop
    the scan.
    """
    n_rows = sequences[0].shape[0]
    # Stack the feature series as columns of a single (n_rows, n_feats) array.
    columns = [s.reshape((n_rows, 1)) for s in [sequences[0], *sequences[1:-1]]]
    arr = np.hstack(columns)
    X, y = [], []
    for left in range(start, end):
        right = left + n_steps
        if right + n_output >= end:
            break
        X.append(arr[left:right, :])
        y.append(arr[right:right + n_output, :])
    return np.array(X), np.array(y)
# Creates the CNN Model
# Creates and fits the LSTM model
def getLSTMModel(X, y, n_steps=n_steps, n_output=n_output, n_features=5):
    """Build and fit a two-layer LSTM forecaster on windows X -> flattened y.

    X is reshaped to (samples, n_steps, n_features); the output layer emits
    n_output * n_features values per sample. Returns the fitted model.
    """
    # Fix: LSTM is not imported at module level (only Dense/Flatten/Conv1D/
    # MaxPooling1D are), so this function raised NameError; import it here.
    from tensorflow.keras.layers import LSTM

    # es = EarlyStopping(monitor='val_loss', mode='min', verbose=1)
    # Stop once training loss plateaus and restore the best weights seen.
    es = EarlyStopping(monitor='loss', patience=5, restore_best_weights=True)
    model = Sequential()
    model.add(LSTM(50, activation='relu', return_sequences=True, input_shape=(n_steps, n_features)))
    model.add(LSTM(50, activation='relu'))
    model.add(Dense(n_output*n_features))
    model.compile(optimizer='adam', loss='mse')
    X = X.reshape((X.shape[0], X.shape[1], n_features))
    model.fit(X, y, epochs=100, verbose=0, batch_size=16, callbacks=[es])
    return model
def getPredictions(X_test, model):
    """Run `model` on `X_test` reshaped to (samples, timesteps, n_features)."""
    reshaped = X_test.reshape((X_test.shape[0], X_test.shape[1], n_features))
    return model.predict(reshaped, verbose=2)
def createDataset(time_series, n_steps, idx, n_output):
    """Split series `idx` into train/test windows using a 70/30 split.

    The test range starts `n_steps + 1` positions before the split point so
    the first test window has a full history. Returns (X, y, X_test, y_test).
    """
    total_length = time_series[idx][0].shape[0]
    train_length = int(total_length * 0.7)
    X, y = split_sequences(time_series[idx], n_steps, n_output, 0, train_length + n_output)
    X_test, y_test = split_sequences(
        time_series[idx], n_steps, n_output, train_length - n_steps - 1, total_length
    )
    return X, y, X_test, y_test
def run_for_index(idx, model, name, n_output, n_features):
    """Train `model` on series `idx`, predict the test split, and plot both curves."""
    X, y, X_test, y_test = createDataset(time_series, n_steps, idx, n_output)
    # Flatten targets to (samples, n_output * n_features) for the dense head.
    flat_dim = y.shape[1] * y.shape[2]
    fitted = model(X, y.reshape((y.shape[0], flat_dim)), n_steps, n_output, n_features)
    yhat = getPredictions(X_test, fitted)
    yhat = yhat.reshape((yhat.shape[0], n_output, n_features))
    # Stitch overlapping windows into one curve for feature 0: the first step
    # of every window, plus all steps of the last window.
    preds = np.hstack((yhat[:-1, 0, 0], yhat[-1, :, 0]))
    orig = np.hstack((y_test[:-1, 0, 0], y_test[-1, :, 0]))
    plt.plot(preds)
    plt.plot(orig)
    # Save prediction and test by uncommenting the following line
    # pd.DataFrame((preds, orig)).to_csv(f'./{name}/{name}_{n_output}/{time_series[idx][5].strip(".csv")}_{name}_{n_output}' , header=None, index=None)
# Run for all time series
for i in tqdm(range(0, len(time_series))):
    # Fix: this script defines getLSTMModel only; the original call referenced
    # an undefined name `getCNNModel`, raising NameError on the first iteration.
    run_for_index(i, getLSTMModel, "LSTM", n_output, n_features)
|
|
"""
==============================
Customizing dashed line styles
==============================
The dashing of a line is controlled via a dash sequence. It can be modified
using `.Line2D.set_dashes`.
The dash sequence is a series of on/off lengths in points, e.g.
``[3, 1]`` would be 3pt long lines separated by 1pt spaces.
Some functions like `.Axes.plot` support passing Line properties as keyword
arguments. In such a case, you can already set the dashing when creating the
line.
*Note*: The dash style can also be configured via a
:doc:`property_cycle </tutorials/intermediate/color_cycle>`
by passing a list of dash sequences using the keyword *dashes* to the
cycler. This is not shown within this example.
"""
import numpy as np
import matplotlib.pyplot as plt

# Sample a sine wave on [0, 10].
xs = np.linspace(0, 10, 500)
ys = np.sin(xs)

fig, ax = plt.subplots()

# Modify the dashing of an already-created line via set_dashes():
# 2pt dash, 2pt gap, 10pt dash, 2pt gap.
dashed_by_setter, = ax.plot(xs, ys, label='Using set_dashes()')
dashed_by_setter.set_dashes([2, 2, 10, 2])

# Alternatively, set the dashing at creation time with the *dashes* keyword.
dashed_by_kwarg, = ax.plot(xs, ys - 0.2, dashes=[6, 2], label='Using the dashes parameter')

ax.legend()
plt.show()
|
|
"""Resistively and capacitively shunted junction (RCSJ) model.
For details, see Tinkham §6.3.
All units are SI unless explicitly stated otherwise.
The following notation is used:
Ic critical_current
R resistance
C capacitance
"""
import numpy as np
from scipy.constants import e, hbar
def plasma_frequency(Ic, C):
    """Return the Josephson plasma frequency sqrt(2 e Ic / (hbar C))."""
    omega_p_squared = 2 * e * Ic / (hbar * C)
    return np.sqrt(omega_p_squared)
def quality_factor(Ic, R, C):
    """Compute the quality factor Q = omega_p * R * C of an RCSJ.

    The quality factor distinguishes overdamped (Q < 1) from underdamped (Q > 1) junctions.
    """
    omega_p = plasma_frequency(Ic=Ic, C=C)
    return omega_p * R * C
def retrapping_current(Ic, R, C):
    """Estimate the retrapping current 4 Ic / (pi Q) of an underdamped (hysteretic) RCSJ."""
    Q = quality_factor(Ic=Ic, R=R, C=C)
    return 4 * Ic / (np.pi * Q)
|
|
import abc
from typing import List, Tuple, Optional, Generator
import numpy as np
import cv2
class _BaseDetector(abc.ABC):
    """Abstract interface for image object detectors.

    Defines the lifecycle (init/close session), single- and multi-image
    detection, and visualization hooks that concrete detectors implement.
    """

    @abc.abstractmethod
    def _resize_image(self, image: np.ndarray):
        """Resize *image* to the input size expected by the underlying model."""
        pass

    @abc.abstractmethod
    def init_session(self):
        """Acquire whatever runtime resources the detector needs."""
        pass

    @abc.abstractmethod
    def close_session(self):
        """Release resources acquired by init_session()."""
        pass

    @abc.abstractmethod
    def _run_inference(self, image: np.ndarray):
        """Run the model on a single (already resized) image."""
        pass

    @abc.abstractmethod
    def _detect_on_image(self, image: np.ndarray):
        """Resize then run inference on a single image."""
        pass

    @abc.abstractmethod
    def detect_on_images(self, *images: List[np.ndarray]):
        """Run detection over several images."""
        pass

    @abc.abstractmethod
    def _visualize(self, image: np.ndarray, detections: dict):
        """Draw *detections* onto *image*."""
        pass

    @abc.abstractmethod
    def visualize_detection_on_images(self, *images: List[np.ndarray]):
        """Detect and draw results for several images."""
        pass
class BaseDetector(_BaseDetector):
    """Minimal concrete detector: no-op sessions and empty detections.

    Serves as a base/stub implementation; subclasses override _run_inference
    and _visualize with real model logic.
    """

    def __init__(self, model_image_size: Optional[Tuple[int, int]] = None) -> None:
        # (width, height) passed to cv2.resize, or None to skip resizing.
        self._model_image_size = model_image_size

    def _resize_image(self, image: np.ndarray) -> np.ndarray:
        """Resize to the configured model size (no-op when no size is set)."""
        if self._model_image_size is not None:
            # Fix: cv2.resize's third positional argument is `dst`, not the
            # interpolation flag, so cv2.INTER_AREA was being passed as the
            # output buffer. Pass it by keyword instead.
            image = cv2.resize(image, self._model_image_size, interpolation=cv2.INTER_AREA)
        return image

    def init_session(self):
        """No resources to acquire in the base implementation."""
        pass

    def close_session(self):
        """No resources to release in the base implementation."""
        pass

    def _run_inference(self, image: np.ndarray) -> dict:
        """Stub inference: returns no detections."""
        return {}

    def _detect_on_image(self, image: np.ndarray) -> dict:
        """Resize *image* then run inference on it."""
        resized_image = self._resize_image(image)
        return self._run_inference(resized_image)

    def detect_on_images(self, *images: List[np.ndarray]) -> Generator:
        """Lazily yield the detection dict for each image."""
        for image in images:
            yield self._detect_on_image(image)

    def _visualize(self, image: np.ndarray, detections: dict) -> np.ndarray:
        """Stub visualization: returns the image unchanged."""
        return image

    def visualize_detection_on_images(self, *images: List[np.ndarray]) -> Generator:
        """Lazily yield each image with its detections drawn on."""
        for image in images:
            detection = self._detect_on_image(image)
            yield self._visualize(image, detection)
|
|
"""
Demonstrate the use of motmot.wxglvideo.simple_overlay.
"""
import pkg_resources
import numpy
import wx
import motmot.wxglvideo.demo as demo
import motmot.wxglvideo.simple_overlay as simple_overlay
# (rows, cols) shape of the randomly generated demo frames.
SIZE=(240,320)
class DemoOverlapApp( demo.DemoApp ):
    """Demo app showing motmot's DynamicImageCanvas with per-view close buttons."""

    def OnAddDisplay(self,event):
        """Add one more random-noise view (lazily building the shared panels)."""
        # First call only: build the canvas, the button strip, and the
        # bookkeeping containers for the live views.
        if not hasattr(self, 'overlay_canvas'):
            self.main_panel = wx.Panel( self.target_panel )
            self.main_panel_sizer = wx.BoxSizer(wx.VERTICAL)
            self.main_panel.SetSizer(self.main_panel_sizer)
            self.target_box.Add( self.main_panel, 1, wx.EXPAND )
            self.overlay_canvas = simple_overlay.DynamicImageCanvas(self.main_panel)
            self.main_panel_sizer.Add( self.overlay_canvas, 1, wx.EXPAND )
            self.horiz_panel = wx.Panel( self.main_panel )
            self.horiz_panel_sizer = wx.BoxSizer(wx.HORIZONTAL)
            self.horiz_panel.SetSizer(self.horiz_panel_sizer)
            self.main_panel_sizer.Add( self.horiz_panel, 0, wx.EXPAND )
            self.target_panel.Layout()
            self.count = 0        # total views ever created (used for ids)
            self.id_vals = []     # ids of currently live views
            self.widgets = {}     # id -> [label, close-button] widgets
        self.count += 1
        # Seed the new view with a frame of random noise.
        ni = numpy.random.uniform( 0, 255, SIZE).astype(numpy.uint8)
        id_val = 'id %d'%self.count
        if 1:
            ctrl = wx.StaticText( self.horiz_panel, label=id_val)
            self.horiz_panel_sizer.Add( ctrl, wx.LEFT, border=5 )
            btn = wx.Button(self.horiz_panel, -1, "close")
            btn.id_val = id_val # store data in wx object
            wx.EVT_BUTTON(btn, btn.GetId(), self.OnCloseView)
            self.horiz_panel_sizer.Add( btn, 0, wx.RIGHT, border=5 )
            self.widgets[id_val] = [ctrl, btn]
        self.overlay_canvas.update_image(id_val, ni)
        self.id_vals.append( id_val )
        self.target_panel.Layout()

    def OnTimer(self, event):
        """Refresh every live view with fresh noise plus a fixed point/line overlay."""
        if hasattr(self,'id_vals'):
            points = [ (10,10) ]
            linesegs = [ (20,10, 20,30) ]
            for id_val in self.id_vals:
                ni = numpy.random.uniform( 0, 255, SIZE).astype(numpy.uint8)
                self.overlay_canvas.update_image_and_drawings(id_val, ni,
                                                              points=points,
                                                              linesegs=linesegs,
                                                              )

    def OnCloseView(self, event):
        """Remove the view whose close button fired, plus its widgets."""
        widget = event.GetEventObject()
        id_val = widget.id_val # retrieve id value
        idx = self.id_vals.index(id_val)
        del self.id_vals[idx]
        self.overlay_canvas.delete_image(id_val)
        for widget in self.widgets[id_val]:
            self.horiz_panel_sizer.Remove( widget )
            widget.Destroy()
        self.horiz_panel_sizer.Layout()
def main():
    """Launch the demo app; log to demo.log unless NO_REDIRECT is set."""
    import os

    redirect_output = not int(os.environ.get('NO_REDIRECT', '0'))
    if redirect_output:
        app_kwargs = dict(redirect=True, filename='demo.log')
    else:
        app_kwargs = {}
    app = DemoOverlapApp(**app_kwargs)
    app.MainLoop()

if __name__=='__main__':
    main()
|
|
"""
Implementation of DDPG - Deep Deterministic Policy Gradient
Algorithm and hyperparameter details can be found here:
http://arxiv.org/pdf/1509.02971v2.pdf
The algorithm is tested on the Pendulum-v0 OpenAI gym task
and developed with tflearn + Tensorflow
Author: Patrick Emami
"""
import tensorflow as tf
import numpy as np
import tflearn
import actor
import critic
from replay_buffer import ReplayBuffer
# ==========================
#   Training Parameters
# ==========================
# Max training steps
MAX_EPISODES = 50000
# Max episode length
MAX_EP_STEPS = 1000
# Base learning rate for the Actor network
ACTOR_LEARNING_RATE = 0.0001
# Base learning rate for the Critic Network
CRITIC_LEARNING_RATE = 0.001
# Discount factor
GAMMA = 0.99
# Soft target update param (fraction of online weights blended into the
# target networks per update)
TAU = 0.001

# ===========================
#   Utility Parameters
# ===========================
# Render gym env during training
RENDER_ENV = True
# Use Gym Monitor
GYM_MONITOR_EN = True
# Gym environment
ENV_NAME = 'Pendulum-v0'
# Directory for storing gym results
MONITOR_DIR = './results/gym_ddpg'
# Directory for storing tensorboard summary results
SUMMARY_DIR = './results/tf_ddpg'
RANDOM_SEED = 1234
# Size of replay buffer
BUFFER_SIZE = 10000
MINIBATCH_SIZE = 64
# ===========================
# Actor and Critic DNNs
# ===========================
# ===========================
# Tensorflow Summary Ops
# ===========================
def build_summaries():
    """Create TensorBoard scalar summaries for episode reward and average max-Q.

    Returns (merged summary op, [reward_var, qmax_var]); the variables are fed
    through feed_dict when the summary op is evaluated.
    """
    reward_var = tf.Variable(0.)
    tf.summary.scalar("Reward", reward_var)
    qmax_var = tf.Variable(0.)
    tf.summary.scalar("Qmax Value", qmax_var)

    summary_vars = [reward_var, qmax_var]
    summary_ops = tf.summary.merge_all()
    return summary_ops, summary_vars
# ===========================
# Agent Training
# ===========================
def train(sess, env, actor, critic):
    """Run the DDPG training loop.

    For each episode: act with decaying exploration noise, store transitions
    in the replay buffer, and once the buffer exceeds one minibatch, update
    the critic toward the bootstrapped targets, update the actor along the
    critic's action gradient, and soft-update both target networks.
    """
    # Set up summary Ops
    summary_ops, summary_vars = build_summaries()
    sess.run(tf.global_variables_initializer())
    writer = tf.summary.FileWriter(SUMMARY_DIR, sess.graph)
    # Initialize target network weights
    actor.update_target_network()
    critic.update_target_network()
    # Initialize replay memory
    replay_buffer = ReplayBuffer(BUFFER_SIZE, RANDOM_SEED)
    for i in range(MAX_EPISODES):
        s = env.reset()
        ep_reward = 0
        ep_ave_max_q = 0
        for j in range(MAX_EP_STEPS):
            # Added exploration noise, decaying as 1/(1+episode).
            # NOTE(review): the state is reshaped to (1, 3) — hard-coded for
            # Pendulum-v0's 3-dim observation; confirm before reusing elsewhere.
            a = actor.predict(np.reshape(s, (1, 3))) + (1. / (1. + i))
            s2, r, terminal, info = env.step(a[0])
            replay_buffer.add(np.reshape(s, (actor.s_dim,)), np.reshape(a, (actor.a_dim,)), r,
                              terminal, np.reshape(s2, (actor.s_dim,)))
            # Keep adding experience to the memory until
            # there are at least minibatch size samples
            if replay_buffer.size() > MINIBATCH_SIZE:
                s_batch, a_batch, r_batch, t_batch, s2_batch = \
                    replay_buffer.sample_batch(MINIBATCH_SIZE)
                # Calculate targets: r for terminal transitions, else
                # r + gamma * Q_target(s', pi_target(s')).
                target_q = critic.predict_target(
                    s2_batch, actor.predict_target(s2_batch))
                y_i = []
                for k in range(MINIBATCH_SIZE):
                    if t_batch[k]:
                        y_i.append(r_batch[k])
                    else:
                        y_i.append(r_batch[k] + GAMMA * target_q[k])
                # Update the critic given the targets
                predicted_q_value, _ = critic.train(
                    s_batch, a_batch, np.reshape(y_i, (MINIBATCH_SIZE, 1)))
                ep_ave_max_q += np.amax(predicted_q_value)
                # Update the actor policy using the sampled gradient
                a_outs = actor.predict(s_batch)
                grads = critic.action_gradients(s_batch, a_outs)
                actor.train(s_batch, grads[0])
                # Update target networks (soft update with TAU)
                actor.update_target_network()
                critic.update_target_network()
            s = s2
            ep_reward += r
            if terminal:
                # Log the episode's reward and average max-Q to TensorBoard.
                summary_str = sess.run(summary_ops, feed_dict={
                    summary_vars[0]: ep_reward,
                    summary_vars[1]: ep_ave_max_q / float(j)
                })
                writer.add_summary(summary_str, i)
                writer.flush()
                print ('| Reward: %.2i' % int(ep_reward), " | Episode", i, \
                    '| Qmax: %.4f' % (ep_ave_max_q / float(j)))
                break
def main(_):
    """Build the Pendulum-v0 environment and actor/critic networks, then train.

    Optionally wraps the env in a gym Monitor (video only when rendering).
    """
    # Fix: gym and gym.wrappers were never imported anywhere in this script,
    # so env creation raised NameError. Import them here.
    import gym
    from gym import wrappers

    with tf.Session() as sess:
        env = gym.make(ENV_NAME)
        np.random.seed(RANDOM_SEED)
        tf.set_random_seed(RANDOM_SEED)
        env.seed(RANDOM_SEED)

        state_dim = env.observation_space.shape[0]
        action_dim = env.action_space.shape[0]
        action_bound = env.action_space.high
        # Ensure action bound is symmetric
        assert (env.action_space.high == -env.action_space.low)

        # Fix: ActorNetwork/CriticNetwork were referenced unqualified although
        # only the `actor` and `critic` modules are imported; qualify them and
        # use distinct local names so the modules are not shadowed.
        actor_net = actor.ActorNetwork(sess, state_dim, action_dim, action_bound,
                                       ACTOR_LEARNING_RATE, TAU)
        critic_net = critic.CriticNetwork(sess, state_dim, action_dim,
                                          CRITIC_LEARNING_RATE, TAU,
                                          actor_net.get_num_trainable_vars())

        if GYM_MONITOR_EN:
            if not RENDER_ENV:
                env = wrappers.Monitor(
                    env, MONITOR_DIR, video_callable=False, force=True)
            else:
                env = wrappers.Monitor(env, MONITOR_DIR, force=True)

        train(sess, env, actor_net, critic_net)

        if GYM_MONITOR_EN:
            env.monitor.close()

if __name__ == '__main__':
    tf.app.run()
|
|
# coding: utf8
def get_t1_freesurfer_custom_file():
    """Return the CAPS-relative template path to a FreeSurfer cortical-thickness
    surface file, with @subject/@session/@hemi/@fwhm placeholders."""
    import os

    path_components = (
        "@subject",
        "@session",
        "t1",
        "freesurfer_cross_sectional",
        "@subject_@session",
        "surf",
        "@hemi.thickness.fwhm@fwhm.fsaverage.mgh",
    )
    return os.path.join(*path_components)
def get_pet_surface_custom_file(acq_label, suvr_reference_region):
    """Return the CAPS-relative template path to a PET surface-projection file
    for the given acquisition label and SUVR reference region."""
    import os

    filename = (
        f"@subject_@session_task-rest_acq-{acq_label}_pet"
        f"_space-fsaverage_suvr-{suvr_reference_region}_pvc-iy_hemi-@hemi_fwhm-@fwhm_projection.mgh"
    )
    return os.path.join("@subject", "@session", "pet", "surface", filename)
def init_input_node(parameters, base_dir, subjects_visits_tsv):
    """Initialize the pipeline.

    This function will:
        - Create `surfstat_results_dir` in `base_dir`/<group_id> for SurfStat;
        - Save pipeline parameters in JSON file;
        - Copy TSV file with covariates;
        - Print begin execution message.

    Args:
        parameters (dict): StatisticsSurface pipeline parameters.
        base_dir (str): Working directory in which <group_id>/ is created.
        subjects_visits_tsv (str): TSV file with the GLM covariates.

    Returns:
        (str, str): The group label and the created results directory.
    """
    import json
    import os
    import shutil

    from clinicaml.pipelines.statistics_surface.statistics_surface_utils import (
        create_glm_info_dictionary,
    )
    from clinicaml.utils.ux import print_begin_image

    group_id = "group-" + parameters["group_label"]
    # Create surfstat_results_dir for SurfStat
    surfstat_results_dir = os.path.join(base_dir, group_id)
    os.makedirs(surfstat_results_dir, exist_ok=True)
    # Save pipeline parameters in JSON file
    glm_dict = create_glm_info_dictionary(subjects_visits_tsv, parameters)
    json_filename = os.path.join(surfstat_results_dir, group_id + "_glm.json")
    with open(json_filename, "w") as json_file:
        json.dump(glm_dict, json_file, indent=4)
    # Copy TSV file with covariates
    tsv_filename = os.path.join(surfstat_results_dir, group_id + "_covariates.tsv")
    shutil.copyfile(subjects_visits_tsv, tsv_filename)
    # Print begin message
    list_keys = ["AnalysisType", "Covariates", "Contrast", "FWHM", "ClusterThreshold"]
    list_values = [
        parameters["glm_type"],
        parameters["covariates"],
        parameters["contrast"],
        str(parameters["full_width_at_half_maximum"]),
        str(parameters["cluster_threshold"]),
    ]
    # Fix: group_id was recomputed here with an identical expression; reuse
    # the value computed above.
    print_begin_image(group_id, list_keys, list_values)
    return parameters["group_label"], surfstat_results_dir
def get_string_format_from_tsv(tsv_file):
    """
    Determine string format from TSV file.

    If the TSV file is like:
    participant_id   session_id   sex      group   age
    sub-CLNC0001     ses-M00      Female   CN      71.1
    sub-CLNC0002     ses-M00      Male     CN      81.3
    sub-CLNC0003     ses-M00      Male     CN      75.4

    The columns of the TSV file contains consecutively strings, strings,
    strings, strings and float. The string_format is therefore "%s %s %s %s %f".

    Args:
        tsv_file: TSV file.

    Returns:
        String formatting of the TSV file (e.g. "%s %s %s %s %f")

    Raises:
        ValueError: if a column has a dtype other than int64/float64/object.
    """
    import pandas as pd

    demographics_df = pd.read_csv(tsv_file, sep="\t")

    def dtype_to_str_format(dtype):
        """Convert pandas dtypes (e.g. int64) to string format (e.g. %d)"""
        import numpy as np

        if dtype == np.int64:
            str_format = "%d"
        elif dtype == np.float64:
            str_format = "%f"
        elif dtype == object:
            # Fix: the np.object alias was removed in NumPy 1.24, making this
            # branch raise AttributeError; plain `object` is the equivalent
            # dtype for string columns.
            str_format = "%s"
        else:
            raise ValueError("Unknown dtype (given: %s)" % dtype)
        return str_format

    return " ".join(
        dtype_to_str_format(demographics_df[column].dtype)
        for column in demographics_df.columns
    )
def covariates_to_design_matrix(contrast, covariates=None):
    """
    Generate design matrix for SurfStat based on the contrast and the optional list of covariates.

    Design matrix "1 + <contrast> + <covariate_1> + ... + <covariate_n>"

    Example:
        >>> covariates_to_design_matrix('group', 'age sex group')
        '1 + group + age + sex'
        >>> covariates_to_design_matrix('group', 'age')
        '1 + group + age'
        >>> covariates_to_design_matrix('group', None)
        '1 + group'
    """
    if not covariates:
        return "1 + " + contrast
    # Fix: the original used list(set(...)), whose iteration order is
    # arbitrary, so the generated design matrix (and therefore the SurfStat
    # call and its JSON record) was non-deterministic. Deduplicate while
    # preserving the order the covariates were given in; split() with no
    # argument also absorbs repeated spaces (the old code had to strip "").
    list_covariates = list(dict.fromkeys(covariates.split()))
    # The contrast always comes right after the intercept; drop it from the
    # covariates when it was listed there too (matches the docstring example).
    if contrast in list_covariates:
        list_covariates.remove(contrast)
    return " + ".join(["1", contrast] + list_covariates)
def run_matlab(caps_dir, output_dir, subjects_visits_tsv, pipeline_parameters):
    """
    Wrap the call of SurfStat using clinicasurfstat.m Matlab script.

    Args:
        caps_dir (str): CAPS directory containing surface-based features
        output_dir (str): Output directory that will contain outputs of clinicasurfstat.m
        subjects_visits_tsv (str): TSV file containing the GLM information
        pipeline_parameters (dict): parameters of StatisticsSurface pipeline

    Returns:
        str: `output_dir`, unchanged (Matlab writes its outputs there).
    """
    import os

    from nipype.interfaces.matlab import MatlabCommand, get_matlab_command

    import clinicaml.pipelines as clinica_pipelines
    from clinicaml.pipelines.statistics_surface.statistics_surface_utils import (
        covariates_to_design_matrix,
        get_string_format_from_tsv,
    )
    from clinicaml.utils.check_dependency import check_environment_variable

    # clinicasurfstat.m lives in the package's lib/clinicasurfstat directory.
    path_to_matlab_script = os.path.join(
        os.path.dirname(clinica_pipelines.__path__[0]), "lib", "clinicasurfstat"
    )
    freesurfer_home = check_environment_variable("FREESURFER_HOME", "FreeSurfer")
    MatlabCommand.set_default_matlab_cmd(get_matlab_command())
    matlab = MatlabCommand()
    matlab.inputs.paths = path_to_matlab_script
    # NOTE(review): the positional argument order below must match the
    # clinicasurfstat.m signature exactly — confirm against the Matlab script
    # before reordering anything.
    matlab.inputs.script = """
    clinicasurfstat('%s', '%s', '%s', '%s', '%s', '%s', '%s', '%s', '%s', '%s', '%s', '%s', %d, '%s', %.3f, '%s', %.3f, '%s', %.3f);
    """ % (
        os.path.join(caps_dir, "subjects"),
        output_dir,
        subjects_visits_tsv,
        covariates_to_design_matrix(
            pipeline_parameters["contrast"], pipeline_parameters["covariates"]
        ),
        pipeline_parameters["contrast"],
        get_string_format_from_tsv(subjects_visits_tsv),
        pipeline_parameters["glm_type"],
        pipeline_parameters["group_label"],
        freesurfer_home,
        pipeline_parameters["custom_file"],
        pipeline_parameters["measure_label"],
        "sizeoffwhm",
        pipeline_parameters["full_width_at_half_maximum"],
        "thresholduncorrectedpvalue",
        0.001,
        "thresholdcorrectedpvalue",
        0.05,
        "clusterthreshold",
        pipeline_parameters["cluster_threshold"],
    )
    # This will create a file: pyscript.m , the pyscript.m is the default name
    matlab.inputs.mfile = True
    # This will stop running with single thread
    matlab.inputs.single_comp_thread = False
    matlab.inputs.logfile = (
        "group-" + pipeline_parameters["group_label"] + "_matlab.log"
    )
    # cprint("Matlab logfile is located at the following path: %s" % matlab.inputs.logfile)
    # cprint("Matlab script command = %s" % matlab.inputs.script)
    # cprint("MatlabCommand inputs flag: single_comp_thread = %s" % matlab.inputs.single_comp_thread)
    # cprint("MatlabCommand choose which matlab to use(matlab_cmd): %s" % get_matlab_command())
    matlab.run()
    return output_dir
def create_glm_info_dictionary(tsv_file, pipeline_parameters):
    """Create dictionary containing the GLM information that will be stored in a JSON file."""
    params = pipeline_parameters
    out_dict = {
        # Clinica compulsory arguments
        "AnalysisType": params["glm_type"],
        "DesignMatrix": covariates_to_design_matrix(
            params["contrast"], params["covariates"]
        ),
        "StringFormatTSV": get_string_format_from_tsv(tsv_file),
        "Contrast": params["contrast"],
        "GroupLabel": params["group_label"],
        # Optional arguments
        "Covariates": params["covariates"],
        "FWHM": params["full_width_at_half_maximum"],
        # Optional arguments for custom pipeline
        "custom_file": params["custom_file"],
        "measure_label": params["measure_label"],
        # Advanced arguments (i.e. tricky parameters)
        "ThresholdUncorrectedPvalue": 0.001,
        "ThresholdCorrectedPvalue": 0.05,
        "ClusterThreshold": params["cluster_threshold"],
    }
    # Record pet-surface inputs only when both are provided.
    if params["acq_label"] and params["suvr_reference_region"]:
        out_dict["acq_label"] = params["acq_label"]
        out_dict["suvr_reference_region"] = params["suvr_reference_region"]
    return out_dict
def save_to_caps(source_dir, caps_dir, overwrite_caps, pipeline_parameters):
    """Copy the surfstat outputs from `source_dir` into the CAPS hierarchy.

    The destination folder is
    `caps_dir`/groups/<group_id>/statistics/<surfstat_folder>/ where the
    surfstat folder name depends on the GLM type ("group_comparison" or
    "correlation").

    Raises
    ------
    NotImplementedError
        If `overwrite_caps` is True (not supported yet), or if the GLM
        type is not one of the two handled cases.
    """
    import os
    import shutil

    from clinicaml.utils.ux import print_end_image

    group_id = "group-" + pipeline_parameters["group_label"]
    glm_type = pipeline_parameters["glm_type"]
    if glm_type == "group_comparison":
        surfstat_folder = "surfstat_" + glm_type
    elif glm_type == "correlation":
        surfstat_folder = "surfstat_" + glm_type + "_analysis"
    else:
        raise NotImplementedError(
            "The other GLM situations have not been implemented in this pipeline."
        )
    destination_dir = os.path.join(
        os.path.expanduser(caps_dir), "groups", group_id, "statistics", surfstat_folder
    )
    if overwrite_caps:
        raise NotImplementedError("save_to_caps(overwrite_caps=True) not implemented")
    shutil.copytree(source_dir, destination_dir, symlinks=True)
    print_end_image(group_id)
|
|
import numpy as np
from typing import Callable, List, Optional
from lab1.src.onedim.one_dim_search import dichotomy_method
from lab2.src.methods.conjugate_method import conjugate_direction_method
from lab2.src.methods.newton_step_strategy import ConstantStepStrategy
DEFAULT_EPS = 1e-6
DEFAULT_MAX_ITERS = 1000
def newton_method(f: Callable[[np.ndarray], float],
                  f_grad: Callable[[np.ndarray], np.ndarray],
                  f_hess: Callable[[np.ndarray], np.ndarray],
                  start: np.ndarray,
                  eps: float = DEFAULT_EPS,
                  max_iters: int = DEFAULT_MAX_ITERS,
                  trajectory: Optional[List] = None):
    """Newton's method with a 1-D line search along each Newton direction.

    At each iteration the Newton direction is obtained from the conjugate
    direction method applied to the Hessian and gradient, and the step
    length along it is chosen by dichotomy search on [0, 10].

    :param f: objective function
    :param f_grad: gradient of ``f``
    :param f_hess: Hessian of ``f``
    :param start: initial point
    :param eps: stop once the step norm falls below this value
    :param max_iters: iteration cap
    :param trajectory: optional list collecting every visited point
    :return: tuple (approximate minimizer, iterations performed)
    """
    x_prev = start
    if trajectory is not None:
        trajectory.append(start)
    iters = 1
    while iters < max_iters:
        # Newton direction from the linear system built on the Hessian/gradient.
        x_wave, _ = conjugate_direction_method(f_hess(x_prev), f_grad(x_prev), x_prev)
        # Line search for the step size along that direction.
        alpha, _, _ = dichotomy_method(lambda a: f(x_prev + a * x_wave), 0, 10, 1e-9)
        x_k = x_prev + alpha * x_wave
        if trajectory is not None:
            trajectory.append(x_k)
        if np.linalg.norm(x_prev - x_k) < eps:
            return x_k, iters
        x_prev = x_k
        iters += 1
    return x_prev, iters
|
|
'''Functions used in solver class
'''
import numpy as np
from state import State
def cosphi(m):
    """Get operator for x-component of dipole moment projection.

    Parameters
    ----------
    m : int
        Maximum energy quantum number.

    Returns
    -------
    numpy.array, shape=(2m+1,2m+1)
        Matrix with 0.5 on the first super- and sub-diagonals, the
        matrix representation of cos(phi) in the free-rotor basis.
    """
    dim = 2 * m + 1
    op = np.zeros((dim, dim))
    idx = np.arange(dim - 1)
    op[idx, idx + 1] = 0.5
    op[idx + 1, idx] = 0.5
    return op
def sinphi(m):
    """Get operator for y-component of dipole moment projection.

    Parameters
    ----------
    m : int
        Maximum energy quantum number.

    Returns
    -------
    numpy.array, shape=(2m+1,2m+1)
        Complex matrix with +0.5j on the first super-diagonal and
        -0.5j on the first sub-diagonal, the matrix representation of
        sin(phi) in the free-rotor basis.
    """
    dim = 2 * m + 1
    op = np.zeros((dim, dim), dtype=complex)
    idx = np.arange(dim - 1)
    op[idx, idx + 1] = 0.5j
    op[idx + 1, idx] = -0.5j
    return op
def ddphi(m):
    """First-derivative operator with respect to phi.

    Used for solving the b-vector in solvers.PathToField._get_b.

    Parameters
    ----------
    m : int
        Maximum energy quantum number.

    Returns
    -------
    numpy.array, shape=(2m+1,2m+1)
        Diagonal matrix i*k for k = -m..m, i.e. d/dphi in the
        free-rotor eigenbasis.
    """
    modes = np.arange(-m, m + 1)
    return np.diag(1j * modes)
def d2dphi2(m):
    """Second-derivative operator with respect to phi.

    Used for solving the b-vector in solvers.PathToField._get_b.

    Parameters
    ----------
    m : int
        Maximum energy quantum number.

    Returns
    -------
    numpy.array, shape=(2m+1,2m+1)
        Diagonal matrix -k**2 for k = -m..m, i.e. d^2/dphi^2 in the
        free-rotor eigenbasis.
    """
    modes = np.arange(-m, m + 1)
    return np.diag(-(modes ** 2))
def d2dt2(x, dt):
    """Calculate the second derivative of a uniformly sampled sequence.

    The interior uses the standard three-point central difference; each
    boundary value reuses the stencil built from its nearest three
    samples (identical formulas to the original element-wise loop, but
    vectorized with NumPy slicing).

    This is used for solving the b-vector in solvers.PathToField._get_b
    method.

    Parameters
    ----------
    x : numpy.array, shape=(n,)
        An 1-D sequence of numbers, n >= 3.
    dt : float
        Step size of time, i.e. the time difference between two
        adjacent numbers in `x`.

    Returns
    -------
    d2x : numpy.array, shape=(n,)
        The second derivative of the original sequence `x`.
    """
    x = np.asarray(x, dtype=float)
    d2x = np.empty_like(x)
    # Interior points: central difference, all at once.
    d2x[1:-1] = (x[2:] - 2 * x[1:-1] + x[:-2]) / (dt ** 2)
    # Boundaries: one-sided stencils from the nearest three samples.
    d2x[0] = (x[2] - 2 * x[1] + x[0]) / (dt ** 2)
    d2x[-1] = (x[-3] - 2 * x[-2] + x[-1]) / (dt ** 2)
    return d2x
|
|
import numpy as np
import matplotlib.pyplot as plt
def update_vorticity(g, w, lam, u, v, dt, dx, dy, kx, ky):
    """Return the forward-Euler increments (dt * rhs) for g and w.

    Add +g and +w to the returned values for a plain forward-Euler step,
    then call this directly instead of vorticity_rk4 for faster (but less
    accurate) computation.
    """
    mean_g2 = g2_avg(g, dx, dy)
    rhs_g = (2 + lam) * mean_g2 - (1 + lam) * g ** 2 - convect(g, u, v, kx, ky)
    rhs_w = g * w - convect(w, u, v, kx, ky)
    return dt * rhs_g, dt * rhs_w
def vorticity_rk4(g, w, lam, u, v, dt, dx, dy, kx, ky):
    """Advance the stretching field g and vorticity w by one classical RK4 step."""
    def stage(g_s, w_s):
        # One RK stage: increments evaluated at the shifted state.
        return update_vorticity(g_s, w_s, lam, u, v, dt, dx, dy, kx, ky)

    kg1, kw1 = stage(g, w)
    kg2, kw2 = stage(g + kg1/2, w + kw1/2)
    kg3, kw3 = stage(g + kg2/2, w + kw2/2)
    kg4, kw4 = stage(g + kg3, w + kw3)
    # Weighted RK4 combination of the four stage increments.
    g_next = g + 1/6*(kg1 + 2.*kg2 + 2.*kg3 + kg4)
    w_next = w + 1/6*(kw1 + 2*kw2 + 2*kw3 + kw4)
    return g_next, w_next
def update_velocities(g_hat, w_hat, kx, ky, k2):
    """Recover the physical-space velocity components (u, v) from spectra.

    g_hat and w_hat are fftshifted 2-D spectra; the k = 0 mode (centre of
    the shifted grid) is zeroed, assuming zero mean flow.
    """
    u_hat = 1j * (kx * g_hat + ky * w_hat) / k2
    v_hat = 1j * (ky * g_hat - kx * w_hat) / k2
    # Assume no zero mode: kill the mean-flow entry at the grid centre.
    ny, nx = np.shape(k2)
    u_hat[int(ny / 2), int(nx / 2)] = 0
    v_hat[int(ny / 2), int(nx / 2)] = 0
    u = np.real(np.fft.ifft2(np.fft.ifftshift(u_hat)))
    v = np.real(np.fft.ifft2(np.fft.ifftshift(v_hat)))
    return u, v
def g2_avg(g, dx, dy):
    """Domain average of g**2 over the (2*pi)^2 periodic box."""
    return (g * g).sum() * dx * dy / (2 * np.pi) ** 2
def convect(C, u, v, kx, ky):
    """Pseudo-spectral evaluation of the advection term u*C_x + v*C_y.

    Derivatives of C are computed by multiplying its (shifted) spectrum
    by i*k, then transformed back to physical space before the product
    with the velocity fields.
    """
    spectrum = np.fft.fftshift(np.fft.fft2(C))
    dC_dx = np.real(np.fft.ifft2(np.fft.ifftshift(1j * kx * spectrum)))
    dC_dy = np.real(np.fft.ifft2(np.fft.ifftshift(1j * ky * spectrum)))
    return u * dC_dx + v * dC_dy
def initial_conditions(xx, yy):
    """Evaluate the initial vorticity (w0) and stretching (g0) fields.

    xx and yy are the 1-D grid coordinates; the fields are returned on
    the meshgrid(xx, yy) layout, i.e. shape (len(yy), len(xx)).
    """
    X, Y = np.meshgrid(xx, yy)
    shifted_y = Y - np.pi / 2
    w0 = np.cos(X + np.pi / 2) * (1 + 2 * np.sin(shifted_y))
    g0 = np.sin(shifted_y)
    return w0, g0
def blowup_test(g):
    """Return True when the matrix infinity-norm of g (max absolute row
    sum) reaches the blow-up threshold 2**32 - 1."""
    row_sums = np.abs(g).sum(axis=1)
    return bool(row_sums.max() >= 2 ** 32 - 1)
def euler_solve(N=256, dt=0.001, tfinal=2, lam=-3.0/2):
    """Pseudo-spectral solver for the 2D Euler-type vorticity system.

    Advances the vorticity field ``w`` and stretching field ``g`` on a
    periodic [0, 2*pi)^2 grid with RK4 time stepping and 2/3-rule
    dealiasing, plotting ``g`` with matplotlib every 10 steps and
    aborting early if the solution blows up.

    Parameters
    ----------
    N : int
        Number of grid points per direction (the grid is N x N).
    dt : float
        Time-step size.
    tfinal : float
        Final simulation time; the loop runs floor(tfinal/dt) steps.
    lam : float
        Model parameter forwarded to the vorticity update.
    """
    # Grid specifications (square grid: same resolution in both directions).
    Nx, Ny = N, N
    n_timesteps = int(np.floor(tfinal/dt))
    print("~~ Euler Vorticity Solver ~~ \n")
    print(f"##### \nParameters: \nGrid points = {Nx}x{Ny}")
    print(f"final time = {tfinal}s")
    # Grid spacing on the 2*pi-periodic domain
    dx = 2.*np.pi/Nx
    dy = 2.*np.pi/Ny
    # Discretized grid
    xx = np.arange(0, Nx)*dx
    yy = np.arange(0, Ny)*dy
    print("Setting intial conditions... \n")
    w, g = initial_conditions(xx, yy)
    # Spectral representations; fftshift places the k = 0 mode at the array centre.
    w_hat = np.fft.fftshift(np.fft.fft2(w))
    g_hat = np.fft.fftshift(np.fft.fft2(g))
    # Matrices of wavenumbers
    kx = np.ones((1, Ny)).T * (np.arange(-Nx/2, Nx/2))
    ky = np.reshape(np.arange(-Ny/2, Ny/2), (1, Ny)).T * np.ones((1, Nx))
    k2 = kx**2+ky**2
    # Avoid division by zero at the k = 0 mode (update_velocities zeroes that mode).
    # NOTE(review): Nx is used for both indices; only correct because Nx == Ny here.
    k2[int(Nx/2),int(Nx/2)]=1
    # 2/3-rule dealiasing mask.
    dealias = (np.abs(kx) < (2.0/3.0)*(Nx/2.0)) * (np.abs(ky)<(2.0/3.0)*(Ny/2.0))
    print("Entering time loop... \n")
    # Update the vorticity and stretching terms in each timestep
    for iteration_time in range(0, n_timesteps):
        # Live visualisation of g every 10 steps.
        if np.mod(iteration_time, 10)==0:
            seconds = np.round(iteration_time*dt,4)
            print(f"Time: {seconds}s")
            plt.pcolormesh(yy, xx, g.T, cmap="hot")
            plt.colorbar()
            plt.clim(vmin=-2, vmax=2)
            plt.title("2D Euler - Vorticity")
            plt.pause(1e-8)
            plt.clf()
        # Implement numerical consistancy checks...
        u, v = update_velocities(g_hat, w_hat, kx, ky, k2)
        g, w = vorticity_rk4(g, w, lam, u, v, dt, dx, dy, kx, ky)
        blowup = blowup_test(g)
        if blowup:
            # Stop and show the final field when the norm exceeds the threshold.
            seconds = iteration_time*dt
            print(f"Solution has blownup at T* = {seconds} \n")
            print("Exiting loop.")
            plt.pcolormesh(yy, xx, g.T, cmap='hot')
            plt.title("2D Euler - Vorticity")
            plt.colorbar()
            plt.show()
            break
        # Re-transform and dealias before the next velocity update.
        w_hat = np.fft.fftshift(np.fft.fft2(w))*dealias
        g_hat = np.fft.fftshift(np.fft.fft2(g))*dealias
    #print("Simulation finished. \n Showing final plot... \n")
    #plt.pcolormesh(yy, xx, w, cmap="hot")
    #plt.title("2D Euler - Vorticity")
    #plt.colorbar()
    #plt.clim(vmin=-2, vmax=2)
    #plt.show()
    # return g
# Run the solver only when this file is executed directly, so importing the
# module does not immediately launch a long interactive simulation.
if __name__ == "__main__":
    euler_solve(N=128, dt=0.001, tfinal=10, lam=0)

# Grid-refinement study kept for reference:
"""
dt = 0.001
tfinal = 1.0
lam =0
g_32 = euler_solve(N=32, dt=dt, tfinal=tfinal, lam=lam)
g_64 = euler_solve(N=64, dt=dt, tfinal=tfinal, lam=lam)
g_128 = euler_solve(N=128, dt=dt, tfinal=tfinal, lam=lam)
g_256 = euler_solve(N=256, dt=dt, tfinal=tfinal, lam=lam)
g_512 = euler_solve(N=512, dt=dt, tfinal=tfinal, lam=lam)
"""
|
|
import networkx as nx
import pandas as pd
def import_csv(filename):
    """Read the CSV file at *filename* into a pandas DataFrame."""
    frame = pd.read_csv(filename)
    return frame
def preprocessing(filename):
    """Load the survey CSV and make it easier to work with.

    Drops the 'Timestamp' column and promotes the 'Name' column to the
    (unnamed) row index, so respondents can be looked up by name.
    """
    survey = import_csv(filename)
    survey = survey.drop(columns=["Timestamp"])  # timestamps carry no signal
    survey = survey.set_index("Name")            # respondent names as row labels
    survey.index.names = [None]                  # drop the 'Name' index title
    return survey
def initialize_graph(data):
    """Build a graph whose nodes are the respondent names (the row labels).

    Returns the graph together with the list of names in row order; the
    previous version also computed an unused row count, removed here.
    """
    G = nx.Graph()
    row_names = list(data.index)  # row labels are the respondent names
    G.add_nodes_from(row_names)
    return G, row_names
def build_graph(data):
    """Add an edge between every pair of people who agree on any question.

    Iterates the answers column by column and connects each unordered
    pair of distinct respondents with matching answers (nx.Graph ignores
    duplicate edges, so agreeing on several questions adds one edge).
    """
    G, row_names = initialize_graph(data)
    # DataFrame.items() replaces iteritems(), which was removed in pandas 2.0.
    for question, answers in data.items():
        # Only check each unordered pair once; self-pairs are excluded.
        for i, curr_name in enumerate(row_names):
            for compare_name in row_names[i + 1:]:
                if answers[curr_name] == answers[compare_name]:
                    G.add_edge(curr_name, compare_name)
    return G
if __name__ == "__main__":
    # Load the survey and print every agreement edge.
    survey = preprocessing("Survey.csv")
    agreement_graph = build_graph(survey)
    print(agreement_graph.edges)
|
|
import numpy as np
from typing import Union
from talib import SMA
try:
from numba import njit
except ImportError:
njit = lambda a: a
from jesse.helpers import get_candle_source, slice_candles
def rma(candles: np.ndarray, length: int = 14, source_type: str = "close", sequential: bool = False) -> \
        Union[float, np.ndarray]:
    """
    Moving average used in RSI. It is the exponentially weighted moving average with alpha = 1 / length.
    RETURNS Exponential moving average of x with alpha = 1 / y.
    https://www.tradingview.com/pine-script-reference/#fun_rma

    :param candles: np.ndarray - candle matrix, or a plain 1-D price series
    :param length: int - default: 14 - smoothing window; must be >= 1
    :param source_type: str - default: close - price column used when a candle matrix is given
    :param sequential: bool - default: False - return the whole series instead of only the last value
    :return: Union[float, np.ndarray]
    """
    # Pine change() = np.ediff1d(x); min and max must be the same.
    """
    print(an_array)
    [ 3 23 5 67 12 15 89]
    an_array = np.where(an_array > 20, 0, an_array)
    print(an_array)
    [ 3 0 5 0 12 15 0]
    """
    if length < 1:
        raise ValueError('Bad parameters.')
    # Accept normal array too.
    if len(candles.shape) == 1:
        source = candles
    else:
        # Candle matrix: trim to the warm-up window and pull the requested price column.
        candles = slice_candles(candles, sequential)
        source = get_candle_source(candles, source_type=source_type)
    res = rma_fast(source, length)
    # sequential=True -> full series; otherwise only the most recent value.
    return res if sequential else res[-1]
def sma(s, l):
    """Simple moving average of series `s` over window `l` (thin wrapper around TA-Lib's SMA)."""
    return SMA(s, l)
def rma_fast(source, _length):
    """Running moving average (Wilder's smoothing) with alpha = 1 / length.

    Pine script equivalent:
        pine_rma(src, length) =>
            alpha = 1 / length
            sum = 0.0
            sum := na(sum[1]) ? sma(src, length) : alpha * src + (1 - alpha) * nz(sum[1])

    The first ``_length - 1`` outputs are NaN; index ``_length - 1`` is
    seeded with the simple moving average of the first ``_length``
    values, after which the exponential recursion takes over.

    Fixes over the previous version: at i == 0 it read newseries[-1]
    (wrapping to the LAST element as the seed), and its seeding branch
    assigned the full SMA(...) array to a single scalar slot, which
    raises at runtime. The TA-Lib dependency is no longer needed here.
    """
    source = np.asarray(source, dtype=float)
    newseries = np.full(source.shape, np.nan)
    if source.size < _length:
        return newseries  # not enough data for even one full window
    alpha = 1.0 / _length
    # Seed: Pine's na(sum[1]) branch -> SMA of the first window.
    newseries[_length - 1] = source[:_length].mean()
    for i in range(_length, source.size):
        # Recursive EMA with alpha = 1/length; the seed is always finite,
        # so Pine's nz() fallback to 0 is never needed.
        newseries[i] = alpha * source[i] + (1.0 - alpha) * newseries[i - 1]
    return newseries
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.