text
stringlengths 0
1.25M
| meta
stringlengths 47
1.89k
|
|---|---|
import rospy
import smach
import smach_ros
import message_filters
import tf
from tf import transformations
from tf import TransformListener
from tf import transformations
from geometry_msgs.msg import PoseStamped
import apc_msgs.srv
from sensor_msgs.msg import PointCloud2
from sensor_msgs.msg import Image
from apc_msgs.msg import PointcloudRGB
from apc_msgs.msg import ObjectProposals
from apc_msgs.msg import GraspCandidates
from apc_msgs.srv import DoSegmentation,DoSegmentationRequest
import time
import numpy as np
# from apc_msgs.msg import DoObjectProposal
class DecideGraspPoseStateFromPointCloud(smach.State):
    """SMACH state that finds a grasp pose for the next item to pick.

    Pipeline: segment the cropped point cloud, run 2D object proposals on the
    segmented cloud + RGB image, choose the proposal whose bounding box is
    closest to the object's 2D median point, detect grasp candidates on that
    segment, and finally select a reachable grasp for the target bin.

    Userdata in:  object_2d_median, next_item_to_pick, cropped_cloud
    Userdata out: goal_pose, goal_pose_array, pre_grasp_pose_array,
                  move_group_name_array
    """

    def __init__(self):
        smach.State.__init__(self, outcomes=['succeeded', 'failed'],
                             input_keys=['object_2d_median', 'next_item_to_pick', 'cropped_cloud'],
                             output_keys=['goal_pose', 'goal_pose_array', 'pre_grasp_pose_array', 'move_group_name_array']
                             )
        # Wait for the perception and grasping services to come online.
        self.waitForService('/object_proposal_node/do_proposal')
        self.waitForService('/segmentation_node/do_segmentation')
        self.waitForService('/apc_grasping/detect_grasp_candidates')
        self.waitForService('/apc_grasping/grasp_selection_from_candidates')
        # Create proxies to those services.
        self.srv_proposal = rospy.ServiceProxy('/object_proposal_node/do_proposal', apc_msgs.srv.DoObjectProposal)
        self.srv_segmentation = rospy.ServiceProxy('/segmentation_node/do_segmentation', apc_msgs.srv.DoSegmentation)
        self.srv_graspCandidates = rospy.ServiceProxy('/apc_grasping/detect_grasp_candidates', apc_msgs.srv.DetectGraspCandidates)
        self.srv_selectGraspFromCandidates = rospy.ServiceProxy('/apc_grasping/grasp_selection_from_candidates', apc_msgs.srv.SelectGraspFromCandidates)
        # Subscribers for the RGB image and point cloud topics.
        self.sub_points = message_filters.Subscriber('/realsense/points_aligned', PointCloud2)
        self.sub_rgb = message_filters.Subscriber('/realsense/rgb/image_raw', Image)
        # Debug publisher: the segment that is sent to the grasp candidate detector.
        self.points_pub = rospy.Publisher('pointsBeforeChris', PointCloud2, queue_size=10)
        # Approximately synchronize the RGB and point cloud topics.
        self.ats = message_filters.ApproximateTimeSynchronizer([self.sub_rgb, self.sub_points], 1, 2.0)
        self.ats.registerCallback(self.RGBPoints_callback)
        self.ignore_rgbpoints = True   # gate: topics are captured only while execute() waits
        self.current_rgb = None
        self.current_pointcloud = None
        self.tf_listener = TransformListener()
        self.maxTrys = 5               # number of attempts before giving up
        rospy.loginfo('Ok.')

    # ==========================================================
    def waitForService(self, service):
        """Block briefly until `service` is available.

        Returns True if the service appeared within the timeout; otherwise
        logs an error and returns False.
        """
        rospy.loginfo('Waiting for service %s to come online ...' % service)
        try:
            rospy.wait_for_service(service, timeout=0.1)
        except rospy.ROSException:
            # Was a bare `except:`; catch only the wait-timeout/shutdown error
            # so programming errors are not silently swallowed.
            rospy.logerr('Service %s not available. Restart and try again.' % service)
            return False
        else:
            return True

    # ==========================================================
    def RGBPoints_callback(self, rgb, pointcloud):
        """Synchronized-topic callback; captures one RGB image when armed."""
        if not self.ignore_rgbpoints:
            self.ignore_rgbpoints = True
            rospy.loginfo('Received topics.')
            self.current_rgb = rgb
            # NOTE: the point cloud argument is intentionally unused here; the
            # cloud comes from userdata['cropped_cloud'] in execute().
        # else: ignore incoming topics until execute() arms the gate again.

    # ==========================================================
    def insideBoundingBox(self, p, bb):
        """Return True if the 2D point p=[x, y] lies inside bounding box bb."""
        return (bb.top_left.x <= p[0] <= bb.bottom_right.x and
                bb.top_left.y <= p[1] <= bb.bottom_right.y)

    # ==========================================================
    def distFromCenter(self, p, bb):
        """Euclidean distance from p to the center of bb; np.inf if p is outside bb."""
        if not self.insideBoundingBox(p, bb):
            return np.inf
        center = np.array([(bb.bottom_right.x + bb.top_left.x) / 2.0,
                           (bb.bottom_right.y + bb.top_left.y) / 2.0])
        d = center - np.array(p)
        return np.sqrt(np.sum(d ** 2))

    # ==========================================================
    def execute(self, userdata):
        """Run the grasp selection pipeline, with up to self.maxTrys attempts.

        Returns 'succeeded' when a reachable grasp was stored in userdata,
        'failed' otherwise.
        """
        success = False
        try_counter = 0
        while not success and try_counter < self.maxTrys:
            self.current_rgb = None
            self.current_pointcloud = userdata['cropped_cloud']
            try_counter += 1
            rospy.loginfo("Looking for grasp poses. Try %d of %d." % (try_counter, self.maxTrys))
            # Arm the callback and wait for a synchronized RGB image.
            self.ignore_rgbpoints = False
            while self.current_rgb is None:
                # NOTE(review): there is no timeout here -- if the topics never
                # arrive this loop blocks forever; consider bounding the wait.
                rospy.loginfo('Waiting for synchronized RGB and Pointcloud topics ...')
                time.sleep(0.15)
            # Call the DoSegmentation service; receive the segmented point cloud.
            rospy.loginfo('Calling segmentation service ... ')
            req = DoSegmentationRequest()
            req.input_cloud = self.current_pointcloud
            segmented_cloud = self.srv_segmentation.call(req).segmented_cloud
            # Form a PointcloudRGB message for the proposal service.
            pointCloudRGB = PointcloudRGB(segmented_cloud, self.current_rgb)
            rospy.loginfo('Calling object proposal service ... ')
            proposals = self.srv_proposal.call(pointCloudRGB).object_proposals
            # Determine which bounding box the object's 2D median is most central in.
            if 'object_2d_median' in userdata:
                p = userdata['object_2d_median']
            else:
                rospy.logwarn('No object median defined in the userdata!')
                p = [100, 100]  # fallback point (Juxi's hack)
            distances = np.array([self.distFromCenter(p, bb) for bb in proposals.bounding_boxes])
            # Guard against an empty proposal list: np.min/np.argmin raise on
            # empty arrays (this previously crashed when no boxes came back).
            if distances.size == 0 or np.min(distances) == np.inf:
                rospy.logwarn("Object median point did not fall into any proposal box.")
                idx = None
            else:
                idx = np.argmin(distances)
            if idx is not None:
                pointcloud_segment = proposals.segments[idx]
                pointcloud_segment.header.frame_id = self.current_pointcloud.header.frame_id
                # Call the grasp pose service with that point cloud segment.
                rospy.loginfo('Calling grasp point detection service ... ')
                self.points_pub.publish(pointcloud_segment)
                grasp_candidates_response = self.srv_graspCandidates.call(pointcloud_segment, 0.01)
                if len(grasp_candidates_response.grasp_candidates.grasp_utilities) == 0:
                    rospy.logwarn("Could not find any suitable grasp pose!")
                    success = False
                    time.sleep(0.5)
                else:
                    bin_name = userdata['next_item_to_pick']['bin']
                    grasp_selection_response = self.srv_selectGraspFromCandidates.call(
                        grasp_candidates_response.grasp_candidates,
                        bin_name, {'left_arm', 'left_arm_90'})
                    if grasp_selection_response.success.data == False:
                        print('Grasp selection found no reachable grasps :(\n')
                        success = False
                        break
                    # Store the results for the downstream states.
                    userdata['goal_pose'] = grasp_selection_response.selected_grasps.poses[0]
                    userdata['goal_pose_array'] = grasp_selection_response.selected_grasps
                    userdata['pre_grasp_pose_array'] = grasp_selection_response.selected_pre_grasps
                    userdata['move_group_name_array'] = grasp_selection_response.grasp_move_group
                    success = True
        # Clean up captured data.
        self.current_rgb = None
        self.current_pointcloud = None
        return 'succeeded' if success else 'failed'
# ==========================================================
# ==========================================================
# ==========================================================
if __name__ == '__main__':
    # Standalone smoke test: run the grasp decision state once outside the
    # state machine.
    rospy.init_node('grasp_decision_test')
    dg = DecideGraspPoseStateFromPointCloud()
    # NOTE(review): execute() indexes userdata['cropped_cloud']; passing None
    # will raise a TypeError here -- confirm this entry point is still used.
    dg.execute(None)
    rospy.spin()
|
{"hexsha": "0b839bf026ed25aee300f21392507580457c42ec", "size": 10084, "ext": "py", "lang": "Python", "max_stars_repo_path": "apc_state_machine/state_machine/scripts/OBSOLETE_decideGraspPoseStateFromPointCloud.py", "max_stars_repo_name": "Juxi/apb-baseline", "max_stars_repo_head_hexsha": "fd47a5fd78cdfd75c68601a40ca4726d7d20c9ce", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 9, "max_stars_repo_stars_event_min_datetime": "2017-02-06T10:24:56.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-27T20:59:52.000Z", "max_issues_repo_path": "apc_state_machine/state_machine/scripts/OBSOLETE_decideGraspPoseStateFromPointCloud.py", "max_issues_repo_name": "Juxi/apb-baseline", "max_issues_repo_head_hexsha": "fd47a5fd78cdfd75c68601a40ca4726d7d20c9ce", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "apc_state_machine/state_machine/scripts/OBSOLETE_decideGraspPoseStateFromPointCloud.py", "max_forks_repo_name": "Juxi/apb-baseline", "max_forks_repo_head_hexsha": "fd47a5fd78cdfd75c68601a40ca4726d7d20c9ce", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2017-10-15T08:33:37.000Z", "max_forks_repo_forks_event_max_datetime": "2019-03-05T07:29:38.000Z", "avg_line_length": 41.1591836735, "max_line_length": 152, "alphanum_fraction": 0.5933161444, "include": true, "reason": "import numpy", "num_tokens": 2019}
|
import os
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
# --- Configuration ---
folder = '/home/sebastian/Programs/iblrig/tasks/_iblrig_tasks_ephysChoiceWorld/sessions/' # location of datasets
show = 0 # whether or not to show plots (they will be saved anyway)
plot = 0 # whether or not to compute the (somewhat) time-consuming all-trial plots
plt.rcParams.update({'font.size': 12})
# Accumulates the block lengths of every session (first block excluded below).
all_blocks = np.zeros(0)
for file in os.listdir(folder):
    # Skip the auxiliary *_len_blocks.npy files; they are loaded alongside
    # their matching data file below.
    if file.endswith('len_blocks.npy'):
        continue
    # Load data and bring into form for Anne's plotting procedure
    data = np.load(folder + file)
    block_lens = np.load(folder + file[:-4] + '_len_blocks.npy')
    all_blocks = np.concatenate((all_blocks, block_lens[1:]))
    # assumes data[:, 0] is the stimulus side sign and data[:, 1] the
    # contrast magnitude -- TODO confirm against the task generator
    behav = pd.DataFrame({'probabilityLeft':50, 'signed_contrast': 100 * np.sign(data[:,0]) * data[:,1], 'trial_id':range(np.sum(block_lens))})
    # The first (unbiased) block is 90 trials long; infer the first biased
    # block's side from the mean stimulus sign within it.
    i = 90
    prob = 80 if np.mean(data[90 : 90 + block_lens[1], 0]) > 0 else 20
    for block in block_lens[1:]:
        # NOTE(review): pandas documents `.at` for scalar access only; `.loc`
        # is the documented slice accessor -- confirm this works on the
        # pandas version used here.
        behav.at[i:i+block, 'probabilityLeft'] = prob
        i += block
        prob = 100 - prob  # alternate 80/20 between blocks
    behav['probability_left_block'] = (behav.probabilityLeft - 50) * 2
    # Count streaks of 0's interlude
    current = 0
    streak = 0
    print(file)
    for i, n in enumerate(data[:,1]):
        if n == 0:
            current += 1
        else:
            current = 0
        streak = max(current, streak)
    print('Longest sequence of 0\'s is {}'.format(streak))
    # Plot contrast distributions
    plt.figure(figsize=(20, 14))
    sns.countplot(x='signed_contrast', data=behav, color='b')
    print('Proportion of stimuli on one side is {}'.format(np.sum(np.sign(behav['signed_contrast']) == 1) / (len(behav) - np.sum(np.sign(behav['signed_contrast']) == 0))))
    plt.savefig('./counts_' + file[:-4] + '.png')
    if show:
        plt.show()
    else:
        plt.close()
    # Plot contrast level of all trials + underlying block
    if plot:
        cmap = sns.diverging_palette(20, 220, n=len(behav['probabilityLeft'].unique()), center="dark")
        plt.figure(figsize=(20, 14))
        plt.gcf().suptitle(file, fontsize=16)
        # Four stacked panels of ~500 trials each.
        for i in range(4):
            plt.subplot(4, 1, 1 + i)
            sns.lineplot(x="trial_id", y="probability_left_block", data=behav, color='k', legend=0)
            sns.lineplot(x="trial_id", y="signed_contrast", data=behav, hue=np.sign(behav.signed_contrast), palette=cmap, linewidth=0, marker='.', mec=None, legend=0)
            left, right = i * 500, (i+1) * 500
            if i == 3:
                plt.xlabel('Trial number', fontsize=16)
                right = np.sum(block_lens)  # last panel extends to the final trial
            else:
                plt.xlabel(None)
            plt.xlim(left, right)
            plt.ylabel('Signed contrast (%)', fontsize=16)
        plt.tight_layout()
        plt.savefig('./' + file[:-4] + '.png')
        if show:
            plt.show()
        else:
            plt.close()
print(min(all_blocks))
print(max(all_blocks))
n_bins = 20  # histogram bins for the block-length distribution plot
n = 1000000  # number of Monte Carlo draws for the empirical distribution
# Plot empirical block dist. (from n simulation) vs block lengths in given sequences
# block length draw function
def draw_block_len():
    """Sample one block length.

    Draws from an exponential distribution (scale 50) and rejection-samples
    until the draw lands in [10, 91]; the accepted value is truncated to an
    int and shifted by +10, giving lengths in roughly [20, 100].
    """
    while True:
        sample = np.random.exponential(50.)
        # Accept only draws inside the window (effectively never picks 90
        # if you put 90 here, since hitting the bound exactly has measure zero).
        if 10 <= sample <= 91:
            return int(sample) + 10
# simulate block lengths
emp_blocks = np.fromiter((draw_block_len() for _ in range(n)), dtype=np.int32, count=n)
counts = np.bincount(emp_blocks)
# Overlay the observed block lengths with the Monte Carlo estimate.
plt.hist(all_blocks, n_bins, density=True, label='block length counts')
plt.plot(range(20, 101), counts[20:101] / n, 'r', label='Monte Carlo estimates')
plt.legend()
plt.xlabel('Block length')
plt.ylabel('Normalized frequency')
plt.savefig('./block_length_dist.png')
plt.show()
|
{"hexsha": "139132199bb945aa249774c4ebbef367ebff436f", "size": 3782, "ext": "py", "lang": "Python", "max_stars_repo_path": "python/sequence_analysis.py", "max_stars_repo_name": "ineslaranjeira/analysis", "max_stars_repo_head_hexsha": "ef262cc0d4e04ffb59d81aeb8f135790778b8bbf", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 7, "max_stars_repo_stars_event_min_datetime": "2018-11-22T17:36:02.000Z", "max_stars_repo_stars_event_max_datetime": "2020-10-17T10:59:59.000Z", "max_issues_repo_path": "python/sequence_analysis.py", "max_issues_repo_name": "ineslaranjeira/analysis", "max_issues_repo_head_hexsha": "ef262cc0d4e04ffb59d81aeb8f135790778b8bbf", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 3, "max_issues_repo_issues_event_min_datetime": "2020-01-13T17:47:14.000Z", "max_issues_repo_issues_event_max_datetime": "2020-05-21T18:31:47.000Z", "max_forks_repo_path": "python/sequence_analysis.py", "max_forks_repo_name": "ineslaranjeira/analysis", "max_forks_repo_head_hexsha": "ef262cc0d4e04ffb59d81aeb8f135790778b8bbf", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 4, "max_forks_repo_forks_event_min_datetime": "2019-05-30T17:55:32.000Z", "max_forks_repo_forks_event_max_datetime": "2021-01-06T18:45:48.000Z", "avg_line_length": 32.3247863248, "max_line_length": 171, "alphanum_fraction": 0.6237440508, "include": true, "reason": "import numpy", "num_tokens": 1022}
|
from numpy.random import choice
from srcs.agent.Tree import Tree
from srcs.agent.auxilliary import ucb
from srcs.agent.auxilliary import NodeAttr as NodeAttr
from enum import IntEnum
#
# An enum for the type of rollout policy to be used.
#
class RolloutPolicy(IntEnum):
    # Sample actions uniformly at random.
    RANDOM_ACTION = 0
    # Sample random actions, resampling while the reward is -1 (a hole).
    RANDOM_ACTION_AVOIDING_HOLES = 1
    # Sample several actions and keep the best reward, avoiding holes.
    ACTION_MAX_REWARD_AVOIDING_HOLES = 2
    # Sample several actions and keep the one with the best reward.
    ACTION_MAX_REWARD = 3
#
# Class implementing the Partially Observable Monte Carlo Planning algorithm.
#
class POMCP:
    def __init__(self, states, actions, obs, generator, gamma=0.95, c=1, threshold=0.005,
                 timeout=10000, no_particles=1200, rollout_depth=-1, policy_rollout_type=RolloutPolicy.ACTION_MAX_REWARD):
        """
        Construct the POMCP agent
        :param states: the set of all possible states
        :param actions: the set of all possible actions
        :param obs: the set of all possible observations
        :param generator: the black box generator, i.e. environment; called as
            generator(state, action) and returning (next_state, observation, reward)
        :param gamma: the discount factor
        :param c: the exploration constant of the UCB criterion
        :param threshold: the threshold below which the discount is too small
        :param timeout: the number of simulation runs per search
        :param no_particles: the number of particles in the filter
        :param rollout_depth: the maximum depth of rollouts; -1 means unused
        :param policy_rollout_type: the type of rollout policy to use.
        """
        # Check that the parameters are valid.
        if gamma < 0 or gamma > 1:
            raise ValueError("Gamma should between zero and one.")
        # Initialize the attributes.
        self.gamma = gamma
        self.generator = generator
        self.e = threshold
        self.c = c
        self.timeout = timeout
        self.no_particles = no_particles
        self.rollout_depth = rollout_depth
        self.tree = Tree()
        self.states = states
        self.actions = actions
        self.observations = obs
        self.policy_rollout_type = policy_rollout_type

    def best_child(self, node, use_ucb=True):
        """
        Finds and returns the child with the highest value
        If the parameter use_ucb=True, then the child's value is the UCB criterion
        If the parameter use_ucb=False, then the child's value is the action's value
        :param node: the node whose best child should be returned
        :param use_ucb: whether the UCB or the action's value should be used
        :return: a pair (best action, associated child node)
        """
        # Check that the beliefs are valid (-1 marks an invalid belief state).
        if self.tree.nodes[node][NodeAttr.BELIEFS] == -1:
            print("Invalid beliefs over state in POMCP.best_child.")
            return None, None
        # Find and return the best action and associated child
        children = self.tree.nodes[node][NodeAttr.CHILDREN]
        max_action = max(
            children,
            key=lambda action: self.compute_node_value(children[action], use_ucb)
        )
        return max_action, children[max_action]

    def compute_node_value(self, node, use_ucb=True):
        """
        Compute the value of the input node
        :param node: the node whose value must be computed
        :param use_ucb: whether the UCB or the action's value should be used
        :return: the node's value
        """
        if use_ucb:
            # UCB mixes the node's value with an exploration bonus based on
            # the parent's and node's visit counts.
            parent = self.tree.nodes[node][NodeAttr.PARENT]
            return ucb(
                self.tree.nodes[parent][NodeAttr.VISITS],
                self.tree.nodes[node][NodeAttr.VISITS],
                self.tree.nodes[node][NodeAttr.VALUE],
                self.c
            )
        return self.tree.nodes[node][NodeAttr.VALUE]

    def search(self):
        """
        Perform the POMCP search.
        :return: the best action according to the search.
        """
        # Get belief state of the root node (node index -1 is the root).
        bh = self.root()[NodeAttr.BELIEFS].copy()
        # Repeat simulations until timeout
        for _ in range(self.timeout):
            self.simulate(self.sample_belief_state(bh), -1, 0)
        # Get best action (pure value, no exploration bonus)
        return self.best_child(-1, use_ucb=False)[0]

    def rollout(self, s, depth, rollout_depth):
        """
        Perform a rollout run from state 's'
        :param s: the state from which the rollout must be performed
        :param depth: the depth from the node representing the current state S_t
        :param rollout_depth: the current depth of the rollout
        :return: the value of the rollout run
        """
        # Check if rollout max depth has been reached
        if 0 < self.rollout_depth <= rollout_depth:
            return 0
        # Check significance of update: stop once the discounted contribution
        # falls below the threshold self.e.
        if (self.gamma ** depth < self.e or self.gamma == 0) and depth != 0:
            return 0
        # Select an action according to the rollout policy, and retrieve new state and reward
        state, reward = self.rollout_policy(self.policy_rollout_type, s)
        # Compute action's value recursively.
        return reward + self.gamma * self.rollout(state, depth + 1, rollout_depth + 1)

    def rollout_policy(self, rollout_policy_type, state):
        """
        Perform an action according to the rollout policy requested in input.
        :param rollout_policy_type: the type of rollout policy to use
            (indexes the list below, matching the RolloutPolicy enum values).
        :param state: the current state from which the action is performed.
        :return: the state and reward following the action performed.
        """
        policies = [
            self.rollout_policy_random_action,
            self.rollout_policy_random_action_avoiding_holes,
            self.rollout_policy_max_reward_avoiding_holes,
            self.rollout_policy_max_reward
        ]
        return policies[rollout_policy_type](state)

    def rollout_policy_random_action(self, state):
        """
        Select a random action.
        :param state: the current state.
        :return: the next state and reward.
        """
        action = choice(self.actions)
        state, _, r = self.generator(state, action)
        return state, r

    def rollout_policy_random_action_avoiding_holes(self, state):
        """
        Select a random action that does not lead into a hole
        (a reward of -1 is treated as a hole).
        :param state: the current state.
        :return: the next state and reward.
        """
        r = -1
        sample_state = state
        while r == -1:
            action = choice(self.actions)
            sample_state, _, r = self.generator(state, action)
        return sample_state, r

    def rollout_policy_max_reward_avoiding_holes(self, state):
        """
        Select the action that seems to lead to the highest reward and avoid holes.
        :param state: the current state.
        :return: the next state and reward.
        """
        i = 0
        best_s = state
        best_r = -1
        # Sample at least 10 actions, and keep sampling while only holes
        # (reward -1) have been found.
        while best_r == -1 or i < 10:
            action = choice(self.actions)
            sample_state, _, r = self.generator(state, action)
            if r > best_r:
                best_r = r
                best_s = sample_state
            i += 1
        return best_s, best_r

    def rollout_policy_max_reward(self, state):
        """
        Select the action that seems to lead to the highest reward.
        :param state: the current state.
        :return: the next state and reward.
        """
        i = 0
        best_s = state
        best_r = -1
        # Sample 10 actions and keep the best outcome seen.
        while i < 10:
            action = choice(self.actions)
            sample_state, _, r = self.generator(state, action)
            if r > best_r:
                best_r = r
                best_s = sample_state
            i += 1
        return best_s, best_r

    def simulate(self, s, h, depth):
        """
        Perform one iteration of MCTS, i.e. one simulation through the tree
        :param s: the state from which the planning starts
        :param h: the tree's node from which the simulation starts
        :param depth: the current depth of the simulation
        :return: the value of the simulation
        """
        # Check significance of update
        if (self.gamma ** depth < self.e or self.gamma == 0) and depth != 0:
            return 0
        # If the current node is a leaf of the tree
        if self.tree.is_leaf_node(h):
            # Expand all possible actions
            for action in self.actions:
                self.tree.expand_tree_from(h, action, is_action=True)
            # Perform a rollout to evaluate the value of the current state
            return self.rollout(s, depth, 0)
        # Get best action and associated tree's node
        best_action, best_node = self.best_child(h)
        # Generate next state, observation and reward
        next_s, next_obs, reward = self.generator(s, best_action)
        # Get tree's node associated to the next observation
        next_node = self.get_observation_node(best_node, next_obs)
        # Estimate node value
        cum_reward = reward + self.gamma * self.simulate(next_s, next_node, depth + 1)
        # Add current state to belief state; the particle filter is bounded
        # by dropping the oldest particle once no_particles is exceeded.
        self.tree.nodes[h][NodeAttr.BELIEFS].append(s)
        if len(self.tree.nodes[h][NodeAttr.BELIEFS]) > self.no_particles:
            self.tree.nodes[h][NodeAttr.BELIEFS] = self.tree.nodes[h][NodeAttr.BELIEFS][1:]
        # Back-propagate value and number of visits (incremental mean update).
        self.tree.nodes[h][NodeAttr.VISITS] += 1
        self.tree.nodes[best_node][NodeAttr.VISITS] += 1
        self.tree.nodes[best_node][NodeAttr.VALUE] += \
            (cum_reward - self.tree.nodes[best_node][NodeAttr.VALUE]) / self.tree.nodes[best_node][NodeAttr.VISITS]
        return cum_reward

    def get_observation_node(self, h, obs):
        """
        Get the child of 'h' corresponding to the observation 'obs'.
        The node is created if it does not exist yet
        :param h: the node whose child should be returned
        :param obs: the observation specifying which child to return
        :return: the child of 'h' corresponding to the observation 'obs'.
        """
        # Add the observation to the tree if not already in it
        children = self.tree.nodes[h][NodeAttr.CHILDREN]
        if obs not in list(children.keys()):
            self.tree.expand_tree_from(h, obs)
        # Get the index of the node corresponding to the input observation
        return children[obs]

    def sample_belief_state(self, beliefs):
        """
        Sample the belief state, or the prior over state if the
        belief state does not contain any particle
        :param beliefs: the belief state
        :return: the sampled state
        """
        return choice(beliefs) if len(beliefs) != 0 else self.generator.current_state()  # TODO choice(self.states)

    def sample_posterior(self, Bh, a, obs):
        """
        Samples from posterior after taking action 'a' and receiving observation 'obs'
        :param Bh: the belief state
        :param a: the action taken by the agent
        :param obs: the observation received by the agent
        :return: the sample from the posterior
        """
        # Sample from belief state
        s = self.sample_belief_state(Bh)
        # Simulate action in the environment, i.e. sample transition distribution
        s_next, o_next, _ = self.generator(s, a)
        # If the observation matches, then return the state
        if o_next == obs:
            return s_next
        # Otherwise, sample a new state.
        # NOTE(review): rejection sampling via recursion -- if the observation
        # is unlikely under the belief, this can recurse deeply; confirm the
        # observation spaces are small enough in practice.
        return self.sample_posterior(Bh, a, obs)

    def update_belief(self, action, observation):
        """
        Updates belief by sampling posterior
        :param action: the action taken by the agent
        :param observation: the observation received by the agent
        :return: nothing
        """
        # Retrieve current belief state
        root = self.root()
        prior = root[NodeAttr.BELIEFS].copy()
        # Compute new belief state
        root[NodeAttr.BELIEFS] = []
        for _ in range(self.no_particles):
            root[NodeAttr.BELIEFS].append(self.sample_posterior(prior, action, observation))

    def root(self):
        """
        Getter
        :return: the tree's root (node index -1 by convention).
        """
        return self.tree.nodes[-1]
|
{"hexsha": "3cca799a503d3e7acd83d6a7a03aac8a2e383f01", "size": 12185, "ext": "py", "lang": "Python", "max_stars_repo_path": "srcs/agent/POMCP.py", "max_stars_repo_name": "ChampiB/POMCP", "max_stars_repo_head_hexsha": "af6b7f9df3476126abad2adf21cc618e1d9898d1", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "srcs/agent/POMCP.py", "max_issues_repo_name": "ChampiB/POMCP", "max_issues_repo_head_hexsha": "af6b7f9df3476126abad2adf21cc618e1d9898d1", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "srcs/agent/POMCP.py", "max_forks_repo_name": "ChampiB/POMCP", "max_forks_repo_head_hexsha": "af6b7f9df3476126abad2adf21cc618e1d9898d1", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 37.4923076923, "max_line_length": 122, "alphanum_fraction": 0.6228149364, "include": true, "reason": "from numpy", "num_tokens": 2756}
|
[STATEMENT]
lemma HNatInfinite_FreeUltrafilterNat_iff:
"(star_n X \<in> HNatInfinite) = (\<forall>u. eventually (\<lambda>n. u < X n) \<U>)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (star_n X \<in> HNatInfinite) = (\<forall>u. \<forall>\<^sub>F n in \<U>. u < X n)
[PROOF STEP]
by (rule iffI [OF HNatInfinite_FreeUltrafilterNat FreeUltrafilterNat_HNatInfinite])
|
{"llama_tokens": 163, "file": null, "length": 1}
|
from .dummy_gym_env import DummyEnv
from gym.spaces import Box, Discrete
import numpy as np
from supersuit import (
frame_stack_v1,
reshape_v0,
observation_lambda_v0,
action_lambda_v1,
dtype_v0,
)
import supersuit
import pytest
# 8x8 "RGB" observation where every pixel is [0, 1, 2] (float32).
base_obs = (np.zeros([8, 8, 3]) + np.arange(3)).astype(np.float32)
# Observation space containing base_obs; the [0, 10] bounds are arbitrary.
base_obs_space = Box(low=np.float32(0.0), high=np.float32(10.0), shape=[8, 8, 3])
# Default discrete action space with five actions.
base_act_spaces = Discrete(5)
def test_reshape():
    """reshape_v0 should reshape the 8x8x3 observation into (64, 3)."""
    wrapped = reshape_v0(DummyEnv(base_obs, base_obs_space, base_act_spaces), (64, 3))
    reset_obs = wrapped.reset()
    assert reset_obs.shape == (64, 3)
    stepped_obs, _, _, _ = wrapped.step(5)
    assert np.all(np.equal(stepped_obs, base_obs.reshape([64, 3])))
def new_continuous_dummy():
    """Build a DummyEnv with a continuous Box action space.

    The module-level observation and observation space are reused; only the
    action space differs from new_dummy().
    """
    # Named distinctly so the module-level ``base_act_spaces`` (a Discrete
    # space) is not shadowed by a local of the same name.
    continuous_act_space = Box(low=np.float32(0.0), high=np.float32(10.0), shape=[3])
    return DummyEnv(base_obs, base_obs_space, continuous_act_space)
def new_dummy():
    """Build a fresh DummyEnv over the module-level spaces (Discrete actions)."""
    return DummyEnv(base_obs, base_obs_space, base_act_spaces)
# One instance of every basic wrapper under test; each entry is fed to
# test_basic_wrappers via pytest parametrization.
wrappers = [
    supersuit.color_reduction_v0(new_dummy(), "R"),
    supersuit.resize_v0(dtype_v0(new_dummy(), np.uint8), x_size=5, y_size=10),
    supersuit.resize_v0(dtype_v0(new_dummy(), np.uint8), x_size=5, y_size=10, linear_interp=True),
    supersuit.dtype_v0(new_dummy(), np.int32),
    supersuit.flatten_v0(new_dummy()),
    supersuit.reshape_v0(new_dummy(), (64, 3)),
    supersuit.normalize_obs_v0(new_dummy(), env_min=-1, env_max=5.0),
    supersuit.frame_stack_v1(new_dummy(), 8),
    supersuit.reward_lambda_v0(new_dummy(), lambda x: x / 10),
    supersuit.clip_reward_v0(new_dummy()),
    supersuit.clip_actions_v0(new_continuous_dummy()),
    supersuit.frame_skip_v0(new_dummy(), 4),
    supersuit.frame_skip_v0(new_dummy(), (4, 6)),
    supersuit.sticky_actions_v0(new_dummy(), 0.75),
    supersuit.delay_observations_v0(new_dummy(), 1),
    supersuit.max_observation_v0(new_dummy(), 3),
    supersuit.nan_noop_v0(new_dummy(), 0),
    supersuit.nan_zeros_v0(new_dummy()),
    supersuit.nan_random_v0(new_dummy()),
    supersuit.scale_actions_v0(new_continuous_dummy(), 0.5),
]
@pytest.mark.parametrize("env", wrappers)
def test_basic_wrappers(env):
    """Smoke-test every wrapper: seed, reset, validate the observation, step."""
    env.seed(5)
    first_obs = env.reset()
    action_space = env.action_space
    observation_space = env.observation_space
    assert observation_space.contains(first_obs)
    assert first_obs.dtype == observation_space.dtype
    for _ in range(10):
        env.step(action_space.sample())
def test_lambda():
    """observation_lambda_v0 should compose and track the observation space."""
    def add1(obs, obs_space):
        return obs + 1

    base_env = DummyEnv(base_obs, base_obs_space, base_act_spaces)
    env = observation_lambda_v0(base_env, add1)
    obs0 = env.reset()
    assert int(obs0[0][0][0]) == 1
    # Wrappers compose: a second add1 increments the observation again.
    env = observation_lambda_v0(env, add1)
    obs0 = env.reset()
    assert int(obs0[0][0][0]) == 2

    def tile_obs(obs, obs_space):
        # Double the size of the leading observation dimension.
        shape_size = len(obs.shape)
        tile_shape = [1] * shape_size
        tile_shape[0] *= 2
        return np.tile(obs, tile_shape)

    env = observation_lambda_v0(env, tile_obs)
    obs0 = env.reset()
    assert env.observation_space.shape == (16, 8, 3)

    def change_shape_fn(obs_space):
        return Box(low=0, high=1, shape=(32, 8, 3))

    # Fix: change_shape_fn was defined but never used. Pass it as the explicit
    # observation-space transform for the second tiling (this is the optional
    # third argument of observation_lambda_v0).
    env = observation_lambda_v0(env, tile_obs, change_shape_fn)
    obs0 = env.reset()
    assert env.observation_space.shape == (32, 8, 3)
    assert obs0.shape == (32, 8, 3)
def test_action_lambda():
    """action_lambda_v1 should remap both actions and action spaces."""
    def shift_action(x, space):
        return x + 1

    def grow_space(space):
        return Discrete(space.n + 1)

    base_env = DummyEnv(base_obs, base_obs_space, base_act_spaces)
    env = action_lambda_v1(base_env, shift_action, grow_space)
    assert env.action_space.n == base_env.action_space.n + 1
    env.reset()
    env.step(5)

    def one_hot(index, size):
        encoded = np.zeros(size)
        encoded[index] = 1
        return encoded

    # A continuous Box action space exposed to the agent as Discrete via
    # one-hot encoding.
    box_act_space = Box(low=0, high=1, shape=(15,))
    base_env = DummyEnv(base_obs, base_obs_space, box_act_space)
    env = action_lambda_v1(
        base_env,
        lambda action, act_space: one_hot(action, act_space.shape[0]),
        lambda act_space: Discrete(act_space.shape[0]),
    )
    env.reset()
    env.step(2)
def test_rew_lambda():
    """reward_lambda_v0 should pass rewards through the given function."""
    env = supersuit.reward_lambda_v0(new_dummy(), lambda r: r / 10)
    env.reset()
    _, reward, _, _ = env.step(0)
    assert reward == 1.0 / 10
|
{"hexsha": "c376d28fac88d1d3096419bbeb3b5b1f44942736", "size": 4227, "ext": "py", "lang": "Python", "max_stars_repo_path": "test/gym_mock_test.py", "max_stars_repo_name": "PettingZoo-Team/SuperSu", "max_stars_repo_head_hexsha": "3c4e364b4744649cb9eaa9201d70b5be3d43730f", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 237, "max_stars_repo_stars_event_min_datetime": "2020-05-29T04:21:08.000Z", "max_stars_repo_stars_event_max_datetime": "2021-10-08T05:37:43.000Z", "max_issues_repo_path": "test/gym_mock_test.py", "max_issues_repo_name": "PettingZoo-Team/SuperSu", "max_issues_repo_head_hexsha": "3c4e364b4744649cb9eaa9201d70b5be3d43730f", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 58, "max_issues_repo_issues_event_min_datetime": "2020-07-19T18:44:16.000Z", "max_issues_repo_issues_event_max_datetime": "2021-10-13T03:26:29.000Z", "max_forks_repo_path": "test/gym_mock_test.py", "max_forks_repo_name": "PettingZoo-Team/SuperSu", "max_forks_repo_head_hexsha": "3c4e364b4744649cb9eaa9201d70b5be3d43730f", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 20, "max_forks_repo_forks_event_min_datetime": "2020-05-29T04:21:11.000Z", "max_forks_repo_forks_event_max_datetime": "2021-09-30T20:07:20.000Z", "avg_line_length": 30.4100719424, "max_line_length": 98, "alphanum_fraction": 0.6735273243, "include": true, "reason": "import numpy", "num_tokens": 1208}
|
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from tqdm import tqdm
from agent import CUDAAgent
from .base_student import BaseStudent
def softmax(x):
    """Compute softmax values for each set of scores in x (along axis 0).

    The maximum is subtracted before exponentiation for numerical stability:
    np.exp overflows for large scores, yielding inf/nan, while the shifted
    form is mathematically identical.
    """
    shifted = np.exp(x - np.max(x, axis=0, keepdims=True))
    return shifted / np.sum(shifted, axis=0)
def flatten(_list):
    """Concatenate a list of lists into a single flat list (one level only)."""
    flattened = []
    for sublist in _list:
        flattened.extend(sublist)
    return flattened
# pylint: disable=arguments-differ
class ICILStudent(BaseStudent, CUDAAgent):
    """Invariant Causal Imitation Learning (ICIL) student agent.

    Learns an environment-invariant ("causal") feature encoder shared across
    all training environments, plus one noise encoder/decoder pair per
    environment, and imitates the teacher through a policy network that acts
    on the causal representation.
    """
    def __init__(
        self,
        env,
        trajs_paths,
        model_path,
        num_training_envs,
        teacher,
        causal_features_encoder,
        noise_features_encoders,
        causal_features_decoder,
        noise_features_decoders,
        observations_decoder,
        env_discriminator,
        policy_network,
        energy_model,
        mine_network,
        buffer,
        adam_alpha,
    ):
        """Move all shared modules to the agent's device and build the four
        optimizers (representation, policy, discriminator, MINE)."""
        super(ICILStudent, self).__init__(
            env=env,
            trajs_paths=trajs_paths,
            model_path=model_path,
            teacher=teacher,
            buffer=buffer,
        )
        self.num_training_envs = num_training_envs
        # Environment-invariant modules live on self.device (from CUDAAgent).
        self.causal_features_encoder = causal_features_encoder.to(self.device)
        self.causal_features_decoder = causal_features_decoder.to(self.device)
        self.observations_decoder = observations_decoder.to(self.device)
        self.env_discriminator = env_discriminator.to(self.device)
        self.policy_network = policy_network.to(self.device)
        self.mine_network = mine_network.to(self.device)
        # NOTE(review): energy_model is the only module not moved with .to();
        # presumably it manages its own device — confirm.
        self.energy_model = energy_model
        # One noise encoder/decoder pair per training environment.
        self.noise_features_encoders = []
        self.noise_features_decoders = []
        for i in range(self.num_training_envs):
            self.noise_features_encoders.append(noise_features_encoders[i].to(self.device))
            self.noise_features_decoders.append(noise_features_decoders[i].to(self.device))
        self.adam_alpha = adam_alpha
        noise_models_params = flatten(
            [list(noise_features_encoder.parameters()) for noise_features_encoder in self.noise_features_encoders]
        ) + flatten(
            [list(noise_features_decoder.parameters()) for noise_features_decoder in self.noise_features_decoders]
        )
        # Representation optimizer: all encoders/decoders plus the policy head.
        self.rep_optimizer = optim.Adam(
            list(causal_features_encoder.parameters())
            + list(causal_features_decoder.parameters())
            + list(observations_decoder.parameters())
            + noise_models_params
            + list(policy_network.parameters()),
            lr=self.adam_alpha,
        )
        # Policy optimizer: causal encoder + policy network only.
        self.policy_opt = optim.Adam(
            list(causal_features_encoder.parameters()) + list(policy_network.parameters()), lr=self.adam_alpha
        )
        self.disc_opt = optim.Adam(list(env_discriminator.parameters()), lr=self.adam_alpha)
        # MINE uses its own fixed learning rate, independent of adam_alpha.
        self.mine_opt = optim.Adam(self.mine_network.parameters(), lr=1e-4)
        self.buffer = buffer
    def select_action(self, state, eval_mode=False):
        """Choose an action for *state* from the learned causal representation.

        Returns the greedy action index (numpy scalar) by default. With
        eval_mode=True, returns (one_hot_action, action_prob) where
        action_prob is the softmax over the policy logits.
        """
        causal_rep = self.causal_features_encoder(torch.FloatTensor(state).to(self.device))
        action = self.policy_network(causal_rep).argmax()
        action = action.detach().cpu().numpy()
        if eval_mode:
            action = self.policy_network(causal_rep).detach().cpu().numpy()
            num_actions = action.shape[0]
            action = np.argmax(action)
            one_hot_action = np.eye(num_actions)[action]
            action_logits = self.policy_network(causal_rep).detach().cpu().numpy()
            action_prob = softmax(action_logits)
            return one_hot_action, action_prob
        return action
    def train(self, num_updates):
        """Run *num_updates* optimization steps, then close the env and save
        the policy weights to self.model_path."""
        for update_index in tqdm(range(num_updates)):
            self._update_networks()
        self.env.close()
        self.serialize()
    def serialize(self):
        """Save the policy network's weights to self.model_path."""
        torch.save(self.policy_network.state_dict(), self.model_path)
    def deserialize(self):
        """Load policy network weights previously saved by serialize()."""
        self.policy_network.load_state_dict(torch.load(self.model_path))
    def _update_networks(self):
        """One optimization step on a sampled minibatch: representation and
        policy first, then the environment discriminator, then MINE."""
        samples = self.buffer.sample()
        (
            ce_loss,
            disc_entropy,
            next_state_pred_loss,
            next_state_energy_loss,
            expert_samples_energy,  # returned by _compute_loss but not used in the update
            mi_loss,
            env_discriminator_loss,
            mine_loss,
        ) = self._compute_loss(samples)
        # Representation objective: env-agnostic features (high discriminator
        # entropy), next-state predictability, low causal/noise MI.
        rep_loss = disc_entropy + next_state_pred_loss + mi_loss
        policy_loss = ce_loss + next_state_energy_loss
        self.rep_optimizer.zero_grad()
        self.policy_opt.zero_grad()
        # retain_graph=True: policy_loss.backward() below reuses parts of the
        # same computation graph built in _compute_loss.
        rep_loss.backward(retain_graph=True)
        policy_loss.backward()
        self.rep_optimizer.step()
        self.policy_opt.step()
        self.disc_opt.zero_grad()
        env_discriminator_loss.backward()
        self.disc_opt.step()
        self.mine_opt.zero_grad()
        mine_loss.backward()
        self.mine_opt.step()
    def _compute_loss(self, samples):
        """Compute all ICIL losses on a minibatch.

        *samples* is a dict with keys 'state', 'action', 'next_state' and
        'env' (integer environment ids). Returns an 8-tuple of scalar loss
        tensors in the order unpacked by _update_networks.
        """
        state = torch.FloatTensor(samples["state"]).to(self.device)
        action = torch.LongTensor(samples["action"]).to(self.device)
        next_state = torch.FloatTensor(samples["next_state"]).to(self.device)
        env_ids = torch.LongTensor(samples["env"]).to(self.device)
        causal_rep = self.causal_features_encoder(state)
        # 1. Policy loss: cross-entropy imitation of the demonstrated actions.
        qvalues = self.policy_network(causal_rep)
        ce_loss = nn.CrossEntropyLoss()(qvalues, action)
        action_one_hot = F.one_hot(action).type(torch.FloatTensor).to(self.device)
        # Differentiable (straight-through) sample of the policy's action.
        imitation_action = F.gumbel_softmax(qvalues, hard=True).type(torch.FloatTensor).to(self.device)
        # 2. Env discriminator entropy loss for causal representation learning
        # (mean of p*log p is negative entropy; minimizing it maximizes entropy).
        predicted_env = self.env_discriminator(causal_rep)
        disc_entropy_entropy = torch.mean(F.softmax(predicted_env, dim=1) * F.log_softmax(predicted_env, dim=1))
        # 3. Env discriminator cross-entropy loss for training the environment
        # classifier itself (causal_rep detached so only the discriminator learns).
        predicted_env = self.env_discriminator(causal_rep.detach())
        env_discriminator_loss = nn.CrossEntropyLoss()(predicted_env, env_ids)
        # 4. Next state prediction loss
        #############################################################################################################
        # Per-environment noise representations, written back into full-batch
        # tensors at each environment's sample indices.
        noise_rep = causal_rep.clone()
        next_state_noise_rep = causal_rep.clone()
        for env_id in range(self.num_training_envs):
            env_samples_idx = torch.where(env_ids == env_id)[0]
            if env_samples_idx.shape[0] == 0:
                # No samples from this env in the batch: fall back to index 0
                # so the per-env modules still receive a (dummy) input.
                env_samples_idx = torch.LongTensor([0]).to(self.device)
            state_env = state[env_samples_idx]
            action_one_hot_env = action_one_hot[env_samples_idx]
            noise_rep_env = self.noise_features_encoders[env_id](state_env)
            next_state_noise_rep_env = self.noise_features_decoders[env_id](noise_rep_env, action_one_hot_env)
            noise_rep[env_samples_idx] = noise_rep_env
            next_state_noise_rep[env_samples_idx] = next_state_noise_rep_env
        next_state_causal_rep = self.causal_features_decoder(causal_rep, action_one_hot)
        predicted_next_state = self.observations_decoder(next_state_causal_rep, next_state_noise_rep)
        next_state_pred_loss = nn.MSELoss()(predicted_next_state, next_state)
        #############################################################################################################
        # 5. Mutual information loss between causal and noise representations.
        mi_loss = self.mine_network.mi(causal_rep, noise_rep)
        mine_loss = self.mine_network.forward(causal_rep.detach(), noise_rep.detach())
        # 6. Energy loss
        #############################################################################################################
        next_state_noise_rep_energy = causal_rep.clone()
        with torch.no_grad():
            # Noise branch recomputed with the policy's imitation action;
            # gradients flow only through the causal branch below.
            for env_id in range(self.num_training_envs):
                env_samples_idx = torch.where(env_ids == env_id)[0]
                if env_samples_idx.shape[0] == 0:
                    env_samples_idx = torch.LongTensor([0]).to(self.device)
                imitation_action_env = imitation_action[env_samples_idx]
                noise_rep_env = noise_rep[env_samples_idx]
                next_state_noise_rep_env = self.noise_features_decoders[env_id](noise_rep_env, imitation_action_env)
                next_state_noise_rep_energy[env_samples_idx] = next_state_noise_rep_env
        next_state_causal_rep_energy = self.causal_features_decoder(causal_rep, imitation_action)
        predicted_next_state_energy = self.observations_decoder(
            next_state_causal_rep_energy, next_state_noise_rep_energy
        )
        expert_samples_energy = self.energy_model.forward(state).mean()
        next_state_energy_loss = self.energy_model.forward(predicted_next_state_energy).mean()
        #############################################################################################################
        return (
            ce_loss,
            disc_entropy_entropy,
            next_state_pred_loss,
            next_state_energy_loss,
            expert_samples_energy,
            mi_loss,
            env_discriminator_loss,
            mine_loss,
        )
|
{"hexsha": "0caad34d85bfdcdf380dd2ad1a004435160d7875", "size": 9262, "ext": "py", "lang": "Python", "max_stars_repo_path": "student/icil_student.py", "max_stars_repo_name": "ioanabica/Invariant-Causal-Imitation-Learning", "max_stars_repo_head_hexsha": "eb92fac1db6e418250ad383d888d69faa667e7aa", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 12, "max_stars_repo_stars_event_min_datetime": "2022-01-06T14:42:49.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-13T16:41:14.000Z", "max_issues_repo_path": "student/icil_student.py", "max_issues_repo_name": "vanderschaarlab/Invariant-Causal-Imitation-Learning", "max_issues_repo_head_hexsha": "eb92fac1db6e418250ad383d888d69faa667e7aa", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2022-03-03T16:28:25.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-03T16:28:25.000Z", "max_forks_repo_path": "student/icil_student.py", "max_forks_repo_name": "vanderschaarlab/Invariant-Causal-Imitation-Learning", "max_forks_repo_head_hexsha": "eb92fac1db6e418250ad383d888d69faa667e7aa", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 4, "max_forks_repo_forks_event_min_datetime": "2022-01-15T05:06:22.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-11T14:11:25.000Z", "avg_line_length": 37.6504065041, "max_line_length": 117, "alphanum_fraction": 0.6321528827, "include": true, "reason": "import numpy", "num_tokens": 1809}
|
#These are the basics + the ability to shut the program down.
import pygame as pg #This is where I get all the pygame stuff
import numpy as np #I import this becouse I'm in love with numpy
from pygame.locals import QUIT #QUIT is a constant (I think) that indicates wether the user is trying to quit the program by pushing the x in the top-right corner of the window
class myGame:
    """A tiny pygame application: opens a window and runs a fixed-fps loop."""
    def __init__(self, settings):
        """Start pygame and build the display from the *settings* dict."""
        pg.init()
        self.settings = settings
        self.screen = pg.display.set_mode((settings["width"], settings["height"]))
        self.clock = pg.time.Clock()
        self.running = False
    def handleEvents(self):
        """Drain the event queue; stop the loop on a window-close request."""
        if any(event.type == QUIT for event in pg.event.get()):
            self.running = False
    def mainLoop(self):
        """Poll events and throttle to the configured fps until quit."""
        self.running = True
        while True:
            if not self.running:
                break
            self.handleEvents()
            self.clock.tick(self.settings["fps"])
            print("tick")
#Definitions: (This might not be the best way to do this but for now, I like it.)
settings = {# Program configuration consumed by myGame.
    "width" : 800, # window width in pixels
    "height" : 600, # window height in pixels
    "fps" : 2 # main-loop frame rate (ticks per second)
}
# Run program: construct the game and block inside its main loop until quit.
myObject = myGame(settings)
myObject.mainLoop()
|
{"hexsha": "9b93715695dfe8d510ac19a3ce21651ea45f0373", "size": 1361, "ext": "py", "lang": "Python", "max_stars_repo_path": "PyGame/pygame2/ex1/ex2.py", "max_stars_repo_name": "hoppfull/Legacy-Python", "max_stars_repo_head_hexsha": "43f465bfdb76c91f2ac16aabb0783fdf5f459adb", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "PyGame/pygame2/ex1/ex2.py", "max_issues_repo_name": "hoppfull/Legacy-Python", "max_issues_repo_head_hexsha": "43f465bfdb76c91f2ac16aabb0783fdf5f459adb", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "PyGame/pygame2/ex1/ex2.py", "max_forks_repo_name": "hoppfull/Legacy-Python", "max_forks_repo_head_hexsha": "43f465bfdb76c91f2ac16aabb0783fdf5f459adb", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 37.8055555556, "max_line_length": 176, "alphanum_fraction": 0.7244673035, "include": true, "reason": "import numpy", "num_tokens": 341}
|
from scipy.io import loadmat
from datetime import datetime
import os
def calc_age(taken, dob):
    """Age in whole years at year *taken*, given *dob* as a Matlab serial date.

    Matlab serial date numbers are offset 366 days from Python's ordinal,
    hence the subtraction before converting.
    """
    born = datetime.fromordinal(max(int(dob) - 366, 1))
    # Assume the photo was taken mid-year: birthdays in Jul-Dec have not
    # occurred yet by the photo date, so subtract one extra year.
    late_birthday = 0 if born.month < 7 else 1
    return taken - born.year - late_birthday
def get_meta(mat_path, db):
    """Unpack an IMDB/WIKI-style .mat annotation file.

    *db* is the key of the struct inside the file. Returns the annotation
    arrays plus a derived list of ages computed from dob and photo_taken.
    """
    meta = loadmat(mat_path)
    root = meta[db][0, 0]
    full_path = root["full_path"][0]
    dob = root["dob"][0]  # Matlab serial date number
    gender = root["gender"][0]
    photo_taken = root["photo_taken"][0]  # year
    face_score = root["face_score"][0]
    second_face_score = root["second_face_score"][0]
    age = [calc_age(photo_taken[i], born) for i, born in enumerate(dob)]
    return full_path, dob, gender, photo_taken, face_score, second_face_score, age
def load_data(mat_path):
    """Load a preprocessed dataset .mat file.

    Returns (image, gender, age, db, img_size, min_score) with MATLAB's
    extra wrapping dimensions stripped from the vector/scalar fields.
    """
    mat = loadmat(mat_path)
    return (
        mat["image"],
        mat["gender"][0],
        mat["age"][0],
        mat["db"][0],
        mat["img_size"][0, 0],
        mat["min_score"][0, 0],
    )
def get_age_group(age):
    """Bucket an age in years into one of 9 groups:
    0-2, 3-9, 10-19, ..., 60-69, and 70+ (group 8)."""
    # Inclusive upper bound of each bucket; anything above the last is 8.
    for group, upper in enumerate((2, 9, 19, 29, 39, 49, 59, 69)):
        if age <= upper:
            return group
    return 8
def get_meta_maf(age_group):
    """Parse data/MixedAsianFace/file_names.txt into (paths, ages, genders).

    Each line is '<dataset>/<file or subpath>'; age/gender encoding differs
    per dataset. Entries older than 80 are dropped. With age_group == 'y',
    ages are replaced by their get_age_group bucket (which never exceeds 80).
    """
    img_paths, ages, genders = [], [], []
    with open('data/MixedAsianFace/file_names.txt', 'r') as f:
        for raw_line in f:
            path = raw_line.strip()
            dataset, file_name, *_ = path.split('/')
            if (file_name == 'image sets'):
                continue
            if dataset == 'AFAD-Full':
                # AFAD encodes age and gender as path components; '111' = male.
                _, age, sex, *_ = path.split('/')
                gender = 1 if sex == '111' else 0
            elif dataset == 'UTKFace':
                # UTK file names are '<age>_<gender>_...'; gender '0' = male.
                age, sex, *_ = file_name.split('_')
                gender = 1 if sex == '0' else 0
            elif dataset == 'AAF':
                age, sex, *_ = file_name.split('_')
                gender = int(sex)
            else:
                # Remaining datasets use '<age-range>_<Male|Female>_...';
                # the midpoint of the range stands in for the age.
                age_range, sex, *_ = file_name.split('_')
                if age_range == 'more than 70':
                    age = 70
                else:
                    low, high = age_range.split('-')
                    age = (int(high) + int(low)) / 2
                gender = 1 if sex == 'Male' else 0
            if age_group == 'y':
                age = get_age_group(int(age))
            if int(age) > 80:
                continue
            img_paths.append(path)
            ages.append(int(age))
            genders.append(gender)
    return img_paths, ages, genders
def get_meta_megaage(age_group):
    """Parse megaage_asian/file_names.txt into (paths, ages, genders).

    File names look like '<folder>/<gender>_<age>_rest'; the gender prefix
    is stripped from the stored path but kept as the label. Note that
    *age_group* is accepted for interface parity but unused here.
    """
    with open('megaage_asian/file_names.txt', 'r') as f:
        entries = f.read().strip().split('\n')
    img_paths, ages, genders = [], [], []
    for entry in entries:
        folder, name = entry.split('/')
        tokens = name.split('_')
        img_paths.append(folder + '/' + '_'.join(tokens[1:]))
        genders.append(int(tokens[0]))
        ages.append(int(tokens[1]))
    # The first 3537 entries are skipped (presumably a held-out split — verify).
    return img_paths[3537:], ages[3537:], genders[3537:]
|
{"hexsha": "3c2ed32ba4dbe6a1edbd1f4e8fd1625291fe8fc3", "size": 3060, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/utils.py", "max_stars_repo_name": "phanbaominh/age-gender-estimation", "max_stars_repo_head_hexsha": "a157e486f11b21fa46b447a2d7e978e0ff94a919", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/utils.py", "max_issues_repo_name": "phanbaominh/age-gender-estimation", "max_issues_repo_head_hexsha": "a157e486f11b21fa46b447a2d7e978e0ff94a919", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/utils.py", "max_forks_repo_name": "phanbaominh/age-gender-estimation", "max_forks_repo_head_hexsha": "a157e486f11b21fa46b447a2d7e978e0ff94a919", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.7087378641, "max_line_length": 105, "alphanum_fraction": 0.5859477124, "include": true, "reason": "from scipy", "num_tokens": 894}
|
[STATEMENT]
lemma (in Ring) LSM_eq_linear_span:"\<lbrakk>R module M; T \<subseteq> carrier M\<rbrakk> \<Longrightarrow>
(LSM\<^bsub>R\<^esub> M T) = linear_span R M (carrier R) T"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>R module M; T \<subseteq> carrier M\<rbrakk> \<Longrightarrow> LSM\<^bsub>R\<^esub> M T = linear_span R M (carrier R) T
[PROOF STEP]
apply (cut_tac whole_ideal)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>R module M; T \<subseteq> carrier M; ideal R (carrier R)\<rbrakk> \<Longrightarrow> LSM\<^bsub>R\<^esub> M T = linear_span R M (carrier R) T
[PROOF STEP]
apply (rule equalityI)
[PROOF STATE]
proof (prove)
goal (2 subgoals):
1. \<lbrakk>R module M; T \<subseteq> carrier M; ideal R (carrier R)\<rbrakk> \<Longrightarrow> LSM\<^bsub>R\<^esub> M T \<subseteq> linear_span R M (carrier R) T
2. \<lbrakk>R module M; T \<subseteq> carrier M; ideal R (carrier R)\<rbrakk> \<Longrightarrow> linear_span R M (carrier R) T \<subseteq> LSM\<^bsub>R\<^esub> M T
[PROOF STEP]
apply (frule Module.linear_span_subModule[of M R "carrier R" T], assumption+)
[PROOF STATE]
proof (prove)
goal (2 subgoals):
1. \<lbrakk>R module M; T \<subseteq> carrier M; ideal R (carrier R); submodule R M (linear_span R M (carrier R) T)\<rbrakk> \<Longrightarrow> LSM\<^bsub>R\<^esub> M T \<subseteq> linear_span R M (carrier R) T
2. \<lbrakk>R module M; T \<subseteq> carrier M; ideal R (carrier R)\<rbrakk> \<Longrightarrow> linear_span R M (carrier R) T \<subseteq> LSM\<^bsub>R\<^esub> M T
[PROOF STEP]
apply (rule LSM_sub_submodule[of M T "linear_span R M (carrier R) T"],
assumption+, simp add:Module.l_span_cont_H[of M R T])
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>R module M; T \<subseteq> carrier M; ideal R (carrier R)\<rbrakk> \<Longrightarrow> linear_span R M (carrier R) T \<subseteq> LSM\<^bsub>R\<^esub> M T
[PROOF STEP]
apply (frule LSM_submodule[of M T], assumption)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>R module M; T \<subseteq> carrier M; ideal R (carrier R); submodule R M (LSM\<^bsub>R\<^esub> M T)\<rbrakk> \<Longrightarrow> linear_span R M (carrier R) T \<subseteq> LSM\<^bsub>R\<^esub> M T
[PROOF STEP]
apply (frule LSM_inc_T[of M T], assumption)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>R module M; T \<subseteq> carrier M; ideal R (carrier R); submodule R M (LSM\<^bsub>R\<^esub> M T); T \<subseteq> LSM\<^bsub>R\<^esub> M T\<rbrakk> \<Longrightarrow> linear_span R M (carrier R) T \<subseteq> LSM\<^bsub>R\<^esub> M T
[PROOF STEP]
apply (rule Module.l_span_sub_submodule[of M R "carrier R" "LSM\<^bsub>R\<^esub> M T" T],
assumption+)
[PROOF STATE]
proof (prove)
goal:
No subgoals!
[PROOF STEP]
done
|
{"llama_tokens": 1105, "file": "Group-Ring-Module_Algebra9", "length": 8}
|
[STATEMENT]
lemma plusl_bot_infty: "\<bottom>\<^sub>1 +\<^sub>1 \<infinity>\<^sub>1 = \<bottom>\<^sub>1"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<bottom>\<^sub>1 +\<^sub>1 \<infinity>\<^sub>1 = \<bottom>\<^sub>1
[PROOF STEP]
by (simp)
|
{"llama_tokens": 103, "file": "Regular_Algebras_Pratts_Counterexamples", "length": 1}
|
% Part: first-order-logic
% Chapter: tableaux
% Section: quantifier-rules
\documentclass[../../../include/open-logic-section]{subfiles}
\begin{document}
\olfileid{fol}{tab}{qrl}
\olsection{Quantifier Rules}
\subsection{Rules for $\lforall$}
\begin{defish}
\AxiomC{\sFmla{\True}{\lforall[x][!A(x)]}}
\RightLabel{\TRule{\True}{\lforall}}
\UnaryInfC{\sFmla{\True}{!A(t)}}
\DisplayProof
\hfill
\AxiomC{\sFmla{\False}{\lforall[x][!A(x)]}}
\RightLabel{\TRule{\False}{\lforall}}
\UnaryInfC{\sFmla{\False}{!A(a)}}
\DisplayProof
\end{defish}
In \TRule{\True}{\lforall}, $t$ is a closed term (i.e., one without
variables). In \TRule{\False}{\lforall}, $a$~is !!a{constant} which
must not occur anywhere in the branch above the \TRule{\False}{\lforall}
rule. We call $a$ the \emph{eigenvariable} of the
\TRule{\False}{\lforall} inference.\footnote{We use the term
``eigenvariable'' even though $a$ in the above rule is !!a{constant}.
This has historical reasons.}
\subsection{Rules for $\lexists$}
\begin{defish}
\AxiomC{\sFmla{\True}{\lexists[x][!A(x)]}}
\RightLabel{\TRule{\True}{\lexists}}
\UnaryInfC{\sFmla{\True}{!A(a)}}
\DisplayProof
\hfill
\AxiomC{\sFmla{\False}{\lexists[x][!A(x)]}}
\RightLabel{\TRule{\False}{\lexists}}
\UnaryInfC{\sFmla{\False}{!A(t)}}
\DisplayProof
\end{defish}
Again, $t$~is a closed term, and $a$~is !!a{constant} which does not
occur in the branch above the~\TRule{\True}{\lexists} rule. We call
$a$ the \emph{eigenvariable} of the \TRule{\True}{\lexists} inference.
The condition that an eigenvariable not occur in the branch above the
\TRule{\False}{\lforall} or \TRule{\True}{\lexists} inference is
called the \emph{eigenvariable condition}.
\begin{explain}
Recall the convention that when $!A$ is !!a{formula} with the
!!{variable}~$x$ free, we indicate this by writing~$!A(x)$. In the
same context, $!A(t)$ then is short for~$\Subst{!A}{t}{x}$. So we
could also write the \TRule{\False}{\lexists} rule as:
\begin{prooftree}
\AxiomC{\sFmla{\False}{\lexists[x][!A]}}
\RightLabel{\TRule{\False}{\lexists}}
\UnaryInfC{\sFmla{\False}{\Subst{!A}{t}{x}}}
\end{prooftree}
Note that $t$ may already occur in~$!A$, e.g., $!A$~might
be~$\Atom{\Obj P}{t,x}$. Thus, inferring $\sFmla{\False}{\Atom{\Obj
P}{t,t}}$ from~$ \sFmla{\False}{\lexists[x][\Atom{\Obj P}{t,x}]}$ is
a correct application of~\TRule{\False}{\lexists}. However, the
eigenvariable conditions in \TRule{\False}{\lforall}
and~\TRule{\True}{\lexists} require that the !!{constant}~$a$ does
not occur in~$!A$. So, you cannot correctly infer
$\sFmla{\False}{\Atom{\Obj P}{a,a}}$ from
$\sFmla{\False}{\lforall[x][\Atom{\Obj P}{a,x}]}$
using~$\TRule{\False}{\lforall}$.
\end{explain}
\begin{explain}
In \TRule{\True}{\lforall} and \TRule{\False}{\lexists} there are no
restrictions on the term~$t$. On the other hand, in the
\TRule{\True}{\lexists} and \TRule{\False}{\lforall} rules, the
eigenvariable condition requires that the !!{constant}~$a$ does not
occur anywhere in the branches above the respective inference. It is
necessary to ensure that the system is sound. Without this condition,
the following would be a closed !!{tableau} for
$\lexists[x][\formula{A}(x)] \lif \lforall[x][\formula{A}(x)]$:
\begin{center}
\begin{tableau}{}
[\sFmla{\False}{\lexists[x][\formula{A}(x)] \lif \lforall[x][\formula{A}(x)]}, just=\TAss
[\sFmla{\True}{\lexists[x][\formula{A}(x)]},
just={\TRule{\False}{\lif}[1]}
[\sFmla{\False}{\lforall[x][\formula{A}(x)]},
just={\TRule{\False}{\lif}[1]}
[\sFmla{\True}{\formula{A}(a)},
just={\TRule{\True}{\lexists}[2]}
[\sFmla{\False}{\formula{A}(a)},
just={\TRule{\False}{\lforall}[3]}, close]
]
]
]
]
\end{tableau}
\end{center}
However, $\lexists[x][\formula{A}(x)] \lif
\lforall[x][\formula{A}(x)]$ is not valid.
\end{explain}
\end{document}
|
{"hexsha": "60158f3b85d68251e086522c9da02ff9959cfbde", "size": 3850, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "content/first-order-logic/tableaux/quantifier-rules.tex", "max_stars_repo_name": "GKerfImf/OpenLogic", "max_stars_repo_head_hexsha": "5791905d3149f68e05885290f448054b98a0e51b", "max_stars_repo_licenses": ["CC-BY-4.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "content/first-order-logic/tableaux/quantifier-rules.tex", "max_issues_repo_name": "GKerfImf/OpenLogic", "max_issues_repo_head_hexsha": "5791905d3149f68e05885290f448054b98a0e51b", "max_issues_repo_licenses": ["CC-BY-4.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "content/first-order-logic/tableaux/quantifier-rules.tex", "max_forks_repo_name": "GKerfImf/OpenLogic", "max_forks_repo_head_hexsha": "5791905d3149f68e05885290f448054b98a0e51b", "max_forks_repo_licenses": ["CC-BY-4.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.0, "max_line_length": 91, "alphanum_fraction": 0.6644155844, "num_tokens": 1406}
|
"""
Copyright (c) Facebook, Inc. and its affiliates.
"""
"""This file has functions to implement different dances for the agent.
"""
import numpy as np
import tasks
import shapes
import search
from util import ErrorWithResponse
# FIXME! actual jump on client
jump = [{"translate": (0, 1, 0)}, {"translate": (0, -1, 0)}]
# Konami-code-inspired sequence: up, up, down, down, left, right, left, right.
konami_dance = [
    {"translate": step}
    for step in [
        (0, 1, 0),
        (0, 1, 0),
        (0, -1, 0),
        (0, -1, 0),
        (0, 0, -1),
        (0, 0, 1),
        (0, 0, 1),
        (0, 0, -1),
    ]
]
# TODO relative to current
# Head bob: two ticks at pitch 0, two ticks at pitch 90, repeated six times,
# then two final ticks at pitch 0 (26 steps, each a fresh dict).
head_bob = [
    {"head_yaw_pitch": (0, 0) if tick % 4 < 2 else (0, 90)} for tick in range(24)
] + [{"head_yaw_pitch": (0, 0)}, {"head_yaw_pitch": (0, 0)}]
def add_default_dances(memory):
    """Register the built-in dance routines on the agent's *memory*."""
    dance_specs = (
        (jump, "jump", None),
        (konami_dance, "konami dance", ["ornamental_dance", "konami"]),
        (head_bob, "head bob", ["ornamental_dance"]),
    )
    for sequence, name, tags in dance_specs:
        if tags is None:
            memory.add_dance(generate_sequential_move_fn(sequence), name=name)
        else:
            memory.add_dance(generate_sequential_move_fn(sequence), name=name, tags=tags)
def generate_sequential_move_fn(sequence):
    """Build a move_fn that plays *sequence* one step per call.

    The returned callable takes (danceObj, agent), advances danceObj.tick,
    and returns the next task — or None once the sequence is exhausted.
    """
    def move_fn(dance_obj, agent):
        step = dance_obj.tick
        if step >= len(sequence):
            return None  # routine finished
        if step == 0 and dance_obj.dance_location is not None:
            # First walk to the requested start location, consuming it so it
            # is only targeted once.
            task = tasks.Move(agent, {"target": dance_obj.dance_location, "approx": 0})
            dance_obj.dance_location = None
        else:
            task = tasks.DanceMove(agent, sequence[step])
        dance_obj.tick += 1
        return task
    return move_fn
class Movement(object):
    """Base class for movements driven by a pluggable move_fn.

    move_fn(self, agent) is expected to return the next move — e.g. a
    translation step — or None once the movement is finished.
    """
    def __init__(self, agent, move_fn, dance_location=None):
        self.tick = 0
        self.agent = agent
        self.dance_location = dance_location
        self.move_fn = move_fn
    def get_move(self):
        """Delegate to move_fn; returns the next move or None when done."""
        return self.move_fn(self, self.agent)
# class HeadTurnInstant(Movement):
# TODO: class TimedDance(Movement): !!!!
# e.g. go around the x
# go through the x
# go over the x
# go across the x
class RefObjMovement(Movement):
    """Movement that walks a closed circular path around a reference object.

    NOTE(review): does not call Movement.__init__, so self.move_fn is never
    set; get_move is overridden instead — confirm this is intentional.
    """
    def __init__(
        self,
        agent,
        ref_object=None,  # memory node of the object to circle (presumably — verify)
        relative_direction="CLOCKWISE",
    ):
        self.agent = agent
        self.tick = 0
        if not ref_object:
            # No reference object: degenerate bounds/center at the agent's position.
            x, y, z = agent.pos
            bounds = (x, x, y, y, z, z)
            center = (x, y, z)
        else:
            bounds = ref_object.get_bounds()
            center = ref_object.get_pos()
        # Circle radius: the object's largest extent along any axis.
        d = max(bounds[1] - bounds[0], bounds[3] - bounds[2], bounds[5] - bounds[4])
        if relative_direction == "CLOCKWISE" or relative_direction == "AROUND":
            offsets = shapes.arrange(
                "circle", schematic=None, shapeparams={"encircled_object_radius": d}
            )
        elif relative_direction == "ANTICLOCKWISE":
            offsets = shapes.arrange(
                "circle", schematic=None, shapeparams={"encircled_object_radius": d}
            )
            # Same circle, traversed in reverse order.
            offsets = offsets[::-1]
        else:
            raise NotImplementedError("TODO other kinds of paths")
        self.path = [np.round(np.add(center, o)) for o in offsets]
        # Close the loop by returning to the starting waypoint.
        self.path.append(self.path[0])
        # check each offset to find a nearby reachable point, see if a path
        # is possible now, and error otherwise
        for i in range(len(self.path) - 1):
            path = search.astar(agent, self.path[i + 1], approx=2, pos=self.path[i])
            if path is None:
                raise ErrorWithResponse("I cannot find an appropriate path.")
    def get_move(self):
        """Return a Move task toward the next waypoint, or None when done."""
        if self.tick >= len(self.path):
            return None
        mv = tasks.Move(self.agent, {"target": self.path[self.tick], "approx": 2})
        self.tick += 1
        return mv
|
{"hexsha": "0a646f079abbf0892aa42a6aeba9e5fdf6fd004a", "size": 4760, "ext": "py", "lang": "Python", "max_stars_repo_path": "python/craftassist/dance.py", "max_stars_repo_name": "anoushkt/craftassist", "max_stars_repo_head_hexsha": "c200af65e52e800f0f0cc540fe836b644383349d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-04-13T06:01:03.000Z", "max_stars_repo_stars_event_max_datetime": "2021-04-13T06:01:03.000Z", "max_issues_repo_path": "python/craftassist/dance.py", "max_issues_repo_name": "anoushkt/craftassist", "max_issues_repo_head_hexsha": "c200af65e52e800f0f0cc540fe836b644383349d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "python/craftassist/dance.py", "max_forks_repo_name": "anoushkt/craftassist", "max_forks_repo_head_hexsha": "c200af65e52e800f0f0cc540fe836b644383349d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.5128205128, "max_line_length": 89, "alphanum_fraction": 0.5716386555, "include": true, "reason": "import numpy", "num_tokens": 1304}
|
from zipfile import ZipFile
import pandas as pd
from scipy.io import savemat
def read_filename(filename):
    """Load the (left, right, winner) target-id triples from a zipped CSV.

    *filename* is e.g. '497-responses.csv.zip'; the archive is expected to
    contain a CSV with the same name minus the '.zip' suffix.
    """
    # BUG FIX: the archive path previously contained a literal placeholder
    # instead of interpolating *filename*, so every call tried to open the
    # same nonexistent archive.
    with ZipFile(f'../contests/responses/{filename}') as myzip:
        csv_file = filename.replace('.zip', '')
        with myzip.open(csv_file) as f:
            df = pd.read_csv(f, index_col=0)
    print(df.columns)
    return df[['left_target_id', 'right_target_id', 'winner_target_id']].values
# NOTE(review): this assignment is dead code — the for-loop below immediately
# rebinds `filename`.
filename = '497-responses.csv.zip'
for filename in ['497-responses.csv.zip',
                 '508-round2-dueling-responses.csv.zip',
                 '509-round2-dueling-responses.csv.zip']:
    # The contest id is the leading three digits of the file name.
    contest = filename[:3]
    left_right_winner = read_filename(filename)
    print(len(left_right_winner))
    # Export the triples as a MATLAB .mat file named after the contest.
    savemat(f'{contest}_responses.mat', {'left_right_winner': left_right_winner})
    # NOTE(review): stops after the first contest — presumably a debugging
    # shortcut; remove this break to export all three contests.
    if '497' in filename: break
|
{"hexsha": "d5d09c3ffe89cec265996de0cbbdba42b3ff8bc0", "size": 823, "ext": "py", "lang": "Python", "max_stars_repo_path": "docs/example-analyses/_unused-scripts/dueling-responses.py", "max_stars_repo_name": "kgjamieson/NEXT_data", "max_stars_repo_head_hexsha": "7cbe8080b441fc91e2e8198ec47c750e6517f83f", "max_stars_repo_licenses": ["CC-BY-4.0", "BSD-3-Clause"], "max_stars_count": 62, "max_stars_repo_stars_event_min_datetime": "2016-04-12T14:00:47.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-18T19:09:47.000Z", "max_issues_repo_path": "docs/example-analyses/_unused-scripts/dueling-responses.py", "max_issues_repo_name": "kgjamieson/NEXT_data", "max_issues_repo_head_hexsha": "7cbe8080b441fc91e2e8198ec47c750e6517f83f", "max_issues_repo_licenses": ["CC-BY-4.0", "BSD-3-Clause"], "max_issues_count": 28, "max_issues_repo_issues_event_min_datetime": "2016-02-26T17:39:14.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-10T00:24:51.000Z", "max_forks_repo_path": "docs/example-analyses/_unused-scripts/dueling-responses.py", "max_forks_repo_name": "kgjamieson/NEXT_data", "max_forks_repo_head_hexsha": "7cbe8080b441fc91e2e8198ec47c750e6517f83f", "max_forks_repo_licenses": ["CC-BY-4.0", "BSD-3-Clause"], "max_forks_count": 12, "max_forks_repo_forks_event_min_datetime": "2017-10-20T08:46:04.000Z", "max_forks_repo_forks_event_max_datetime": "2021-12-25T01:35:12.000Z", "avg_line_length": 35.7826086957, "max_line_length": 81, "alphanum_fraction": 0.6780072904, "include": true, "reason": "from scipy", "num_tokens": 195}
|
"""
Train videos are convert into the image frames according to what UCF annotation and readMe.
Training models is created if no training has been done before, weights can be loaded from a pretrained model.
Training process is done using Faster R-CNN with VGG16 network.
The length of each epoch used to do training is 1000 and the total number of epochs trained is 5.
Generating some graph results for loss rpn classifier, loss rpn regression, loss class classifier, loss class regression,
total_loss and class_accuracy by calling plot_graph class and using the csv file that is generated.
"""
from __future__ import division
import random
import pprint
import sys
import time
import pickle
import pandas as pd
import numpy as np
from tensorflow.keras import backend as K
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.layers import Input
from tensorflow.keras.models import Model
from fast_rcnn import config, data_generators
from fast_rcnn import losses as rcnn_losses
import fast_rcnn.roi_helpers as roi_helpers
from tensorflow.python.keras.utils import generic_utils
from fast_rcnn.simple_parser import get_data
from fast_rcnn import vgg as nn
from format_dataset import remove_file
from plot_graph import train_process_graph
# maximum depth of the Python interpreter stack to the required limit.
# This limit prevents any program from getting into infinite recursion,
# Otherwise infinite recursion will lead to overflow of the C stack and crash the Python.
sys.setrecursionlimit(40000)
def update_execute_config():
    """
    Load the training configuration and prepare the run.

    Clears previous result data, points the config at the base network
    weights, parses the annotation file, registers the background class,
    and pickles the config so the test phase can reload identical settings.
    :return classes_count: number of training images per class (label).
    :return class_mapping: mapping from class label to integer id.
    :return all_imgs: all the parsed training samples.
    :return curr_config: the populated config object.
    """
    cfg = config.Config()
    print("Clear the graph data in the folder {}".format(cfg.result_graphs_path))
    remove_file(cfg.train_process_data_file)
    # Point the config at the default backbone weights for this network.
    cfg.base_net_weights = nn.get_weight_path()
    # Parse the annotation file into samples, per-class counts and label ids.
    all_imgs, classes_count, class_mapping = get_data(cfg.train_path_file, header=True)
    # Make sure the background class is always present in the mapping.
    if 'bg' not in classes_count:
        classes_count['bg'] = 0
        class_mapping['bg'] = len(class_mapping)
    cfg.class_mapping = class_mapping
    print('Training images per class: ')
    pprint.pprint(classes_count)
    print('Num classes (including bg) = {}'.format(len(classes_count)))
    # Persist the config so testing can restore exactly the same settings.
    with open(cfg.train_config_output_filename, 'wb') as out_file:
        pickle.dump(cfg, out_file)
    print('Config has been written to {}, and can be loaded when testing to ensure correct results'.format(
        cfg.train_config_output_filename))
    print('Num all images {}'.format(len(all_imgs)))
    return classes_count, class_mapping, all_imgs, cfg
def image_generate_optimizer_classifier(classes_count, train_imgs, curr_config):
    """
    Build and compile the RPN and classifier models on a shared VGG16 base.

    Generates the ground-truth anchor data, defines the RPN and the
    classifier heads on top of the shared VGG layers, optionally loads
    pre-trained weights, and compiles the models.
    :param classes_count: number of training samples per class (dict).
    :param train_imgs: all the training dataset.
    :param curr_config: updated config class.
    :return model_rpn: compiled RPN model.
    :return model_all: combined model holding RPN + classifier, used to
        load/save weights.
    :return data_gen_train: generator yielding ground-truth anchor data.
    :return model_classifier: compiled classifier model.
    """
    data_gen_train = data_generators.get_anchor_ground_truth(
        train_imgs, classes_count, curr_config, nn.get_img_output_length,
        K.image_data_format())
    # Input image shape depends on the backend's channel ordering.
    if K.image_data_format() == 'channels_first':
        input_shape_img = (3, None, None)
    else:
        input_shape_img = (None, None, 3)
    img_input = Input(shape=input_shape_img)
    roi_input = Input(shape=(None, 4))
    # define the base network (VGG)
    shared_layers = nn.vgg_network(img_input)
    # define the RPN, built on the base layers
    num_anchors = len(curr_config.anchor_box_scales) * len(curr_config.anchor_box_ratios)
    rpn = nn.rpn(shared_layers, num_anchors)
    classifier = nn.classifier(shared_layers, roi_input, curr_config.num_rois,
                               nb_classes=len(classes_count))
    model_rpn = Model(img_input, rpn[:2])
    model_classifier = Model([img_input, roi_input], classifier)
    # this is a model that holds both the RPN and the classifier, used to
    # load/save weights for the models
    model_all = Model([img_input, roi_input], rpn[:2] + classifier)
    try:
        # This will continue training from the saved/base weights if present.
        print('loading weights from {}'.format(curr_config.base_net_weights))
        model_rpn.load_weights(curr_config.base_net_weights, by_name=True)
        model_classifier.load_weights(curr_config.base_net_weights, by_name=True)
    except Exception:
        # BUG FIX: was a bare `except:`, which also swallowed SystemExit and
        # KeyboardInterrupt; only ordinary exceptions should fall through to
        # the "train from scratch" path.
        print('Could not load pre-trained model weights')
    # compile models (`learning_rate` replaces the deprecated `lr` argument
    # of tf.keras optimizers; the value is unchanged)
    optimizer = Adam(learning_rate=1e-5)
    optimizer_classifier = Adam(learning_rate=1e-5)
    model_rpn.compile(optimizer=optimizer,
                      loss=[rcnn_losses.rpn_loss_classifier(num_anchors),
                            rcnn_losses.rpn_loss_regression(num_anchors)])
    model_classifier.compile(
        optimizer=optimizer_classifier,
        loss=[rcnn_losses.class_loss_classifier,
              rcnn_losses.class_loss_regression(len(classes_count) - 1)],
        metrics={'dense_class_{}'.format(len(classes_count)): 'accuracy'})
    # model_all is only ever used for weight I/O (see save_weights in
    # image_training), so the loss choice here is inert.
    model_all.compile(optimizer='sgd', loss='mae')
    return model_rpn, model_all, data_gen_train, model_classifier
def image_training(curr_config, class_mapping, model_all, model_rpn, model_classifier, data_gen_train):
    """
    Run the Faster R-CNN training loop.

    For each iteration: draw a batch from the anchor ground-truth generator,
    train the RPN on it, convert the RPN output to ROI proposals, match the
    proposals against the ground truth, sample positive/negative ROIs, and
    train the classifier head on them.  At the end of every epoch the mean
    losses are reported, the best (lowest total-loss) weights are saved, and
    a row is appended to the training-process csv used for plotting.
    :param curr_config: updated config class.
    :param class_mapping: abnormal behaviour classes(labels).
    :param model_all: combined model holding RPN + classifier weights.
    :param model_rpn: compiled RPN model.
    :param model_classifier: compiled classifier model.
    :param data_gen_train: generator of ground-truth anchor batches.
    """
    print('Starting image training')
    # start time
    start_time = time.time()
    rpn_accuracy_rpn_monitor = []
    rpn_accuracy_for_epoch = []
    # Per-iteration losses; columns: [rpn_cls, rpn_regr, class_cls,
    # class_regr, class_accuracy].
    losses = np.zeros((curr_config.epoch_length, 5))
    best_loss = np.Inf
    iter_num = 0
    all_data = []
    # record_dataset = pd.DataFrame(columns=['mean_overlapping_bboxes', 'class_acc', 'loss_rpn_cls','loss_rpn_regr', 'loss_class_cls', 'loss_class_regr','curr_loss', 'elapsed_time'])
    for epoch_num in range(curr_config.num_epochs):
        progbar = generic_utils.Progbar(curr_config.epoch_length)
        print('Epoch {}/{}'.format(epoch_num + 1, curr_config.num_epochs))
        while True:
            try:
                # Periodically report how many RPN proposals overlapped the
                # ground truth over the last epoch_length iterations.
                if len(rpn_accuracy_rpn_monitor) == curr_config.epoch_length and curr_config.verbose:
                    mean_overlapping_bboxes = float(sum(rpn_accuracy_rpn_monitor)) / len(rpn_accuracy_rpn_monitor)
                    rpn_accuracy_rpn_monitor = []
                    print(
                        'Average number of overlapping bounding boxes from RPN = {} for {} previous iterations'.format(
                            mean_overlapping_bboxes, curr_config.epoch_length))
                    if mean_overlapping_bboxes == 0:
                        print(
                            'RPN is not producing bounding boxes that overlap the ground truth boxes. Check RPN settings or keep training.')
                # X: preprocessed image batch, Y: [y_rpn_classifier, y_rpn_regr].
                X, Y, img_data = next(data_gen_train)
                loss_rpn = model_rpn.train_on_batch(X, Y)
                P_rpn = model_rpn.predict_on_batch(X)
                # Convert the RPN output into at most 300 ROI boxes.
                R = roi_helpers.rpn_to_roi(P_rpn[0], P_rpn[1], curr_config, K.image_data_format(), use_regr=True,
                                           overlap_thresh=0.7,
                                           max_boxes=300)
                # calc_iou converts from (x1,y1,x2,y2) to (x,y,w,h) format.
                # X2: bounding boxes that IntersectionOfUnion is greater than config.classifierMinOverlap.
                # for all bounding boxes ground truth in 300 non_max_suppression bounding boxes.
                # Y1: one hot code for bounding boxes from above => x_roi (X).
                # Y2: corresponding labels and corresponding gt bounding boxes.
                X2, Y1, Y2, IouS = roi_helpers.calculate_iou(R, img_data, curr_config, class_mapping)
                if X2 is None:
                    # No ROI overlapped the ground truth; record zero accuracy
                    # and try the next batch.
                    rpn_accuracy_rpn_monitor.append(0)
                    rpn_accuracy_for_epoch.append(0)
                    continue
                # The last one-hot column marks the background ('bg') class.
                neg_samples = np.where(Y1[0, :, -1] == 1)
                pos_samples = np.where(Y1[0, :, -1] == 0)
                if len(neg_samples) > 0:
                    neg_samples = neg_samples[0]
                else:
                    neg_samples = []
                if len(pos_samples) > 0:
                    pos_samples = pos_samples[0]
                else:
                    pos_samples = []
                rpn_accuracy_rpn_monitor.append(len(pos_samples))
                rpn_accuracy_for_epoch.append((len(pos_samples)))
                if curr_config.num_rois > 1:
                    # if number of positive anchors is at least num_rois//2
                    # --> randomly choose num_rois//2 pos samples.
                    if len(pos_samples) < curr_config.num_rois // 2:
                        selected_pos_samples = pos_samples.tolist()
                    else:
                        selected_pos_samples = np.random.choice(pos_samples, curr_config.num_rois // 2,
                                                                replace=False).tolist()
                    # Fill the remainder with negatives; fall back to sampling
                    # with replacement when there are too few of them.
                    try:
                        selected_neg_samples = np.random.choice(neg_samples,
                                                                curr_config.num_rois - len(selected_pos_samples),
                                                                replace=False).tolist()
                    except:
                        selected_neg_samples = np.random.choice(neg_samples,
                                                                curr_config.num_rois - len(selected_pos_samples),
                                                                replace=True).tolist()
                    sel_samples = selected_pos_samples + selected_neg_samples
                else:
                    # in the extreme case where num_rois = 1, we pick a random pos or neg sample
                    selected_pos_samples = pos_samples.tolist()
                    selected_neg_samples = neg_samples.tolist()
                    if np.random.randint(0, 2):
                        sel_samples = random.choice(neg_samples)
                    else:
                        sel_samples = random.choice(pos_samples)
                loss_class = model_classifier.train_on_batch([X, X2[:, sel_samples, :]],
                                                             [Y1[:, sel_samples, :], Y2[:, sel_samples, :]])
                losses[iter_num, 0] = loss_rpn[1]
                losses[iter_num, 1] = loss_rpn[2]
                losses[iter_num, 2] = loss_class[1]
                losses[iter_num, 3] = loss_class[2]
                losses[iter_num, 4] = loss_class[3]
                progbar.update(iter_num + 1, [('rpn_cls', losses[iter_num, 0]), ('rpn_regr', losses[iter_num, 1]),
                                              ('detector_cls', losses[iter_num, 2]),
                                              ('detector_regr', losses[iter_num, 3])])
                iter_num += 1
                # End of epoch: summarise, maybe checkpoint, log to csv.
                if iter_num == curr_config.epoch_length:
                    loss_rpn_cls = np.mean(losses[:, 0])
                    loss_rpn_regr = np.mean(losses[:, 1])
                    loss_class_cls = np.mean(losses[:, 2])
                    loss_class_regr = np.mean(losses[:, 3])
                    class_acc = np.mean(losses[:, 4])
                    mean_overlapping_bboxes = float(sum(rpn_accuracy_for_epoch)) / len(rpn_accuracy_for_epoch)
                    rpn_accuracy_for_epoch = []
                    if curr_config.verbose:
                        print('Mean number of bounding boxes from RPN overlapping ground truth boxes: {}'.format(
                            mean_overlapping_bboxes))
                        print('Classifier accuracy for bounding boxes from RPN: {}'.format(class_acc))
                        print('Loss RPN classifier: {}'.format(loss_rpn_cls))
                        print('Loss RPN regression: {}'.format(loss_rpn_regr))
                        print('Loss Detector classifier: {}'.format(loss_class_cls))
                        print('Loss Detector regression: {}'.format(loss_class_regr))
                        print('Elapsed time: {}'.format(time.time() - start_time))
                    elapsed_time = (time.time() - start_time)
                    curr_loss = loss_rpn_cls + loss_rpn_regr + loss_class_cls + loss_class_regr
                    iter_num = 0
                    # finish time
                    start_time = time.time()
                    # Checkpoint whenever the summed loss improves.
                    if curr_loss < best_loss:
                        if curr_config.verbose:
                            print('Total loss decreased from {} to {}, saving weights'.format(best_loss, curr_loss))
                        best_loss = curr_loss
                        model_all.save_weights(curr_config.model_path)
                    # Create record csv file to store the train process
                    new_row = {'mean_overlapping_bboxes': round(mean_overlapping_bboxes, 3),
                               'class_acc': round(float(class_acc), 3),
                               'loss_rpn_cls': round(float(loss_rpn_cls), 3),
                               'loss_rpn_regr': round(float(loss_rpn_regr), 3),
                               'loss_class_cls': round(float(loss_class_cls), 3),
                               'loss_class_regr': round(float(loss_class_regr), 3),
                               'curr_loss': round(curr_loss, 3),
                               'elapsed_time': round(elapsed_time, 3)}
                    all_data.append(new_row)
                    # Rewrite the whole csv each epoch so it is always current.
                    record_dataset = pd.DataFrame(all_data)
                    record_dataset.to_csv(curr_config.train_process_data_file, index=0)
                    break
            except Exception as e:
                # NOTE(review): this broad catch keeps training going after any
                # per-iteration error, but an error that repeats every
                # iteration will loop forever here -- confirm this is intended.
                print('Exception while Training images: {}'.format(e))
                continue
    print('Training complete, exiting.')
def main():
    """
    Run the full training pipeline end to end.

    Loads/updates the config, builds and compiles the models, trains them,
    and finally plots the training-process graphs from the generated csv.
    """
    counts, label_map, images, cfg = update_execute_config()
    rpn_model, combined_model, train_generator, classifier_model = \
        image_generate_optimizer_classifier(counts, images, cfg)
    image_training(cfg, label_map, combined_model, rpn_model,
                   classifier_model, train_generator)
    train_process_graph(cfg.train_process_data_file)
if __name__ == "__main__":
main()
quit(0)
|
{"hexsha": "45035253616fae9d29c4f2ccd865e036ec60eac6", "size": 16687, "ext": "py", "lang": "Python", "max_stars_repo_path": "train_frcnn.py", "max_stars_repo_name": "piranavie/final_project_behaviour_detection", "max_stars_repo_head_hexsha": "84f91ed240b2be02e6d4ad562c2f4a9a185583e4", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "train_frcnn.py", "max_issues_repo_name": "piranavie/final_project_behaviour_detection", "max_issues_repo_head_hexsha": "84f91ed240b2be02e6d4ad562c2f4a9a185583e4", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "train_frcnn.py", "max_forks_repo_name": "piranavie/final_project_behaviour_detection", "max_forks_repo_head_hexsha": "84f91ed240b2be02e6d4ad562c2f4a9a185583e4", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 47.8137535817, "max_line_length": 182, "alphanum_fraction": 0.6210822796, "include": true, "reason": "import numpy", "num_tokens": 3364}
|
import numpy as np
from rlpyt.replays.non_sequence.n_step import (NStepReturnBuffer,
SamplesFromReplay)
from rlpyt.replays.non_sequence.uniform import UniformReplay
from rlpyt.replays.non_sequence.prioritized import PrioritizedReplay
from rlpyt.replays.async_ import AsyncReplayBufferMixin
from rlpyt.utils.collections import namedarraytuple
from rlpyt.utils.buffer import torchify_buffer, buffer_from_example
SamplesFromReplayTL = namedarraytuple("SamplesFromReplayTL",
SamplesFromReplay._fields + ("timeout", "timeout_n"))
class NStepTimeLimitBuffer(NStepReturnBuffer):
    """For use in e.g. SAC when bootstrapping when 'done' due to timeout.

    Maintains ``samples_timeout_n`` alongside the base buffer's n-step
    signals: whether the terminal at the end of each n-step return window
    was caused by a time limit.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        if self.n_step_return > 1:
            # Separate (T, B) buffer for the n-step timeout signal; it is
            # filled in by compute_returns().
            self.samples_timeout_n = buffer_from_example(
                self.samples.timeout[0, 0], (self.T, self.B),
                share_memory=self.async_)
        else:
            # With 1-step returns, timeout_n is identical to timeout, so
            # simply alias the underlying array.
            self.samples_timeout_n = self.samples.timeout

    def extract_batch(self, T_idxs, B_idxs):
        """Extend the base replay batch with timeout and timeout_n fields."""
        batch = super().extract_batch(T_idxs, B_idxs)
        batch = SamplesFromReplayTL(*batch,
            timeout=self.samples.timeout[T_idxs, B_idxs],
            timeout_n=self.samples_timeout_n[T_idxs, B_idxs],
            )
        return torchify_buffer(batch)

    def compute_returns(self, T):
        """Compute base n-step quantities, then fill timeout_n for the T
        newly written time steps."""
        super().compute_returns(T)
        if self.n_step_return == 1:
            return  # timeout_n = timeout (aliased in __init__).
        # Propagate timeout backwards into timeout_n, like done and done_n.
        t, nm1 = self.t, self.n_step_return - 1
        if t - nm1 >= 0 and t + T <= self.T:  # No wrap around buffer end.
            idxs = slice(t - nm1, t - nm1 + T)
            to_idxs = slice(t, t + T)
        else:
            # BUG FIX: wrap indices by the buffer length ``self.T`` (as the
            # no-wrap condition above implies), not by the batch size ``T``.
            # The original ``% T`` mapped indices into [0, T), writing the
            # wrong buffer rows whenever wrap-around occurred.
            idxs = np.arange(t - nm1, t - nm1 + T) % self.T
            to_idxs = np.arange(t, t + T) % self.T
        # timeout_n: the n-step window ended (done_n) AND the sample at the
        # end of the window was flagged as a timeout.
        self.samples_timeout_n[idxs] = (self.done_n[idxs] *
            self.samples.timeout[to_idxs])
class TlUniformReplayBuffer(UniformReplay, NStepTimeLimitBuffer):
    """Uniform-sampling replay buffer that also tracks timeout signals."""
    pass
class TlPrioritizedReplayBuffer(PrioritizedReplay, NStepTimeLimitBuffer):
    """Prioritized replay buffer that also tracks timeout signals."""
    pass
class AsyncTlUniformReplayBuffer(AsyncReplayBufferMixin,
        TlUniformReplayBuffer):
    """Async (shared-memory) variant of TlUniformReplayBuffer."""
    pass
class AsyncTlPrioritizedReplayBuffer(AsyncReplayBufferMixin,
        TlPrioritizedReplayBuffer):
    """Async (shared-memory) variant of TlPrioritizedReplayBuffer."""
    pass
|
{"hexsha": "9cf3518a21621c2de17079fae0411364460a5d42", "size": 2318, "ext": "py", "lang": "Python", "max_stars_repo_path": "rlpyt/replays/non_sequence/time_limit.py", "max_stars_repo_name": "cambel/rlpyt", "max_stars_repo_head_hexsha": "96e231d6c77ba5ff06dd09f6e9c8837f0abb1a89", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 17, "max_stars_repo_stars_event_min_datetime": "2020-12-07T11:10:03.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-21T04:18:13.000Z", "max_issues_repo_path": "rlpyt/replays/non_sequence/time_limit.py", "max_issues_repo_name": "cambel/rlpyt", "max_issues_repo_head_hexsha": "96e231d6c77ba5ff06dd09f6e9c8837f0abb1a89", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 3, "max_issues_repo_issues_event_min_datetime": "2021-02-20T01:59:20.000Z", "max_issues_repo_issues_event_max_datetime": "2021-12-08T09:19:43.000Z", "max_forks_repo_path": "rlpyt/replays/non_sequence/time_limit.py", "max_forks_repo_name": "cambel/rlpyt", "max_forks_repo_head_hexsha": "96e231d6c77ba5ff06dd09f6e9c8837f0abb1a89", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2021-04-19T14:40:32.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-29T15:56:38.000Z", "avg_line_length": 33.5942028986, "max_line_length": 76, "alphanum_fraction": 0.6794650561, "include": true, "reason": "import numpy", "num_tokens": 586}
|
# Copyright 2020 The TensorFlow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific
"""Class to test point sampling operations"""
import os
import sys
import numpy as np
import tensorflow as tf
from absl.testing import parameterized
from tensorflow_graphics.util import test_case
from pylib.pc import PointCloud
from pylib.pc import Grid
from pylib.pc import sample
from pylib.pc import Neighborhood
from pylib.pc.tests import utils
class SamplingTest(test_case.TestCase):
  """Tests for the point-cloud `sample` op ('poisson' and 'average' modes)."""

  @parameterized.parameters(
    (100, 8, 0.1, 3),
    (100, 8, 0.1, 3),
    (100, 16, 0.1, 4)
  )
  def test_sampling_poisson_disk_on_random(
      self, num_points, batch_size, cell_size, dimension):
    """Poisson-disk sampling on random segmented point clouds.

    Computes the minimum pairwise distance among sampled points per batch
    item.  NOTE(review): the final assertLess checks the minimum distance is
    *below* cell_size + 1e-3; for Poisson-disk sampling one might expect a
    lower bound instead -- confirm the intended direction of the check.
    """
    cell_sizes = np.float32(np.repeat(cell_size, dimension))
    points, batch_ids = utils._create_random_point_cloud_segmented(
        batch_size, num_points * batch_size, dimension=dimension,
        sizes=np.ones(batch_size, dtype=int) * num_points)
    point_cloud = PointCloud(points, batch_ids)
    grid = Grid(point_cloud, cell_sizes)
    neighborhood = Neighborhood(grid, cell_sizes)
    sampled_point_cloud, _ = sample(neighborhood, 'poisson')
    sampled_points = sampled_point_cloud._points.numpy()
    sampled_batch_ids = sampled_point_cloud._batch_ids.numpy()
    min_dist = 1.0
    for i in range(batch_size):
      indices = np.where(sampled_batch_ids == i)
      diff = np.expand_dims(sampled_points[indices], 1) - \
          np.expand_dims(sampled_points[indices], 0)
      # Pairwise distance matrix; after sorting each row, column 0 is the
      # zero self-distance, so column 1 is the nearest-neighbour distance.
      dists = np.linalg.norm(diff, axis=2)
      dists = np.sort(dists, axis=1)
      min_dist = min(min_dist, np.amin(dists[:, 1]))
    self.assertLess(min_dist, cell_size + 1e-3)

  @parameterized.parameters(
    (6, 1),
    (100, 5)
  )
  def test_sampling_poisson_disk_on_uniform(self, num_points_sqrt, scale):
    """Poisson-disk sampling on a uniform 2D grid keeps half the points."""
    points = utils._create_uniform_distributed_point_cloud_2D(
        num_points_sqrt, scale=scale)
    # Cell edge = twice the grid spacing of the uniform layout.
    cell_sizes = scale * np.array([2, 2], dtype=np.float32) \
        / num_points_sqrt
    batch_ids = np.zeros([len(points)])
    point_cloud = PointCloud(points, batch_ids)
    grid = Grid(point_cloud, cell_sizes)
    neighborhood = Neighborhood(grid, cell_sizes)
    sample_point_cloud, _ = sample(neighborhood, 'poisson')
    sampled_points = sample_point_cloud._points.numpy()
    expected_num_pts = num_points_sqrt ** 2 // 2
    self.assertTrue(len(sampled_points) == expected_num_pts)

  @parameterized.parameters(
    (100, 2, 0.1, 3),
    (100, 8, 0.7, 3),
    (50, 2, np.sqrt(3), 3),
  )
  def test_sampling_average_on_random(
      self, num_points, batch_size, cell_size, dimension):
    """'average' sampling matches a per-cell numpy mean computation.

    NOTE(review): inside the loop `cur_point` is reset to a hard-coded
    3-element Python list, so the reference only works for dimension == 3
    (all current parameter sets use 3); also `list += ndarray` extends the
    list rather than adding element-wise -- verify this reference path
    actually runs as intended before extending the parameter sets.
    """
    cell_sizes = np.repeat(cell_size, dimension)
    points, batch_ids = utils._create_random_point_cloud_segmented(
        batch_size, num_points * batch_size, dimension=dimension,
        sizes=np.ones(batch_size, dtype=int) * num_points)
    #print(points.shape, batch_ids.shape)
    point_cloud = PointCloud(points=points, batch_ids=batch_ids)
    grid = Grid(point_cloud, cell_sizes)
    neighborhood = Neighborhood(grid, cell_sizes)
    sample_point_cloud, _ = sample(neighborhood, 'average')
    sampled_points_tf = sample_point_cloud._points.numpy()
    sorted_keys = neighborhood._grid._sorted_keys.numpy()
    sorted_points = neighborhood._grid._sorted_points.numpy()
    # Reference: average the (key-sorted) points per grid cell key.
    sampled_points_numpy = []
    cur_point = np.repeat(0.0, dimension)
    cur_key = -1
    cur_num_points = 0.0
    for pt_id, cur_key_point in enumerate(sorted_keys):
      if cur_key_point != cur_key:
        if cur_key != -1:
          # Close out the previous cell with its mean point.
          cur_point /= cur_num_points
          sampled_points_numpy.append(cur_point)
        cur_key = cur_key_point
        cur_point = [0.0, 0.0, 0.0]
        cur_num_points = 0.0
      cur_point += sorted_points[pt_id]
      cur_num_points += 1.0
    cur_point /= cur_num_points
    sampled_points_numpy.append(cur_point)
    # Every reference mean must appear (within tolerance) in the op output.
    equal = True
    for point_numpy in sampled_points_numpy:
      found = False
      for point_tf in sampled_points_tf:
        if np.all(np.abs(point_numpy - point_tf) < 0.0001):
          found = True
      equal = equal and found
    self.assertTrue(equal)
# Allow running this test module directly.
if __name__ == '__main__':
  test_case.main()
|
{"hexsha": "69284c726a35484b66a007fd7755ca1de73bc4d4", "size": 4574, "ext": "py", "lang": "Python", "max_stars_repo_path": "pylib/pc/tests/sample_test.py", "max_stars_repo_name": "schellmi42/tensorflow_graphics_point_clouds", "max_stars_repo_head_hexsha": "c8e2dc2963c3eecfb27542449603f81d78494783", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2020-07-10T12:07:02.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-21T09:28:55.000Z", "max_issues_repo_path": "pylib/pc/tests/sample_test.py", "max_issues_repo_name": "schellmi42/tensorflow_graphics_point_clouds", "max_issues_repo_head_hexsha": "c8e2dc2963c3eecfb27542449603f81d78494783", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "pylib/pc/tests/sample_test.py", "max_forks_repo_name": "schellmi42/tensorflow_graphics_point_clouds", "max_forks_repo_head_hexsha": "c8e2dc2963c3eecfb27542449603f81d78494783", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-10-11T08:27:44.000Z", "max_forks_repo_forks_event_max_datetime": "2021-10-11T08:27:44.000Z", "avg_line_length": 35.1846153846, "max_line_length": 74, "alphanum_fraction": 0.7070397901, "include": true, "reason": "import numpy", "num_tokens": 1167}
|
import attr
import torch
import numpy
import pytest
import tattr
def test_attrib_metadata():
    """tattrs are defined by metadata on attrs classes.

    * Dispatches through ``attr.s`` if the class is not already attr-ed.
    * Creates __tattr_attrs__ entries for any attribs with "tensor" metadata.
    * Captures the target tensor type from the attr.ib.
    """

    @tattr.s
    class Point(object):
        foo = attr.ib()
        x = attr.ib(metadata=dict(tensor=torch.Tensor))
        y = attr.ib(metadata=dict(tensor=numpy.ndarray))

    # BUG FIX: these comparisons were bare expressions and therefore never
    # checked anything; they must be assertions to make the test meaningful.
    assert len(attr.fields(Point)) == 3
    assert len(tattr.tensor_fields(Point)) == 2
    assert tattr.tensor_fields(Point).x.tensor_type == torch.Tensor
    assert tattr.tensor_fields(Point).y.tensor_type == numpy.ndarray
def test_tensor_fields():
    """Tensor fields accessor accesses tensor attributes.

    * Returns tuple of tensor attributes, accessible by name or position.
    * Raises TypeError if passed an instance.
    * Raises NotTattrsClassError if passed a non-tattrs type.
    * Raises TypeError if passed an unrelated value.
    """
    # passed class
    @tattr.s
    class Point(object):
        foo = attr.ib()
        x = attr.ib(metadata=dict(tensor=torch.Tensor))
        y = attr.ib(metadata=dict(tensor=numpy.ndarray))

    # BUG FIX: all comparisons below were bare expressions (no-ops); they
    # are now real assertions.
    assert len(attr.fields(Point)) == 3
    assert len(tattr.tensor_fields(Point)) == 2
    # Access by name
    assert tattr.tensor_fields(Point).x.tensor_type == torch.Tensor
    assert tattr.tensor_fields(Point).y.tensor_type == numpy.ndarray
    # Access by position
    x, y = tattr.tensor_fields(Point)
    assert x.tensor_type == torch.Tensor
    assert y.tensor_type == numpy.ndarray
    # passed instance
    # BUG FIX: Point takes three arguments (foo, x, y), so the original
    # Point([0], [0]) raised TypeError for a *missing argument*, not for
    # passing an instance; construct a valid instance so the raise comes
    # from tensor_fields itself.
    with pytest.raises(TypeError):
        tattr.tensor_fields(Point(0, [0], [0]))
    # attrs, not tattrs, class
    @attr.s
    class AttrPoint(object):
        foo = attr.ib()
        x = attr.ib(metadata=dict(tensor=torch.Tensor))
        y = attr.ib(metadata=dict(tensor=numpy.ndarray))

    # BUG FIX: checked `Point` here instead of the just-defined `AttrPoint`.
    assert len(attr.fields(AttrPoint)) == 3
    with pytest.raises(tattr.exceptions.NotTattrsClassError):
        tattr.tensor_fields(AttrPoint)
    # Unrelated data types
    with pytest.raises(tattr.exceptions.NotTattrsClassError):
        tattr.tensor_fields(int)
    with pytest.raises(TypeError):
        tattr.tensor_fields(1663)
|
{"hexsha": "8356cb3d8e5d8af4bea2c62a5c4542c006316c51", "size": 2267, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/test_primitive.py", "max_stars_repo_name": "uw-ipd/tattrs", "max_stars_repo_head_hexsha": "267314a0f315035862392e7ebe35aa15a07549f8", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2019-10-15T01:24:17.000Z", "max_stars_repo_stars_event_max_datetime": "2021-04-15T18:03:06.000Z", "max_issues_repo_path": "tests/test_primitive.py", "max_issues_repo_name": "uw-ipd/tattrs", "max_issues_repo_head_hexsha": "267314a0f315035862392e7ebe35aa15a07549f8", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tests/test_primitive.py", "max_forks_repo_name": "uw-ipd/tattrs", "max_forks_repo_head_hexsha": "267314a0f315035862392e7ebe35aa15a07549f8", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 26.6705882353, "max_line_length": 77, "alphanum_fraction": 0.6735774151, "include": true, "reason": "import numpy", "num_tokens": 522}
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Aug 1 22:54:02 2019
@author: alex
"""
###############################################################################
import numpy as np
###############################################################################
from pyro.control import controller
###############################################################################
class kinematicInputs(controller.StaticController):
    """
    Scripted open-loop input source for the kinematic vehicle model.
    ----------------------------------------
    The feedback signals y and r are ignored; c() returns a purely
    time-scheduled input vector u(t) = [steering, speed].
    ----------------------------------------
    """

    ############################
    def __init__(self, k=1):
        """Set the fixed dimensions and register with the base controller."""
        # Dimensions (fixed for this test controller; the `k` argument is
        # ignored -- self.k is always 3).
        self.k = 3
        self.m = 2
        self.p = 3
        controller.StaticController.__init__(self, self.k, self.m, self.p)
        # Label
        self.name = 'Proportionnal Controller'
        # Gains
        self.gain = 1

    #############################
    def c(self, y, r, t=0):
        """
        Return the scripted input vector u(t).

        INPUTS
        y : sensor signal_proc vector  p x 1  (ignored)
        r : reference signal_proc vector  k x 1  (ignored)
        t : time  1 x 1
        OUPUTS
        u : control inputs vector  m x 1
        """
        steer_limit = 0.15
        cruise_speed = 5.0000
        u = np.zeros(self.m)  # input vector
        u[1] = cruise_speed  # constant speed command in every phase
        if t < 1:
            u[0] = 0  # hold straight for the first second
        elif t <= 3:
            # ramp steering from 0 to steer_limit over t in [1, 3]
            u[0] = float(steer_limit / 2.0000 * (t - 1.0000))
        else:
            u[0] = steer_limit  # hold maximum steering afterwards
        return u
class dynLongVelInputs(controller.StaticController):
    """
    Scripted open-loop input source for the longitudinal-velocity model.
    ---------------------------------------
    The feedback signals y and r are ignored; c() returns a purely
    time-scheduled input vector u(t) = [steering, speed].
    ---------------------------------------
    """

    ############################
    def __init__(self, k=1):
        """Set the fixed dimensions and register with the base controller."""
        # Dimensions (fixed for this test controller; the `k` argument is
        # ignored -- self.k is always 5).
        self.k = 5
        self.m = 2
        self.p = 5
        controller.StaticController.__init__(self, self.k, self.m, self.p)
        # Label
        self.name = 'Proportionnal Controller'
        # Gains
        self.gain = 1

    #############################
    def c(self, y, r, t=0):
        """
        Return the scripted input vector u(t).

        INPUTS
        y : sensor signal_proc vector  p x 1  (ignored)
        r : reference signal_proc vector  k x 1  (ignored)
        t : time  1 x 1
        OUPUTS
        u : control inputs vector  m x 1
        """
        steer_limit = 0.15
        cruise_speed = 15.0000
        u = np.zeros(self.m)  # input vector
        u[1] = cruise_speed  # constant speed command in every phase
        if t < 1:
            u[0] = 0  # hold straight for the first second
        elif t <= 3:
            # ramp steering from 0 to steer_limit over t in [1, 3]
            u[0] = float(steer_limit / 2.0000 * (t - 1.0000))
        else:
            u[0] = steer_limit  # hold maximum steering afterwards
        return u
class dynLongForcesInputs(controller.StaticController):
    """
    Scripted open-loop input source for the longitudinal-forces model.
    ---------------------------------------
    The feedback signals y and r are ignored; c() returns a purely
    time-scheduled input vector u(t); u[0] is steering, u[2] is a drive
    force and u[1] is held at zero.
    ---------------------------------------
    """

    ############################
    def __init__(self, k=1):
        """Set the fixed dimensions and register with the base controller."""
        # Dimensions (fixed for this test controller; the `k` argument is
        # ignored -- self.k is always 6).
        self.k = 6
        self.m = 3
        self.p = 6
        controller.StaticController.__init__(self, self.k, self.m, self.p)
        # Label
        self.name = 'Proportionnal Controller'
        # Gains
        self.gain = 1

    #############################
    def c(self, y, r, t=0):
        """
        Return the scripted input vector u(t).

        INPUTS
        y : sensor signal_proc vector  p x 1  (ignored)
        r : reference signal_proc vector  k x 1  (ignored)
        t : time  1 x 1
        OUPUTS
        u : control inputs vector  m x 1
        """
        peak_steer = 0.3
        u = np.zeros(self.m)  # input vector
        if 0 <= t < 25:
            # ramp steering up while applying a constant drive force
            u[0] = peak_steer / 25 * t
            u[2] = 1500
        else:
            # hold maximum steering, no drive force (also covers t < 0)
            u[0] = peak_steer
            u[2] = 0
        u[1] = 0
        return u
class fullDynTorqueInputs(controller.StaticController):
    """
    Scripted open-loop input source for the full-dynamics (torque) model.
    ---------------------------------------
    The feedback signals y and r are ignored; c() returns a purely
    time-scheduled input vector u(t); u[2] is a torque command while u[0]
    and u[1] are held at zero.
    ---------------------------------------
    """

    ############################
    def __init__(self, k=1):
        """Set the fixed dimensions and register with the base controller."""
        # Dimensions (fixed for this test controller; the `k` argument is
        # ignored -- self.k is always 8).
        self.k = 8
        self.m = 3
        self.p = 8
        controller.StaticController.__init__(self, self.k, self.m, self.p)
        # Label
        self.name = 'Proportionnal Controller'
        # Gains
        self.gain = 1

    #############################
    def c(self, y, r, t=0):
        """
        Return the scripted input vector u(t).

        INPUTS
        y : sensor signal_proc vector  p x 1  (ignored)
        r : reference signal_proc vector  k x 1  (ignored)
        t : time  1 x 1
        OUPUTS
        u : control inputs vector  m x 1
        """
        drive_torque = 1000
        u = np.zeros(self.m)  # input vector
        u[0] = 0  # steering held at zero throughout
        if t < 10:
            u[2] = drive_torque  # constant positive torque
        elif t < 20:
            u[2] = 1000 - 1000 / 10.000 * (t - 10)  # ramp torque down to 0
        elif t < 25:
            u[2] = 0  # coast
        elif t < 35:
            u[2] = -drive_torque / 10.0000 * (t - 25)  # ramp braking torque
        else:
            u[2] = -drive_torque  # hold full braking torque
        u[1] = 0
        return u
class fullDynVoltInputs(controller.StaticController):
    """
    Scripted open-loop input source for the full-dynamics (voltage) model.
    ---------------------------------------
    The feedback signals y and r are ignored; c() returns a purely
    time-scheduled input vector u(t); u[0] is a steering trapezoid and
    u[1] is held constant at 5.
    ---------------------------------------
    """

    ############################
    def __init__(self, k=1):
        """Set the fixed dimensions and register with the base controller."""
        # Dimensions (fixed for this test controller; the `k` argument is
        # ignored -- self.k is always 7).
        self.k = 7
        self.m = 2
        self.p = 7
        controller.StaticController.__init__(self, self.k, self.m, self.p)
        # Label
        self.name = 'Proportionnal Controller'
        # Gains
        self.gain = 1

    #############################
    def c(self, y, r, t=0):
        """
        Return the scripted input vector u(t).

        INPUTS
        y : sensor signal_proc vector  p x 1  (ignored)
        r : reference signal_proc vector  k x 1  (ignored)
        t : time  1 x 1
        OUPUTS
        u : control inputs vector  m x 1
        """
        peak_steer = 0.3
        u = np.zeros(self.m)  # input vector
        u[1] = 5  # constant second input in every phase
        # Steering trapezoid: flat / ramp up / hold / ramp down / flat.
        if t < 10:
            u[0] = 0
        elif t < 20:
            u[0] = peak_steer / 10.000 * (t - 10)
        elif t < 25:
            u[0] = peak_steer
        elif t < 35:
            u[0] = peak_steer - peak_steer / 10.0000 * (t - 25)
        else:
            u[0] = 0
        return u
|
{"hexsha": "d4a3a4a874620c817d936242db12bf7be958bfde", "size": 10885, "ext": "py", "lang": "Python", "max_stars_repo_path": "projects/vehicle_modeling/test_vehicle_controllers.py", "max_stars_repo_name": "simonchamorro/pyro", "max_stars_repo_head_hexsha": "a637d61e1d49b22f178b3889dc0092c9e1704adc", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "projects/vehicle_modeling/test_vehicle_controllers.py", "max_issues_repo_name": "simonchamorro/pyro", "max_issues_repo_head_hexsha": "a637d61e1d49b22f178b3889dc0092c9e1704adc", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "projects/vehicle_modeling/test_vehicle_controllers.py", "max_forks_repo_name": "simonchamorro/pyro", "max_forks_repo_head_hexsha": "a637d61e1d49b22f178b3889dc0092c9e1704adc", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 27.5569620253, "max_line_length": 79, "alphanum_fraction": 0.3638952687, "include": true, "reason": "import numpy", "num_tokens": 2778}
|
# Author: Javad Amirian
# Email: amiryan.j@gmail.com
import glob
import os
import cv2
import numpy as np
def make_bg_image_from_screenshots(im_files):
    """Average a list of same-size screenshots into one background image.

    Parameters
    ----------
    im_files : list of str
        Paths to images readable by cv2.imread; all must share one shape.

    Returns
    -------
    numpy.ndarray of uint8
        Pixel-wise mean of the input images (also shown in a blocking
        cv2 preview window for manual inspection).

    Raises
    ------
    ValueError
        If im_files is empty (the original code crashed on None here).
    """
    if not im_files:
        raise ValueError("im_files is empty: need at least one screenshot")

    im_sum = None
    for im_file in im_files:
        # Accumulate in float64 to avoid uint8 overflow.
        # FIX: np.float was removed in NumPy 1.24 -> use np.float64.
        im_i = cv2.imread(im_file)
        if im_sum is None:
            im_sum = im_i.astype(np.float64)
        else:
            im_sum += im_i.astype(np.float64)

    im_sum = (im_sum / len(im_files)).astype(np.uint8)

    # Preview; blocks until a key is pressed (kept from original behavior).
    cv2.imshow("bg", im_sum)
    cv2.waitKeyEx()
    return im_sum
if __name__ == "__main__":
    # Average every ETH-Hotel screenshot into a single background image
    # and write it next to the sources.
    src_dir = "/home/cyrus/Pictures/ETH-Hotel"
    screenshots = glob.glob(src_dir + "/*.png")
    background = make_bg_image_from_screenshots(screenshots)
    cv2.imwrite(os.path.join(src_dir, "avg.jpg"), background)
|
{"hexsha": "ae75c63549de1f6903f68bbc43516f1f8970610f", "size": 731, "ext": "py", "lang": "Python", "max_stars_repo_path": "opentraj/toolkit/ui/build_background_image.py", "max_stars_repo_name": "fengzileee/OpenTraj", "max_stars_repo_head_hexsha": "71fdfd1e3420d6a3859ae0acaa4acf85abbc1f64", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 232, "max_stars_repo_stars_event_min_datetime": "2020-08-26T10:16:10.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-26T08:39:44.000Z", "max_issues_repo_path": "opentraj/toolkit/ui/build_background_image.py", "max_issues_repo_name": "fengzileee/OpenTraj", "max_issues_repo_head_hexsha": "71fdfd1e3420d6a3859ae0acaa4acf85abbc1f64", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 15, "max_issues_repo_issues_event_min_datetime": "2020-09-25T15:50:05.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-28T19:06:07.000Z", "max_forks_repo_path": "opentraj/toolkit/ui/build_background_image.py", "max_forks_repo_name": "fengzileee/OpenTraj", "max_forks_repo_head_hexsha": "71fdfd1e3420d6a3859ae0acaa4acf85abbc1f64", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 63, "max_forks_repo_forks_event_min_datetime": "2020-08-24T13:45:57.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-26T08:39:52.000Z", "avg_line_length": 23.5806451613, "max_line_length": 63, "alphanum_fraction": 0.6621067031, "include": true, "reason": "import numpy", "num_tokens": 210}
|
# Map each DNA base to a 2-bit value so a k-mer can be encoded as an
# integer in base 4 (DNA_A/DNA_C/DNA_G/DNA_T come from BioSequences).
base_vals = Base.ImmutableDict(DNA_A=>0,DNA_C=>1,DNA_G=>2,DNA_T=>3)

# Return the 1-based index of `mer` in a 4^k-long count vector by reading
# its first k bases as base-4 digits, most significant first.
# NOTE(review): @turbo (LoopVectorization) is applied to a loop with a
# serial dependence on `idx` and a dict lookup in the body — confirm this
# behaves identically to a plain `for` loop before relying on it.
function get_mer_idx(mer,k=5)
    idx = 0
    @turbo for i in 1:k
        idx = 4*idx + base_vals[mer[i]]
    end
    return idx +1
end
# Count every forward k-mer of `seq` into a fresh 4^k vector indexed by
# get_mer_idx.
#
# FIX: forward k to get_mer_idx — the original relied on get_mer_idx's
# default k=5, which mis-indexed (and could out-of-bounds) whenever the
# caller passed k != 5.
function count_mers_4_dist(seq,k = 5)
    counts = zeros(Int,4^k)
    for mer in each(DNAMer{k}, seq)
        current_mer = mer.fw
        current_mer_idx = get_mer_idx(current_mer,k)
        counts[current_mer_idx] += 1
    end
    return counts
end
# In-place variant: accumulate the forward k-mer counts of `seq` into
# column `i` of `mer_array` (a 4^k x n matrix).
#
# FIX: forward k to get_mer_idx — the original relied on get_mer_idx's
# default k=5, which mis-indexed whenever the caller passed k != 5.
function count_mers_4_dist!(seq,mer_array,i,k = 5)
    for mer in each(DNAMer{k}, seq)
        current_mer = mer.fw
        current_mer_idx = get_mer_idx(current_mer,k)
        mer_array[current_mer_idx,i] += 1
    end
end
# Window ("k-order") distance between two equal-length sequences:
# one minus the fraction of length-k windows at which they agree.
# Returns -1.0 when lengths differ (the distance is undefined there).
function kord_distance(n₁,n₂,k = 5)
    len_a, len_b = length(n₁), length(n₂)
    # guard clause: only defined for equal-length inputs
    len_a == len_b || return -1.0
    w = k - 1                       # window spans indices start:start+w
    matches = 0.0
    for start in 1:len_a-w
        matches += n₁[start:start+w] == n₂[start:start+w]
    end
    return 1 - matches/(len_a-w)
end
# k-mer distance between two k-mer count vectors n₁, n₂ belonging to
# sequences of lengths L1, L2: one minus the shared-k-mer count divided
# by the maximum number of k-mers the shorter sequence can contain.
function kmer_distance(n₁,n₂,L1,L2,k=5)
    shared = sum(min.(n₁, n₂))
    return 1 - shared/(min(L1,L2) - k + 1)
end
# Pairwise k-mer distance matrix over the dereplicated sequences in
# `derep` (expects a `sequence` field). Only the lower triangle
# (j <= i) is filled; the upper triangle stays 0, as in the original.
#
# FIXES:
#  * forward k to count_mers_4_dist! — the original used that function's
#    default k=5 even when building a 4^k-sized count matrix, corrupting
#    results for k != 5;
#  * drop the redundant Array{Float64}(undef,n,n) allocation that was
#    immediately discarded by the following zeros(n,n).
function kmer_distance(derep,k = 5)
    seqs = derep.sequence
    n = length(seqs)
    mer_array = zeros(Int,4^k,n)
    for i in 1:n
        count_mers_4_dist!(seqs[i],mer_array,i,k)
    end
    dist_mat = zeros(n,n)
    for i in 1:n
        for j in 1:i
            dist_mat[i,j] = kmer_distance(mer_array[:,i],mer_array[:,j],length(seqs[i]),length(seqs[j]),k)
        end
    end
    return dist_mat
end
# Build the 4^k x n matrix of k-mer counts, one column per sequence in
# `derep.sequence`.
#
# FIX: forward k to count_mers_4_dist! — the original used that
# function's default k=5 regardless of the k requested here.
function kmer_count(derep,k = 5)
    seqs = derep.sequence
    n = length(seqs)
    mer_array = zeros(Int,4^k,n)
    for i in 1:n
        count_mers_4_dist!(seqs[i],mer_array,i,k)
    end
    return mer_array
end
|
{"hexsha": "501468f0da9e27eb03bc3edb0a91dd1ad1289be1", "size": 1700, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/kmers.jl", "max_stars_repo_name": "EvoArt/DenoiseDNA.jl", "max_stars_repo_head_hexsha": "2e4ac9079469ee1cdb87b6d00943a80cfc42f954", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/kmers.jl", "max_issues_repo_name": "EvoArt/DenoiseDNA.jl", "max_issues_repo_head_hexsha": "2e4ac9079469ee1cdb87b6d00943a80cfc42f954", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/kmers.jl", "max_forks_repo_name": "EvoArt/DenoiseDNA.jl", "max_forks_repo_head_hexsha": "2e4ac9079469ee1cdb87b6d00943a80cfc42f954", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 24.6376811594, "max_line_length": 106, "alphanum_fraction": 0.5964705882, "num_tokens": 600}
|
#ヒルベルト行列のcondition numberを求める
import scipy.linalg as linalg
import numpy as np
from numpy import linalg as LA
def calc_hilbert_condition(n):
    """Print and return the 2-norm condition number of the n x n Hilbert matrix.

    The Hilbert matrix H[i, j] = 1 / (i + j + 1) (0-based indices) is a
    classic ill-conditioned matrix whose condition number grows rapidly
    with n.

    Parameters
    ----------
    n : int
        Matrix size.

    Returns
    -------
    float
        The spectral (2-norm) condition number. Returning the value is a
        backward-compatible addition (callers that ignored the previous
        None return are unaffected); the printed output is unchanged.
    """
    # Vectorized construction replaces the original O(n^2) Python loop;
    # the entries are numerically identical.
    idx = np.arange(n)
    A = 1.0 / (idx[:, None] + idx[None, :] + 1)
    cond = LA.cond(A, 2)
    print("matrix size =", n)
    print("condition number is", cond)
    return cond
if __name__ == "__main__":
    # Demonstrate how quickly the Hilbert matrix becomes ill-conditioned.
    for size in (3, 6, 9):
        calc_hilbert_condition(size)
|
{"hexsha": "47e64602e38677700cc58c1386b798ee9b042d73", "size": 444, "ext": "py", "lang": "Python", "max_stars_repo_path": "Numerical_Analysis/report/5/3.py", "max_stars_repo_name": "yoshi-ki/BACHELOR", "max_stars_repo_head_hexsha": "65d01c62ab2ea4a6d2616a6b6c535bd4f1645630", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Numerical_Analysis/report/5/3.py", "max_issues_repo_name": "yoshi-ki/BACHELOR", "max_issues_repo_head_hexsha": "65d01c62ab2ea4a6d2616a6b6c535bd4f1645630", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Numerical_Analysis/report/5/3.py", "max_forks_repo_name": "yoshi-ki/BACHELOR", "max_forks_repo_head_hexsha": "65d01c62ab2ea4a6d2616a6b6c535bd4f1645630", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 24.6666666667, "max_line_length": 47, "alphanum_fraction": 0.6576576577, "include": true, "reason": "import numpy,from numpy,import scipy", "num_tokens": 141}
|
"""
Script for training a Random Forest model on fingerprint representations of molecules.
"""
import os
import warnings
import argparse
import pandas as pd
import numpy as np
from rdkit.Chem import MolFromSmiles, AllChem
from rdkit import DataStructs
from sklearn.model_selection import train_test_split, KFold
from sklearn.metrics import r2_score, mean_squared_error
from sklearn.ensemble import RandomForestRegressor
from sklearn.model_selection import GridSearchCV
from helper import scaffold_split
from parse_data import parse_dataset
def generate_fingerprints(smile):
    """Convert a SMILES string into a 1024-bit Morgan (radius-3) fingerprint.

    Returns a numpy int8 array filled in-place by RDKit's
    ConvertToNumpyArray.
    """
    molecule = MolFromSmiles(smile)
    bit_vect = AllChem.GetMorganFingerprintAsBitVect(molecule, radius=3, nBits=1024)
    fp_array = np.zeros((0,), dtype=np.int8)
    DataStructs.ConvertToNumpyArray(bit_vect, fp_array)
    return fp_array
def fit_forest(X, y, n_estimators=100, random_state=None):
    """Fit a Random Forest regressor on (X, y) and return the fitted model.

    Parameters
    ----------
    X : array-like of shape (n_samples, n_features)
    y : array-like of shape (n_samples,)
    n_estimators : int, default 100
        Forest size; previously hard-coded. The default preserves the
        original behavior exactly.
    random_state : int or None, default None
        Optional seed for reproducible forests (None matches the
        original behavior).
    """
    model = RandomForestRegressor(n_estimators=n_estimators,
                                  random_state=random_state)
    return model.fit(X, y)
def main(args):
    """Train and evaluate an ECFP Random-Forest regressor.

    Runs args.n_runs repetitions; each repetition either K-fold
    cross-validates (random split) or uses one scaffold split, then
    prints per-fold and aggregate R^2 / RMSE.
    """
    warnings.filterwarnings('ignore')

    print('\nTraining ECFP-RF on ' + args.task + ' dataset')
    print('\nGenerating features...')
    if args.task=='IC50':
        print('Subtask: {}'.format(args.subtask))
    smiles_list, y = parse_dataset(args.task, subtask=args.subtask)
    X = np.arange(len(smiles_list)).reshape(-1,1) # array of data indices

    # Hoisted out of the loops: the SMILES frame never changes per fold.
    smiles_df = pd.DataFrame(smiles_list, columns=['smiles'])

    r2_list = []
    rmse_list = []

    print('\nBeginning training loop...')

    j = 0
    for i in range(args.n_runs):
        if args.split == 'random':
            kf = KFold(n_splits=args.n_folds, random_state=i, shuffle=True)
            split_list = kf.split(X)
        elif args.split == 'scaffold':
            train_ind, test_ind = scaffold_split(smiles_list, seed=i)
            # BUG FIX: wrap the single (train, test) pair in a one-element
            # list. The original `[train_ind, test_ind]` made the loop
            # below iterate the two index arrays themselves and try to
            # unpack each one into (train_ind, test_ind).
            split_list = [(train_ind, test_ind)]
        for train_ind, test_ind in split_list:
            y_train, y_test = y[train_ind], y[test_ind]
            train_smiles = smiles_df.iloc[train_ind]['smiles'].to_list()
            test_smiles = smiles_df.iloc[test_ind]['smiles'].to_list()
            # Featurize per fold (fingerprints depend only on SMILES).
            X_train = np.asarray([generate_fingerprints(s) for s in train_smiles])
            X_test = np.asarray([generate_fingerprints(s) for s in test_smiles])

            m = fit_forest(X_train, y_train)

            y_pred = m.predict(X_test)
            score = r2_score(y_test, y_pred)
            rmse = np.sqrt(mean_squared_error(y_test, y_pred))

            print("\nR^2: {:.3f}".format(score))
            print("RMSE: {:.3f}".format(rmse))

            r2_list.append(score)
            rmse_list.append(rmse)
            j += 1

    print("\nmean R^2: {:.4f} +- {:.4f}".format(np.mean(r2_list), np.std(r2_list) / np.sqrt(len(r2_list))))
    print("mean RMSE: {:.4f} +- {:.4f}".format(np.mean(rmse_list), np.std(rmse_list) / np.sqrt(len(rmse_list))))
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('-task', type=str, default='IC50',
                        help='Dataset on which to train ECFP-RF')
    # FIX: help text was a copy-paste of -task's description.
    parser.add_argument('-subtask', type=str, default='A2a',
                        help='Subtask (target) within the IC50 dataset')
    parser.add_argument('-split', type=str, default='random',
                        help='Train/Test splitting method. Possible choices: random/scaffold')
    parser.add_argument('-n_runs', type=int, default=3,
                        help='number of runs for train/test split.')
    parser.add_argument('-n_folds', type=int, default=5,
                        help='number of folds in K-fold cross-validation. Only for random splitting')
    args = parser.parse_args()

    main(args)
|
{"hexsha": "dc76343107c75e04fe0518fb84c8b8043bceecf5", "size": 4148, "ext": "py", "lang": "Python", "max_stars_repo_path": "morgan_rf.py", "max_stars_repo_name": "wjm41/soapgp", "max_stars_repo_head_hexsha": "ef57cebb7413abb96b54983141e188dff5166d03", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 18, "max_stars_repo_stars_event_min_datetime": "2020-05-02T19:50:31.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-11T16:07:52.000Z", "max_issues_repo_path": "morgan_rf.py", "max_issues_repo_name": "SuperXiang/soapgp", "max_issues_repo_head_hexsha": "ef57cebb7413abb96b54983141e188dff5166d03", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2020-11-09T20:47:43.000Z", "max_issues_repo_issues_event_max_datetime": "2020-11-16T21:01:35.000Z", "max_forks_repo_path": "morgan_rf.py", "max_forks_repo_name": "SuperXiang/soapgp", "max_forks_repo_head_hexsha": "ef57cebb7413abb96b54983141e188dff5166d03", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 9, "max_forks_repo_forks_event_min_datetime": "2020-11-22T17:23:29.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-16T05:47:06.000Z", "avg_line_length": 37.7090909091, "max_line_length": 112, "alphanum_fraction": 0.6424783028, "include": true, "reason": "import numpy", "num_tokens": 1043}
|
using JuMP, Base.Test, AmplNLWriter
# solver = AmplNLSolver(Ipopt.amplexe, ["print_level=0"])
# Note min and max not implemented in Couenne
## Solve test problem with simple min functions
#
# max min( x^2, x )
# s.t. -0.5 <= x <= 0.5
#
# The optimal objective value is 0.25.
# x = 0.5
##
@testset "example: maxmin" begin
m = Model(solver=solver)
@variable(m, -0.5 <= x <= 0.5, start = 0.25)
@NLobjective(m, Max, min(x^2, 0.3, x))
@test solve(m) == :Optimal
@test isapprox(getobjectivevalue(m), 0.25, atol=1e-2)
@test isapprox(getvalue(x), 0.5, atol=1e-2)
end
## Solve test problem with simple max functions
#
# min max( x^2, x )
# s.t. -0.5 <= x <= 0.5
#
# The optimal objective value is 0.
# x = 0.
##
@testset "example: minmax" begin
m = Model(solver=solver)
@variable(m, -1 <= x <= 1, start=-1)
@NLobjective(m, Min, max(x^2, x, -1))
@test solve(m) == :Optimal
@test isapprox(getobjectivevalue(m), 0, atol=1e-2)
@test isapprox(getvalue(x), 0, atol=1e-2)
end
|
{"hexsha": "7b6acf14fc4229e002d426a72c968d487f96df0f", "size": 1046, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "examples/jump_maxmin.jl", "max_stars_repo_name": "tkoolen/AmplNLWriter.jl", "max_stars_repo_head_hexsha": "c3ab02096a96122bcad0b04067990611d0492c41", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "examples/jump_maxmin.jl", "max_issues_repo_name": "tkoolen/AmplNLWriter.jl", "max_issues_repo_head_hexsha": "c3ab02096a96122bcad0b04067990611d0492c41", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "examples/jump_maxmin.jl", "max_forks_repo_name": "tkoolen/AmplNLWriter.jl", "max_forks_repo_head_hexsha": "c3ab02096a96122bcad0b04067990611d0492c41", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 26.15, "max_line_length": 57, "alphanum_fraction": 0.5965583174, "num_tokens": 389}
|
import unittest
import numpy as np
import scipy.signal as signal
import filterdesigner.FIRDesign as FIRDesign
class TestSgolay(unittest.TestCase):
    """Check FIRDesign.sgolay against scipy.signal.savgol_coeffs."""

    def setUp(self):
        # Polynomial order and (odd) frame length for the S-G filter.
        self.order, self.framelen = 4, 21

    def test_sgolay(self):
        # The designed coefficients must match scipy's reference exactly.
        designed = FIRDesign.sgolay(self.order, self.framelen)
        expected = signal.savgol_coeffs(self.framelen, self.order)
        self.assertTrue(np.all(designed[0] == expected))
|
{"hexsha": "54781c1d046004ccf0c543f27cce945aebe6cde7", "size": 470, "ext": "py", "lang": "Python", "max_stars_repo_path": "filterdesigner/tests/test_sgolay.py", "max_stars_repo_name": "Yuki-F-HCU/filterdesigner", "max_stars_repo_head_hexsha": "bb735d507da0338b2925f84e54df091ce1c32f95", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2020-09-15T07:22:35.000Z", "max_stars_repo_stars_event_max_datetime": "2021-10-17T08:50:17.000Z", "max_issues_repo_path": "filterdesigner/tests/test_sgolay.py", "max_issues_repo_name": "Yuki-F-HCU/filterdesigner", "max_issues_repo_head_hexsha": "bb735d507da0338b2925f84e54df091ce1c32f95", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "filterdesigner/tests/test_sgolay.py", "max_forks_repo_name": "Yuki-F-HCU/filterdesigner", "max_forks_repo_head_hexsha": "bb735d507da0338b2925f84e54df091ce1c32f95", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2020-09-15T07:23:24.000Z", "max_forks_repo_forks_event_max_datetime": "2021-05-03T10:03:32.000Z", "avg_line_length": 27.6470588235, "max_line_length": 62, "alphanum_fraction": 0.6468085106, "include": true, "reason": "import numpy,import scipy", "num_tokens": 118}
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# IkaLog
# ======
# Copyright (C) 2015 Takeshi HASEGAWA
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import ctypes
from ctypes import c_char_p, c_double, c_int
import numpy as np
import os
import time
import threading
import cv2
from ikalog.utils import *
c_int_p = ctypes.POINTER(c_int)
class VideoInputWrapper(object):
    """Singleton ctypes wrapper around lib/videoinput.dll (Windows capture).

    __new__ loads the DLL and calls VI_Init exactly once; every later
    construction returns the same instance.
    """

    # --- connection types ---------------------------------------------------
    VI_COMPOSITE = 0
    VI_S_VIDEO = 1
    VI_TUNER = 2
    VI_USB = 3
    VI_1394 = 4

    # --- analog video standards --------------------------------------------
    VI_NTSC_M = 0
    VI_PAL_B = 1
    VI_PAL_D = 2
    VI_PAL_G = 3
    VI_PAL_H = 4
    VI_PAL_I = 5
    VI_PAL_M = 6
    VI_PAL_N = 7
    VI_PAL_NC = 8
    VI_SECAM_B = 9
    VI_SECAM_D = 10
    VI_SECAM_G = 11
    VI_SECAM_H = 12
    VI_SECAM_K = 13
    VI_SECAM_K1 = 14
    VI_SECAM_L = 15
    VI_NTSC_M_J = 16
    VI_NTSC_433 = 17

    # --- DirectShow media subtypes -----------------------------------------
    VI_MEDIASUBTYPE_RGB24 = 0
    VI_MEDIASUBTYPE_RGB32 = 1
    VI_MEDIASUBTYPE_RGB555 = 2
    VI_MEDIASUBTYPE_RGB565 = 3
    VI_MEDIASUBTYPE_YUY2 = 4
    VI_MEDIASUBTYPE_YVYU = 5
    VI_MEDIASUBTYPE_YUYV = 6
    VI_MEDIASUBTYPE_IYUV = 7
    VI_MEDIASUBTYPE_UYVY = 8
    VI_MEDIASUBTYPE_YV12 = 9
    # NOTE(review): YVU9 = 0 collides with RGB24; possibly a typo for 10
    # in the upstream header — confirm against videoinput.dll.
    VI_MEDIASUBTYPE_YVU9 = 0
    VI_MEDIASUBTYPE_Y411 = 11
    VI_MEDIASUBTYPE_Y41P = 12
    VI_MEDIASUBTYPE_Y211 = 13
    VI_MEDIASUBTYPE_AYUV = 14
    VI_MEDIASUBTYPE_Y800 = 15
    VI_MEDIASUBTYPE_Y8 = 16
    VI_MEDIASUBTYPE_GREY = 17
    VI_MEDIASUBTYPE_MJPG = 18

    # --- flags for get_pixels / init_device --------------------------------
    VI_BGR = 0x01
    VI_VERTICAL_FLIP = 0x02
    DS_RESOLUTION = 0x01
    DS_CONNECTION = 0x20

    def __del__(self):
        # Release the capture library when the singleton is collected.
        self.dll.VI_Deinit()

    def get_device_names(self):
        """Return the number of capture devices.

        NOTE(review): despite the name, this returns only the count; the
        DLL also fills the names, which are then read one-by-one via
        get_device_name(). The return code `r` is currently ignored.
        """
        num_devices = c_int(0)
        r = self.dll.VI_GetDeviceNames(ctypes.pointer(num_devices))
        # ToDo: error validation
        return num_devices.value

    def get_device_name(self, index):
        """Return the friendly name of device `index` as a Python str."""
        friendly_name_b = self.dll.VI_GetDeviceName(index)
        friendly_name = friendly_name_b.decode('ascii', errors='replace')
        return friendly_name

    def get_device_list(self):
        """Return the list of all device friendly names."""
        num_devices = self.get_device_names()
        device_list = []
        for n in range(num_devices):
            device_list.append(self.get_device_name(n))
        return device_list

    def get_frame_height(self, index):
        # Current capture height (pixels) of device `index`.
        return self.dll.VI_GetFrameHeight(index)

    def get_frame_width(self, index):
        # Current capture width (pixels) of device `index`.
        return self.dll.VI_GetFrameWidth(index)

    def init_device(self, index, settings=None, flags=None, width=None, height=None, connection=None):
        """Open capture device `index`; returns True on success.

        NOTE(review): the `settings` and `connection` parameters are
        accepted but ignored — `settings` is unconditionally rebuilt from
        (flags, width, height). Confirm against the DLL's expected layout.
        """
        settings = np.array([flags, width, height], dtype=np.intc)
        return self.dll.VI_InitDevice(index, settings) == 0

    def deinit_device(self, index):
        # Close capture device `index`.
        self.dll.VI_DeinitDevice(index)

    def set_blocking(self, enable):
        # Toggle blocking frame acquisition in the DLL.
        self.dll.VI_SetBlocking(enable)

    def set_framerate(self, index, framerate):
        # Request a capture framerate (frames per second) for `index`.
        self.dll.VI_SetFramerate(index, framerate)

    def has_new_frame(self, index):
        # Non-zero when a frame is ready to be fetched with get_pixels().
        return self.dll.VI_HasNewFrame(index)

    def get_buffer_size(self, index):
        # Size in bytes of the device's frame buffer.
        return self.dll.VI_GetBufferSize(index)

    def get_buffer_geometry(self, index):
        """Return (height, width, bytes_per_pixel) for device `index`.

        Derives bytes-per-pixel from buffer size / (w*h) and asserts the
        division is exact.
        """
        (buf_size, w, h) = (
            self.get_buffer_size(index),
            self.get_frame_width(index),
            self.get_frame_height(index),
        )
        assert buf_size > 0
        bpp = buf_size // (w * h)
        assert bpp == int(bpp)
        return (h, w, bpp)

    def get_pixels(self, index, parameters=0):
        """Fetch one frame as an (h, w, bpp) uint8 array, or None on failure.

        `parameters` is a bitmask of VI_BGR / VI_VERTICAL_FLIP passed to
        the DLL.
        """
        geom = self.get_buffer_geometry(index)
        frame_buffer = np.zeros(geom, np.uint8)
        # The DLL writes directly into the numpy buffer, so it must be
        # C-contiguous.
        assert frame_buffer.flags['C_CONTIGUOUS']
        retval = self.dll.VI_GetPixels(index, frame_buffer, parameters)
        if retval:
            return frame_buffer
        return None

    def _load_library(self):
        """Load videoinput.dll and declare argument/return types.

        Declaring argtypes/restype up front lets ctypes validate calls
        instead of defaulting everything to int.
        """
        videoinput_dll = os.path.join('lib', 'videoinput.dll')
        ctypes.cdll.LoadLibrary(videoinput_dll)
        self.dll = ctypes.CDLL(videoinput_dll)

        self.dll.VI_Init.argtypes = []
        self.dll.VI_Init.restype = c_int

        self.dll.VI_GetDeviceName.argtypes = [c_int]
        self.dll.VI_GetDeviceName.restype = c_char_p

        # NOTE(review): restype c_char_p while the caller only uses the
        # out-parameter count — confirm the DLL signature.
        self.dll.VI_GetDeviceNames.argtypes = [c_int_p]
        self.dll.VI_GetDeviceNames.restype = c_char_p

        self.dll.VI_InitDevice.argtypes = [
            c_int,
            np.ctypeslib.ndpointer(dtype=np.intc, flags='C_CONTIGUOUS'),
        ]
        self.dll.VI_InitDevice.restype = c_int

        self.dll.VI_DeinitDevice.argtypes = [c_int]
        self.dll.VI_DeinitDevice.restype = None

        self.dll.VI_SetBlocking.argtypes = [c_int]
        self.dll.VI_SetBlocking.restype = None

        self.dll.VI_SetFramerate.argtypes = [c_int, c_double]
        self.dll.VI_SetFramerate.restype = None

        self.dll.VI_GetFrameHeight.argtypes = [c_int]
        self.dll.VI_GetFrameHeight.restype = c_int

        self.dll.VI_GetFrameWidth.argtypes = [c_int]
        self.dll.VI_GetFrameWidth.restype = c_int

        self.dll.VI_HasNewFrame.argtypes = [c_int]
        self.dll.VI_HasNewFrame.restype = c_int

        self.dll.VI_GetBufferSize.argtypes = [c_int]
        self.dll.VI_GetBufferSize.restype = c_int

        self.dll.VI_GetPixels.argtypes = [
            c_int,
            np.ctypeslib.ndpointer(dtype=np.uint8, flags='C_CONTIGUOUS'),
            c_int
        ]
        self.dll.VI_GetPixels.restype = c_int

    def __new__(cls, *args, **kwargs):
        # Singleton: load the DLL and call VI_Init only on first use.
        # NOTE(review): forwarding *args/**kwargs to object.__new__ raises
        # TypeError on Python 3 when arguments are actually passed —
        # confirm no caller constructs this class with arguments.
        if not hasattr(cls, '__instance__'):
            cls.__instance__ = \
                super(VideoInputWrapper, cls).__new__(cls, *args, **kwargs)
            cls.__instance__._load_library()
            cls.__instance__.dll.VI_Init()
        return cls.__instance__
|
{"hexsha": "2cb0b2643c54666ea12f68aac59d2f25aff15d14", "size": 6084, "ext": "py", "lang": "Python", "max_stars_repo_path": "ikalog/inputs/win/videoinput_wrapper.py", "max_stars_repo_name": "fetus-hina/IkaLog", "max_stars_repo_head_hexsha": "bd476da541fcc296f792d4db76a6b9174c4777ad", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 285, "max_stars_repo_stars_event_min_datetime": "2015-08-15T14:38:38.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-18T15:00:06.000Z", "max_issues_repo_path": "ikalog/inputs/win/videoinput_wrapper.py", "max_issues_repo_name": "fetus-hina/IkaLog", "max_issues_repo_head_hexsha": "bd476da541fcc296f792d4db76a6b9174c4777ad", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 323, "max_issues_repo_issues_event_min_datetime": "2015-09-24T12:21:34.000Z", "max_issues_repo_issues_event_max_datetime": "2018-05-06T16:34:54.000Z", "max_forks_repo_path": "ikalog/inputs/win/videoinput_wrapper.py", "max_forks_repo_name": "fetus-hina/IkaLog", "max_forks_repo_head_hexsha": "bd476da541fcc296f792d4db76a6b9174c4777ad", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 72, "max_forks_repo_forks_event_min_datetime": "2015-08-22T00:18:54.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-18T14:44:20.000Z", "avg_line_length": 28.2976744186, "max_line_length": 102, "alphanum_fraction": 0.6586127548, "include": true, "reason": "import numpy", "num_tokens": 1737}
|
#!/usr/bin/env python3
import json
import models
import utils
import argparse,random,logging,numpy,os
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from torch.autograd import Variable
from torch.utils.data import DataLoader
from torch.nn.utils import clip_grad_norm
from time import time
from tqdm import tqdm
from util import load_dataset, make_iter, Params
#encoding=utf-8
from transformers import (
BartForConditionalGeneration,BartModel, BartConfig,PreTrainedTokenizerFast,
Seq2SeqTrainingArguments, Seq2SeqTrainer
)
from kobart import get_pytorch_kobart_model, get_kobart_tokenizer
import torch
from torch.utils.data import random_split
import os
# --- module-level configuration (runs on import) ---------------------------
logs_base_dir = "./logs"
#os.makedirs(logs_base_dir, exist_ok=True)
from torch.utils.tensorboard import SummaryWriter
writer = SummaryWriter()  # TensorBoard writer shared by train()/eval()
# NOTE(review): these env vars are set AFTER `import torch`; they only
# take effect if CUDA has not been initialized yet — confirm ordering.
os.environ['CUDA_LAUNCH_BLOCKING'] = "1"
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
logging.basicConfig(level=logging.INFO, format='%(asctime)s [INFO] %(message)s')
parser = argparse.ArgumentParser(description='extractive summary')
# model
parser.add_argument('-save_dir',type=str,default='checkpoints/')
parser.add_argument('-embed_dim',type=int,default=100)
parser.add_argument('-embed_num',type=int,default=100)
parser.add_argument('-pos_dim',type=int,default=50)
parser.add_argument('-pos_num',type=int,default=100)
parser.add_argument('-seg_num',type=int,default=10)
parser.add_argument('-kernel_num',type=int,default=100)
parser.add_argument('-kernel_sizes',type=str,default='3,4,5')
parser.add_argument('-model',type=str,default='RNN_RNN')
parser.add_argument('-hidden_size',type=int,default=16)
# train
parser.add_argument('-lr',type=float,default=1e-3)
parser.add_argument('-batch_size',type=int,default=32)
parser.add_argument('-epochs',type=int,default=50)
parser.add_argument('-seed',type=int,default=1)
parser.add_argument('-train_dir',type=str,default='data/unsup_train.json')
parser.add_argument('-val_dir',type=str,default='data/unsup_valid.json')
parser.add_argument('-embedding',type=str,default='data/embedding.npz')
parser.add_argument('-word2id',type=str,default='data/word2id.json')
parser.add_argument('-report_every',type=int,default=1)
parser.add_argument('-seq_trunc',type=int,default=50)
parser.add_argument('-max_norm',type=float,default=1.0)
# test
parser.add_argument('-load_dir',type=str,default='checkpoints/RNN_RNN_seed_1.pt')
parser.add_argument('-test_dir',type=str,default='data/unsup_test.json')
parser.add_argument('-ref',type=str,default='outputs/ref')
parser.add_argument('-hyp',type=str,default='outputs/hyp')
parser.add_argument('-filename',type=str,default='x.txt') # TextFile to be summarized
parser.add_argument('-topk',type=int,default=15)
# device
parser.add_argument('-device',type=int)
# option
parser.add_argument('-test',action='store_true')
parser.add_argument('-debug',action='store_true')
parser.add_argument('-predict',action='store_true')
args = parser.parse_args()
# GPU is used only when -device is given explicitly.
use_gpu = args.device is not None
if torch.cuda.is_available() and not use_gpu:
    print("WARNING: You have a CUDA device, should run with -device 0")
# set cuda device and seed (torch / python / numpy all seeded for
# reproducibility)
if use_gpu:
    torch.cuda.set_device(args.device)
    torch.cuda.manual_seed(args.seed)
torch.manual_seed(args.seed)
random.seed(args.seed)
numpy.random.seed(args.seed)
def eval(net,model,vocab,data_iter,criterion, epoch):
    """Return the mean BCE loss of `net` over the validation iterator.

    NOTE: the name shadows the builtin `eval`; it is kept because
    train() calls it by this name.

    Parameters
    ----------
    net : extractive summarizer being evaluated (switched to eval mode
        here and back to train mode before returning)
    model : frozen KoBART model whose encoder supplies sentence features
    vocab : utils.Vocab, used to build batch tensors
    data_iter : DataLoader over the validation set
    criterion : loss (BCELoss)
    epoch : current epoch (unused here; kept for interface compatibility)
    """
    net.eval()
    total_loss = 0
    batch_num = 0
    # FIX: inference only — no_grad avoids building the autograd graph,
    # cutting memory use; the computed loss values are unchanged.
    with torch.no_grad():
        for batch in data_iter:
            input, features, targets, doc_lens, _ = vocab.make_features(batch)
            input, features, targets = Variable(input), Variable(features), Variable(targets.float())
            if use_gpu:
                input = input.cuda()
                features = features.cuda()
                targets = targets.cuda()
            # Frozen KoBART encoder features for the sentence scorer.
            encoder_output = model.base_model.encoder(input, return_dict=True).last_hidden_state
            probs = net(features, doc_lens, encoder_output)
            loss = criterion(probs, targets)
            total_loss += loss.data
            batch_num += 1
    loss = total_loss / batch_num
    net.train()
    return loss
def train():
    """Train the extractive summarizer on top of frozen KoBART features.

    Loads train/validation JSONL datasets, builds the model named by
    args.model, then runs args.epochs of BCE training; validation loss is
    computed after every batch and the best checkpoint is saved via
    net.save(). TensorBoard scalars go to the module-level `writer`.
    """
    logging.info('Loading vocab,train and val dataset.Wait a second,please')
    params = Params('config/params.json')
    embed = torch.Tensor(np.load(args.embedding)['embedding'])
    vocab = utils.Vocab()
    with open(args.train_dir) as f:
        examples = [json.loads(line) for line in f]
    train_dataset = utils.Dataset(examples)

    with open(args.val_dir) as f:
        examples = [json.loads(line) for line in f]
    val_dataset = utils.Dataset(examples)

    # Pretrained KoBART used as a frozen feature encoder.
    model = BartForConditionalGeneration.from_pretrained('gogamza/kobart-base-v2')
    if use_gpu:
        model.cuda()
    model.eval()

    # update args from the embedding matrix
    args.embed_num = embed.size(0)
    args.embed_dim = embed.size(1)
    args.kernel_sizes = [int(ks) for ks in args.kernel_sizes.split(',')]

    # build model
    net = getattr(models,args.model)(args,embed)
    if use_gpu:
        net.cuda()

    # load dataset
    train_iter = DataLoader(dataset=train_dataset,
                            batch_size=args.batch_size,
                            shuffle=True)
    val_iter = DataLoader(dataset=val_dataset,
                          batch_size=args.batch_size,
                          shuffle=False)

    # loss function
    criterion = nn.BCELoss()

    # model info — FIX: renamed from `params`, which shadowed the Params
    # config object loaded above
    n_params = sum(p.numel() for p in list(net.parameters())) / 1e6
    print('#Params: %.1fM' % (n_params))
    print(use_gpu)

    min_loss = float('inf')
    optimizer = torch.optim.Adam(net.parameters(),lr=args.lr)
    net.train()

    t1 = time()
    with torch.autograd.set_detect_anomaly(True):
        for epoch in range(1,args.epochs+1):
            print("epoch: ", epoch)
            for i,batch in enumerate(train_iter):
                net.train()
                input,features,targets,doc_lens,_ = vocab.make_features(batch)
                input,features,targets = Variable(input),Variable(features), Variable(targets.float())
                if use_gpu:
                    input = input.cuda()
                    features = features.cuda()
                    targets = targets.cuda()
                # Frozen KoBART encoder features for the sentence scorer.
                encoder_output = model.base_model.encoder(input, return_dict=True).last_hidden_state
                probs = net(features,doc_lens,encoder_output)
                loss = criterion(probs,targets)
                optimizer.zero_grad()
                loss.backward()
                writer.add_scalar("Loss/Train", loss.data, epoch * len(train_iter) + i)
                # FIX: clip_grad_norm is a deprecated alias; the in-place
                # clip_grad_norm_ has identical effect.
                nn.utils.clip_grad_norm_(net.parameters(), args.max_norm)
                optimizer.step()
                if args.debug:
                    print('Batch ID:%d Loss:%f' %(i,loss.data))
                    continue
                # NOTE(review): validation runs after EVERY batch; gate on
                # args.report_every (as the commented-out original hinted)
                # to speed training up if desired.
                cur_loss = eval(net,model,vocab,val_iter,criterion, epoch)
                writer.add_scalar("Loss/Valid", cur_loss.data, epoch * len(train_iter) + i)
                if cur_loss < min_loss:
                    min_loss = cur_loss
                    best_path = net.save()
                print('Epoch: %2d Min_Val_Loss: %f Cur_Val_Loss: %f'
                      % (epoch,min_loss,cur_loss))
                logging.info('Epoch: %2d Min_Val_Loss: %f Cur_Val_Loss: %f'
                             % (epoch,min_loss,cur_loss))
    writer.flush()
    t2 = time()
    logging.info('Total Cost:%f h'%((t2-t1)/3600))
    writer.close()
def test():
    """Score the test set with a saved checkpoint and write ref/hyp files.

    For each document, the top sentence indices (as strings) are written
    to args.hyp/<id>.txt and the reference summary to args.ref/<id>.txt.
    NOTE: the name shadows the builtin `test`-like pattern of this script;
    it is invoked by the script's entry point by this name.
    """
    embed = torch.Tensor(np.load(args.embedding)['embedding'])
    params = Params('config/params.json')
    vocab = utils.Vocab()
    with open(args.test_dir) as f:
        examples = [json.loads(line) for line in f]
    test_dataset = utils.Dataset(examples)

    test_iter = DataLoader(dataset=test_dataset,
                        batch_size=args.batch_size,
                        shuffle=False)
    if use_gpu:
        checkpoint = torch.load(args.load_dir)
    else:
        checkpoint = torch.load(args.load_dir, map_location=lambda storage, loc: storage)

    # Load the fine-tuned KoBART ("ae") model used as the frozen encoder.
    """
    ae model 가져오기
    """
    model = BartForConditionalGeneration.from_pretrained('./model/')
    if use_gpu:
        model = model.cuda()
    model.eval()

    # checkpoint['args']['device'] saves the device used as train time
    # if at test time, we are using a CPU, we must override device to None
    if not use_gpu:
        checkpoint['args'].device = None
    net = getattr(models,checkpoint['args'].model)(checkpoint['args'])
    net.load_state_dict(checkpoint['model'])
    if use_gpu:
        net.cuda()
    net.eval()

    doc_num = len(test_dataset)
    time_cost = 0
    file_id = 1
    for batch in tqdm(test_iter):
        _, features, targets, doc_lens, summaries = vocab.make_features(batch)
        features, targets = Variable(features), Variable(targets.float())
        t1 = time()
        if use_gpu:
            features = features.cuda()
            targets = targets.cuda()
        # autoencoder
        # NOTE(review): the encoder is fed `features` here, while train()
        # and eval() feed `input` (which is discarded as `_` above) —
        # confirm this asymmetry is intentional; it looks like a bug.
        encoder_output = model.base_model.encoder(features, return_dict=True).last_hidden_state
        # encoder_output = encoder_output.cuda()
        tmp = list()
        for eo in encoder_output:
            tmp.append(eo)
        probs = net(features, doc_lens, tmp)
        t2 = time()
        time_cost += t2 - t1
        start = 0
        for doc_id,doc_len in enumerate(doc_lens):
            print("id: ", doc_id)
            ref = summaries[doc_id]
            print(ref)
            # Slice this document's sentence probabilities out of the
            # flat batch vector.
            stop = start + doc_len
            try:
                prob = probs[start:stop]
            except IndexError:
                continue
            topk = min(args.topk,doc_len)
            topk_indices = prob.topk(topk)[1].cpu().data.numpy()
            doc = batch['documents'][doc_id].split('\n')[:doc_len]
            # Hypothesis = first three selected sentence indices, sorted,
            # written one per line (indices, not sentence text).
            labels = [str(l) for l in sorted(list(topk_indices)[:3])]
            print(labels)
            hyp = "\n".join(labels)
            #hyp = doc[topk_indices[0]]
            with open(os.path.join(args.ref,str(file_id)+'.txt'), 'w') as f:
                f.write(ref)
            with open(os.path.join(args.hyp,str(file_id)+'.txt'), 'w') as f:
                f.write(hyp)
            start = stop
            file_id = file_id + 1
    print('Speed: %.2f docs / s' % (doc_num / time_cost))
def predict():
    """Pick the single most salient sentence of each document and write it to
    <args.hyp>/<file_id>.txt, one file per document; prints throughput."""
    logging.info('Loading vocab, pred dataset.Wait a second,please')
    params = Params('config/params.json')
    vocab = utils.Vocab()
    with open(args.test_dir) as src:
        examples = [json.loads(row) for row in src]
    pred_dataset = utils.Dataset(examples)
    pred_iter = DataLoader(dataset=pred_dataset,
                           batch_size=args.batch_size,
                           shuffle=False)
    # Pretrained BART autoencoder.
    model = BartForConditionalGeneration.from_pretrained('./model/')
    if use_gpu:
        model.cuda()
    model.eval()
    # Extractor model checkpoint (fixed path for prediction).
    if use_gpu:
        checkpoint = torch.load('checkpoints/RNN_RNN_seed_1.pt')
    else:
        checkpoint = torch.load('checkpoints/RNN_RNN_seed_1.pt', map_location=lambda storage, loc: storage)
    # Unlike test(), checkpoint['args'].device is NOT overridden here;
    # the train-time device is printed for inspection instead.
    print(checkpoint['args'].device)
    net = getattr(models, checkpoint['args'].model)(checkpoint['args'])
    net.load_state_dict(checkpoint['model'])
    if use_gpu:
        net.cuda()
    net.eval()
    doc_num = len(pred_dataset)
    time_cost = 0
    file_id = 1
    for batch in tqdm(pred_iter):
        token_ids, features, doc_lens = vocab.make_predict_features(batch)
        token_ids, features = Variable(token_ids), Variable(features)
        t1 = time()
        if use_gpu:
            features = features.cuda()
            token_ids = token_ids.cuda()
        encoder_output = model.base_model.encoder(token_ids, return_dict=True).last_hidden_state
        sentence_states = [state for state in encoder_output]
        probs = net(features, doc_lens, sentence_states)
        t2 = time()
        time_cost += t2 - t1
        # probs covers all sentences of the batch back-to-back.
        start = 0
        for doc_id, doc_len in enumerate(doc_lens):
            stop = start + doc_len
            prob = probs[start:stop]
            topk = 1  # keep only the single best sentence
            topk_indices = prob.topk(topk)[1].cpu().data.numpy()
            topk_indices.sort()
            doc = batch['doc'][doc_id].split('\n')[:doc_len]
            hyp = [doc[index] for index in topk_indices]
            with open(os.path.join(args.hyp, str(file_id) + '.txt'), 'w') as out:
                out.write('. '.join(hyp))
            start = stop
            file_id = file_id + 1
    print('Speed: %.2f docs / s' % (doc_num / time_cost))
if __name__ == '__main__':
    # Dispatch on the command-line flags, e.g.:
    # python main.py -batch_size 1 -predict -load_dir checkpoints/RNN_RNN_seed_1.pt
    if args.test:
        entry = test
    elif args.predict:
        entry = predict
    else:
        entry = train
    entry()
|
{"hexsha": "44aac7340455240cb503034c9a8a3f6e30c2d7a8", "size": 13614, "ext": "py", "lang": "Python", "max_stars_repo_path": "main.py", "max_stars_repo_name": "hyunbool/SummaRuNNer", "max_stars_repo_head_hexsha": "2a9fe75fa9d47bd13b2143ecb3f1acb65a11d701", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "main.py", "max_issues_repo_name": "hyunbool/SummaRuNNer", "max_issues_repo_head_hexsha": "2a9fe75fa9d47bd13b2143ecb3f1acb65a11d701", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "main.py", "max_forks_repo_name": "hyunbool/SummaRuNNer", "max_forks_repo_head_hexsha": "2a9fe75fa9d47bd13b2143ecb3f1acb65a11d701", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.6148148148, "max_line_length": 107, "alphanum_fraction": 0.6256059938, "include": true, "reason": "import numpy", "num_tokens": 3152}
|
import numpy as np
import tensorflow as tf
import os
import pickle
import random
from generator import Generator
from mobilenet import MobileNet
from PIL import Image
# --- Generator / decoding hyper-parameters ---
EMB_DIM = 300 # embedding dimension
HIDDEN_DIM = 300 # hidden state dimension of lstm cell
SEQ_LENGTH = 12 # sequence length
START_TOKEN = 0  # generator start-token id; main() asserts it is 0
SEED = 88  # seed for both `random` and `numpy.random`
BATCH_SIZE = 1  # one image per inference batch
# --- Input files ---
DICO= 'tang.txt'  # corpus used to build the character dictionary
DICO_PKL = 'dict.pkl'  # cached (word_to_id, id_to_word) pickle
IMAGE = 'test/3.jpg'  # image the poem is generated for
def create_dico(filename):
    '''
    build a character dictionary from a corpus file
    Parameters:
        filename: path to the corpus, one poem per line (str)
    Returns:
        item_to_id: char -> id (dict)
        id_to_item: id -> char (dict)
    Note:
        'sos' and 'unk' get huge pseudo-counts so they always receive the
        smallest ids after the frequency sort.
    '''
    counts = {'unk': 1000000, 'sos': 1000001}
    with open(filename, 'r', encoding='utf-8') as handle:
        for line in handle:
            for ch in line.strip():
                if ch == ' ':
                    continue
                counts[ch] = counts.get(ch, 0) + 1
    # Sort by (count, char) descending so ids are stable and frequency-ordered.
    ranked = sorted(counts.items(), key=lambda kv: (kv[1], kv[0]), reverse=True)
    id_to_item = {idx: pair[0] for idx, pair in enumerate(ranked)}
    item_to_id = {word: idx for idx, word in id_to_item.items()}
    return item_to_id, id_to_item
def main():
    """Generate a poem for IMAGE.

    Builds (or loads the cached) character dictionary, extracts an image
    feature vector with MobileNet, seeds the Generator's hidden state with
    it, and prints each decoded character sequence.

    Side effects: may create DICO_PKL; opens a TF session; prints output.
    """
    random.seed(SEED)
    np.random.seed(SEED)
    assert START_TOKEN == 0
    # Load the cached dictionary if present, otherwise build it from DICO
    # and cache it for subsequent runs.
    if os.path.exists(DICO_PKL):
        with open(DICO_PKL, 'rb') as f:
            word_to_id, id_to_word = pickle.load(f)
    else:
        word_to_id, id_to_word = create_dico(DICO)
        with open(DICO_PKL, 'wb') as f:
            pickle.dump([word_to_id, id_to_word], f)
    vocab_size = len(word_to_id)
    generator = Generator(vocab_size, BATCH_SIZE, EMB_DIM, HIDDEN_DIM, SEQ_LENGTH, START_TOKEN, True)
    mobilenet = MobileNet(BATCH_SIZE)
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True  # don't grab all GPU memory up front
    sess = tf.Session(config=config)
    # generator.load_weight()
    mobilenet.load_pretrained_weights(sess)
    # NOTE(review): global_variables_initializer() runs AFTER the pretrained
    # weights are loaded; if load_pretrained_weights assigns the same TF
    # variables, this re-initialization may clobber them — confirm ordering.
    sess.run(tf.global_variables_initializer())
    # Preprocess: RGB, 224x224, add a leading batch dimension.
    im = Image.open(IMAGE).convert('RGB')
    im = im.resize((224, 224))
    im = np.array(im)
    im = np.expand_dims(im, 0)
    feed_dict = {
        mobilenet.X: im,
        mobilenet.is_training: False
    }
    # Image feature vector used to initialize the generator.
    hidden_batch = sess.run(mobilenet.y_output, feed_dict=feed_dict)
    samples = generator.generate(sess, hidden_batch)
    y = samples.tolist()
    # Map token ids back to characters and print each sample.
    for k, sam in enumerate(y):
        sa = [id_to_word[i] for i in sam]
        sa = ''.join(sa)
        print(sa)
# Script entry point.
if __name__ == '__main__':
    main()
|
{"hexsha": "db04d435881721649befc2aad7bc8a6de345bdba", "size": 2424, "ext": "py", "lang": "Python", "max_stars_repo_path": "test.py", "max_stars_repo_name": "GeneZC/AlphaPoet", "max_stars_repo_head_hexsha": "82715e9cc36aedfa78c250a7a7f8129669eea440", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 9, "max_stars_repo_stars_event_min_datetime": "2018-10-14T14:58:30.000Z", "max_stars_repo_stars_event_max_datetime": "2021-06-10T14:50:34.000Z", "max_issues_repo_path": "test.py", "max_issues_repo_name": "GeneZC/AlphaPoet", "max_issues_repo_head_hexsha": "82715e9cc36aedfa78c250a7a7f8129669eea440", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "test.py", "max_forks_repo_name": "GeneZC/AlphaPoet", "max_forks_repo_head_hexsha": "82715e9cc36aedfa78c250a7a7f8129669eea440", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2019-01-07T07:16:02.000Z", "max_forks_repo_forks_event_max_datetime": "2020-03-11T11:33:01.000Z", "avg_line_length": 30.3, "max_line_length": 102, "alphanum_fraction": 0.5940594059, "include": true, "reason": "import numpy", "num_tokens": 640}
|
import torch
from torch import nn, einsum
import numpy as np
from einops import rearrange, repeat
import torch.nn.functional as F
def number_parameters(Net, type_size=8):
    """Rough model size of ``Net``: total parameter count scaled by
    ``type_size`` (bytes per parameter) and divided by 1024 twice, i.e. the
    result is in MiB when ``type_size`` is a byte count."""
    total = 0
    for p in Net.parameters():
        total += np.prod(list(p.size()))
    return total / 1024 * type_size / 1024
class Residual_Connection(nn.Module):
    """Wraps ``fn`` with a skip connection: forward(x) = fn(x) + x."""

    def __init__(self, fn):
        super().__init__()
        self.fn = fn

    def forward(self, x, **kwargs):
        out = self.fn(x, **kwargs)
        return out + x
class Layer_Normal(nn.Module):
    """Pre-norm wrapper: LayerNorm over the last ``dim`` features, then ``fn``."""

    def __init__(self, dim, fn):
        super().__init__()
        self.norm = nn.LayerNorm(dim)
        self.fn = fn

    def forward(self, x, **kwargs):
        normed = self.norm(x)
        return self.fn(normed, **kwargs)
class MLP_Block(nn.Module):
    """Two-layer feed-forward block with GELU: dim -> hidden_dim -> dim."""

    def __init__(self, dim, hidden_dim):
        super().__init__()
        # Keep the attribute name `net` so existing state dicts still load.
        stages = [
            nn.Linear(dim, hidden_dim),
            nn.GELU(),
            nn.Linear(hidden_dim, dim),
        ]
        self.net = nn.Sequential(*stages)

    def forward(self, x):
        return self.net(x)
class MSA_Block(nn.Module):
    """Multi-head self-attention.

    One shared linear projection produces q/k/v, which are split per head;
    scaled dot-product attention (scale = dim_head ** -0.5) is applied and
    heads are merged back through ``to_out``.
    """

    def __init__(self, dim_seq, num_heads, dim_head):
        super().__init__()
        dim_inner = dim_head * num_heads
        self.num_heads = num_heads
        self.scale = dim_head ** -0.5
        self.to_qkv = nn.Linear(dim_seq, dim_inner * 3, bias=False)
        self.to_out = nn.Linear(dim_inner, dim_seq)

    def forward(self, x):
        _b, _n, _ = x.shape  # input must be (batch, seq, feat), as before
        heads = self.num_heads
        # Single projection, then split into q, k, v and separate the heads.
        q, k, v = (rearrange(chunk, 'b nw_l (h d) -> b h nw_l d', h=heads)
                   for chunk in self.to_qkv(x).chunk(3, dim=-1))
        scores = einsum('b h i d, b h j d -> b h i j', q, k) * self.scale
        weights = scores.softmax(dim=-1)
        context = einsum('b h i j, b h j d -> b h i d', weights, v)
        context = rearrange(context, 'b h nw_l d -> b nw_l (h d)')
        return self.to_out(context)
class transformer_block(nn.Module):
    """Pre-norm transformer encoder block: self-attention then MLP, each
    wrapped in LayerNorm and a residual connection."""

    def __init__(self, dim_seq, dim_mlp, num_heads, dim_head):
        super().__init__()
        attention = MSA_Block(dim_seq=dim_seq, num_heads=num_heads, dim_head=dim_head)
        self.attention_block = Residual_Connection(Layer_Normal(dim_seq, attention))
        feed_forward = MLP_Block(dim=dim_seq, hidden_dim=dim_mlp)
        self.mlp_block = Residual_Connection(Layer_Normal(dim_seq, feed_forward))

    def forward(self, x):
        return self.mlp_block(self.attention_block(x))
class Transformer_Structure(nn.Module):
    """Stack of transformer blocks operating in a reduced ``dim_head`` space.

    The input is summed with a learned order embedding (up to 30 positions),
    projected down to ``dim_head``, run through ``num_encoders`` blocks, and
    projected back up to ``dim_seq``.
    """

    def __init__(self, dim_seq=3456, num_heads=3, dim_head=18, num_encoders=8):
        super().__init__()
        self.order_embedding = nn.Parameter(torch.randn(1, 30, dim_seq))
        self.to_trans = nn.Linear(dim_seq, dim_head)
        self.to_seq = nn.Linear(dim_head, dim_seq)
        self.layers = nn.ModuleList(
            transformer_block(dim_seq=dim_head, num_heads=num_heads,
                              dim_mlp=dim_seq * 2, dim_head=dim_head)
            for _ in range(num_encoders)
        )

    def forward(self, img):
        # NOTE(review): despite the name, `img` is a sequence tensor that
        # must broadcast with the (1, 30, dim_seq) order embedding — confirm.
        x = img + self.order_embedding
        x = self.to_trans(x)
        for block in self.layers:
            x = block(x)
        return self.to_seq(x)
class Softmax_Classify(nn.Module):
    """MLP classifier head.

    Halves the hidden size (num_linear - 1) times, maps to ``num_class``
    logits, and applies softmax per position of the sequence.
    """

    def __init__(self, hidden_size, num_linear, num_class):
        super().__init__()
        self.layers = nn.ModuleList([])
        width = hidden_size
        for _ in range(num_linear - 1):
            self.layers.append(nn.Linear(int(width), int(width / 2)))
            width /= 2
        self.layers.append(nn.Linear(int(width), num_class))
        self.soft_max = nn.Softmax(dim=1)

    def forward(self, x):
        b, l, n = x.shape
        # Flatten batch and length so each position is classified on its own.
        x = rearrange(x, 'b l n -> (b l) n')
        for fc in self.layers:
            x = fc(x)
        x = self.soft_max(x)
        return rearrange(x, '(b l) n -> b l n', b=b)
def conv3x3x3(in_planes, out_planes, stride=1):
    """3x3x3 'same' convolution (padding 1), without bias."""
    return nn.Conv3d(in_planes,
                     out_planes,
                     kernel_size=3,
                     stride=stride,
                     padding=1,
                     bias=False)
class Conv3d(nn.Module):
    """Per-slice 3-D CNN feature extractor.

    ``num_levels`` stages of conv -> BatchNorm -> ReLU -> max-pool, doubling
    the channel count each level, applied to every sequence slice and
    flattened to one vector per slice.
    """

    def __init__(self, in_channels, num_levels=4, f_maps=16):
        super().__init__()
        self.in_channels = in_channels
        self.layers = nn.ModuleList([])
        for level in range(num_levels):
            out_channels = f_maps * (2 ** level)
            self.layers.append(conv3x3x3(self.in_channels, out_channels, stride=1))
            self.layers.append(nn.BatchNorm3d(out_channels))
            self.layers.append(nn.ReLU(inplace=True))
            self.layers.append(nn.MaxPool3d(kernel_size=(2, 2, 2), padding=1))
            self.in_channels = out_channels

    def forward(self, x):
        b, l, c, n_l, n_h, n_w = x.shape
        # Merge batch and sequence dims so the 3-D layers see 5-D input.
        x = rearrange(x, 'b l c n_l n_h n_w -> (b l) c n_l n_h n_w')
        for stage in self.layers:
            x = stage(x)
        # Restore the sequence dim and flatten each slice's feature volume.
        return rearrange(x, '(b l) c n_l n_h n_w -> b l (c n_l n_h n_w)', l=l)
class transformer_network(nn.Module):
    """Full TR-Net pipeline: per-slice 3-D CNN features -> transformer
    encoder -> per-slice softmax classification."""

    def __init__(self, *, in_channels=1, num_levels=4, f_maps=16, dim_hidden=3456, num_heads=3, dim_head=18,
                 num_encoders=8, num_linear=2, num_class=2):
        super().__init__()
        self._3dcnn = Conv3d(in_channels=in_channels, num_levels=num_levels, f_maps=f_maps)
        self.transformer_structure = Transformer_Structure(dim_seq=dim_hidden, num_heads=num_heads,
                                                           dim_head=dim_head, num_encoders=num_encoders)
        self.softmax_classify = Softmax_Classify(hidden_size=dim_hidden, num_linear=num_linear, num_class=num_class)

    def forward(self, img):
        # Split the stacked channel dim into a length dim of 1-channel volumes.
        volumes = rearrange(img, 'b (l c) n_l n_h n_w -> b l c n_l n_h n_w', c=1)
        features = self._3dcnn(volumes)
        encoded = self.transformer_structure(features)
        return self.softmax_classify(encoded)
|
{"hexsha": "c0e92d44a97df35eef7e4d29a2ddbd1ba194c217", "size": 5833, "ext": "py", "lang": "Python", "max_stars_repo_path": "method/model.py", "max_stars_repo_name": "XinghuaMa/TR-Net", "max_stars_repo_head_hexsha": "879c32e130df668636a27ce5cb5e5d76cf90de66", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 12, "max_stars_repo_stars_event_min_datetime": "2021-07-11T14:48:02.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-03T00:57:16.000Z", "max_issues_repo_path": "method/model.py", "max_issues_repo_name": "XinghuaMa/TRNet", "max_issues_repo_head_hexsha": "879c32e130df668636a27ce5cb5e5d76cf90de66", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 3, "max_issues_repo_issues_event_min_datetime": "2021-07-01T02:30:53.000Z", "max_issues_repo_issues_event_max_datetime": "2021-11-08T09:35:24.000Z", "max_forks_repo_path": "method/model.py", "max_forks_repo_name": "XinghuaMa/TRNet", "max_forks_repo_head_hexsha": "879c32e130df668636a27ce5cb5e5d76cf90de66", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2022-03-01T06:35:42.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-14T11:27:14.000Z", "avg_line_length": 30.7, "max_line_length": 116, "alphanum_fraction": 0.5986627807, "include": true, "reason": "import numpy", "num_tokens": 1549}
|
#Show which groups fit very poorly at the site level but fit well in sample.
# Purpose: compare in-sample (prior JAGS fit) R^2 against out-of-sample
# (NEON site-level forecast) R^2 per fungal group, then plot predicted vs.
# observed panels for three hand-picked groups. Output: a 3x2 panel PNG.
rm(list=ls())
source('paths.r')
#set output path.----
output.path <- 'Supp._Fig._3._bad_out.of.sample_fits.png'
#load data.----
#grab prior fits.
prior <- readRDS(ted_ITS_prior_all.groups_JAGSfits.path) #all phylo and functional groups.
#grab forecasts and observations of functional and phylogenetic groups.----
fcast <- readRDS(NEON_site_fcast_all_groups_1k_rare.path)
neon.truth <- readRDS(NEON_all.phylo.levels_plot.site_obs_fastq_1k_rare.path)
#get prior rsq values.----
# Per taxonomic level: regress observed on predicted for each taxon and
# record the R^2. NOTE: 'return' below is an ordinary variable name (legal
# in R, though it shadows the usual function-call idiom).
prior.out <- list()
for(i in 1:length(prior)){
  lev <- prior[[i]]
  lev.out <- list()
  for(k in 1:ncol(lev$observed)){
    fit <- lm(lev$observed[,k] ~ lev$predicted[,k])
    rsq <- summary(fit)$r.squared
    lev.out[[k]] <- rsq
  }
  tax <- colnames(lev$predicted)
  lev.out <- unlist(lev.out)
  return <- data.frame(tax,lev.out)
  colnames(return) <- c('tax','rsq')
  # Drop the residual 'other' category before pooling.
  return <- return[return$tax != 'other',]
  prior.out[[i]] <- return
}
prior.out <- do.call(rbind, prior.out)
#Get out of sample rsq values.----
# Same per-taxon R^2, but using the NEON site-level forecast means against
# the observed site-level means.
fcast.out <- list()
for(i in 1:length(fcast)){
  pred <- fcast[[i]]$site.fit$mean
  obs <- neon.truth[[i]]$site.fit$mean
  lev.out <- list()
  for(k in 1:ncol(pred)){
    fit <- lm(obs[,k] ~ pred[,k])
    rsq <- summary(fit)$r.squared
    lev.out[[k]] <- rsq
  }
  tax <- colnames(pred)
  lev.out <- unlist(lev.out)
  return <- data.frame(tax,lev.out)
  colnames(return) <- c('tax','rsq')
  return <- return[return$tax != 'other',]
  fcast.out[[i]] <- return
}
fcast.out <- do.call(rbind, fcast.out)
# rsq.x = in-sample, rsq.y = out-of-sample after the merge.
out <- merge(prior.out, fcast.out, by = 'tax')
#Find taxa with highest in sample and lowest out of sample rsq.----
out <- out[order(out$rsq.x, decreasing = T),]
#great candidates are:
#1. Inocybe
#2. Hypocreales.
#3. Helotiales.
#grab predicted vs. observed for groups of interest----
namey <- c('Inocybe','Hypocreales','Helotiales')
level <- c('genus','order','order')
# NOTE(review): the two assignments below immediately overwrite the trio
# chosen above — the first pair is dead code. Confirm which set is intended.
namey <- c('Inocybe','Thelephoraceae','Umbelopsis')
level <- c('genus','family','genus')
prior.plot <- list()
fcast.plot <- list()
for(i in 1:length(namey)){
  #prior pred vs. obs.
  pred <- prior[[which(names(prior) == level[i])]]$predicted
  pred <- pred[,colnames(pred) == namey[i]]
  obs <- prior[[which(names(prior) == level[i])]]$observed
  obs <- obs[,colnames( obs) == namey[i]]
  prior.plot[[i]] <- data.frame(obs, pred)
  #fcst pred vs. obs.
  pred <- fcast[[which(names(fcast) == level[i])]]$site.fit$mean
  pred <- pred[,colnames(pred) == namey[i]]
  obs <- neon.truth[[which(names(fcast) == level[i])]]$site.fit$mean
  obs <- obs[,colnames(obs) == namey[i]]
  fcast.plot[[i]] <- data.frame(obs, pred)
}
names(prior.plot) <- namey
names(fcast.plot) <- namey
#save line.----
png(output.path, height = 8, width = 6, units = 'in', res = 300)
#Plot fits.----
# One row per taxon: left = in-sample, right = out-of-sample. Solid line is
# the 1:1 line; dashed purple line is the fitted regression.
par(mfrow = c(3,2),
    mar = c(1,1,1,1),
    oma = c(4,4,3,1))
trans = 0.4
for(i in 1:length(namey)){
  #grab observations, set x and y limits
  obs1 <- prior.plot[[i]][,1]
  pred1 <- prior.plot[[i]][,2]
  obs2 <- fcast.plot[[i]][,1]
  pred2 <- fcast.plot[[i]][,2]
  lab <- namey[i]
  # Shared axis limits so the two panels of a row are comparable.
  limx <- c(min(pred1,pred2), max(pred1,pred2))
  limy <- c(min(obs1 ,obs2 ), max(obs1 , obs2))
  #prior plot panel.
  mod <- lm((obs1) ~ (pred1))
  rsq <- round(summary(mod)$r.squared, 2)
  plot((obs1) ~ (pred1), pch = 16, bty = 'l', ylab = NA, xlab = NA, xlim = limx, ylim = limy)
  abline(0,1,lwd = 2)
  abline(mod, lwd = 2, lty = 2, col= adjustcolor('purple', trans))
  mtext(lab, side = 3, adj = 0.05, line = -2.0)
  mtext(bquote(R^2 == .(rsq)), side = 3, adj = 0.05, line = -3.75)
  #fcast plot panel.
  mod <- lm((obs2) ~ (pred2))
  rsq <- round(summary(mod)$r.squared, 2)
  plot((obs2) ~ (pred2), pch = 16, bty = 'l', ylab = NA, xlab = NA, xlim = limx, ylim = limy)
  abline(0,1,lwd = 2)
  abline(mod, lwd = 2, lty = 2, col= adjustcolor('purple', trans))
  mtext(bquote(R^2 == .(rsq)), side = 3, adj = 0.05, line = -8)
}
#outer labels.
mtext('predicted', side = 1, cex = 1.5, outer = T, line = 2.5)
mtext('observed' , side = 2, cex = 1.5, outer = T, line = 2.0)
mtext('in-sample' , side = 3, cex = 1.5, outer = T, line = -0.5, adj = 0.20)
mtext('out-of-sample', side = 3, cex = 1.5, outer = T, line = -0.5, adj = 0.875)
#end plot.----
dev.off()
|
{"hexsha": "1106301115f2d052e53419cc760a101d5345e60d", "size": 4300, "ext": "r", "lang": "R", "max_stars_repo_path": "to_retire/Supp._Fig._3._bad_out.of.sample_fits.r", "max_stars_repo_name": "colinaverill/NEFI_microbe", "max_stars_repo_head_hexsha": "e59ddef4aafcefdf0aff61765a8684859daad6e0", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-05-13T17:13:54.000Z", "max_stars_repo_stars_event_max_datetime": "2021-05-13T17:13:54.000Z", "max_issues_repo_path": "to_retire/Supp._Fig._3._bad_out.of.sample_fits.r", "max_issues_repo_name": "colinaverill/NEFI_microbe", "max_issues_repo_head_hexsha": "e59ddef4aafcefdf0aff61765a8684859daad6e0", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "to_retire/Supp._Fig._3._bad_out.of.sample_fits.r", "max_forks_repo_name": "colinaverill/NEFI_microbe", "max_forks_repo_head_hexsha": "e59ddef4aafcefdf0aff61765a8684859daad6e0", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 4, "max_forks_repo_forks_event_min_datetime": "2019-02-21T20:26:09.000Z", "max_forks_repo_forks_event_max_datetime": "2021-11-11T16:09:44.000Z", "avg_line_length": 33.0769230769, "max_line_length": 94, "alphanum_fraction": 0.608372093, "num_tokens": 1541}
|
"""
-------------------------------------------------------
helper
a couple of helper functions
-------------------------------------------------------
Author: Dallas Fraser
ID: 110242560
Email: fras2560@mylaurier.ca
Version: 2014-09-10
-------------------------------------------------------
"""
import networkx as nx
import unittest
def make_co_R():
    '''
    make_co_R
    assembles a co-R graph: a diamond plus vertex 4 joined to 0, 1, 2 and a
    pendant vertex 5 attached to 3
    Parameters:
        None
    Returns:
        g: the graph g (networkx)
    '''
    g = make_diamond()
    for extra in (4, 5):
        g.add_node(extra)
    for neighbour in (0, 1, 2):
        g.add_edge(neighbour, 4)
    g.add_edge(3, 5)
    return g
def make_bridge():
    '''
    make_bridge
    assembles a bridge graph: co-R with vertex 5 also joined to 0 and 1
    Parameters:
        None
    Returns:
        g: the graph g (networkx)
    '''
    g = make_co_R()
    for v in (0, 1):
        g.add_edge(v, 5)
    return g
def make_clique(n):
    '''
    make_clique
    builds the complete graph K_n on vertices 0..n-1
    Parameters:
        n: the size of the clique (int)
    Returns:
        clique: the graph (networkx)
    '''
    clique = nx.Graph()
    for vertex in range(n):
        clique.add_node(vertex)
    total = len(clique.nodes())
    # Connect every pair exactly once.
    for source in clique.nodes():
        for target in range(source + 1, total):
            clique.add_edge(source, target)
    return clique
def make_kite():
    '''
    make_kite
    assembles a kite (co-chair): a diamond with a tail vertex 4 hung off 2
    Parameters:
        None
    Returns:
        kite: the kite (Graph)
    '''
    kite = make_diamond()
    kite.add_node(4)
    kite.add_edge(2, 4)
    return kite
def make_claw():
    '''
    make_claw
    assembles a claw K_{1,3}: hub 0 adjacent to leaves 1, 2, 3
    Parameters:
        None
    Returns:
        claw: the claw (networkx)
    '''
    claw = nx.Graph()
    claw.add_nodes_from(range(4))
    hub = 0
    for leaf in range(1, 4):
        claw.add_edge(hub, leaf)
    return claw
def make_diamond():
    '''
    make_diamond
    assembles a diamond: K4 minus the edge (2, 3)
    Parameters:
        None
    Returns:
        diamond: the diamond graph (networkx)
    '''
    diamond = nx.Graph()
    diamond.add_nodes_from(range(4))
    for a, b in ((0, 1), (0, 2), (0, 3), (1, 2), (1, 3)):
        diamond.add_edge(a, b)
    return diamond
def make_co_diamond():
    '''
    make_co_diamond
    builds the complement of the diamond graph
    Parameters:
        None
    Returns:
        co_diamond: the co-diamond graph (networkx)
    '''
    diamond = make_diamond()
    return nx.complement(diamond)
def make_co_claw():
    '''
    make_co_claw
    builds the complement of the claw
    Parameters:
        None
    Returns:
        co_claw: the co_claw (networkx)
    '''
    claw = make_claw()
    return nx.complement(claw)
def make_cycle(n):
    '''
    make_cycle
    assembles the cycle C_n on vertices 0..n-1
    Parameters:
        n: the number of vertices in cycle (int)
    Returns:
        cycle: the cycle (networkx)
    Note:
        edges are added in both directions modulo n, so n == 1 yields a
        self-loop, matching the original behaviour
    '''
    cycle = nx.Graph()
    for vertex in range(n):
        cycle.add_node(vertex)
    for vertex in range(n):
        cycle.add_edge(vertex, (vertex + 1) % n)
        cycle.add_edge(vertex, (vertex - 1) % n)
    return cycle
def make_wheel(n):
    '''
    make_wheel
    assembles the wheel W_n: a cycle on n-1 vertices plus a hub joined to all
    Parameters:
        n: the number of vertices in the wheel (int)
    Returns:
        wheel: the wheel (networkx)
    '''
    hub = n - 1
    wheel = make_cycle(hub)
    wheel.add_node(hub)
    for rim in range(hub):
        wheel.add_edge(rim, hub)
    return wheel
def join(G, H):
    '''
    join
    a function which (complete) joins one graph G to graph H
    Parameters:
        G: Graph with at least one vertice (networkx)
        H: Graph with at least one vertice (networkx)
    Returns:
        F: The join of G and H (Graph)
    Note:
        assumes both graphs use consecutive integer labels 0..n-1, since
        H's vertices are relocated by adding shift = |G| — TODO confirm
    '''
    # Copy G verbatim into the result.
    F = nx.Graph()
    F.add_nodes_from(G.nodes())
    F.add_edges_from(G.edges())
    shift = G.number_of_nodes()
    # add all nodes of H
    # NOTE(review): H's nodes are added here with their ORIGINAL labels,
    # while H's edges below use labels shifted by |G|. The final node set
    # still comes out correct because every shifted node is created by
    # add_edge, but the unshifted add_node looks unintended — confirm.
    for vertex in H.nodes():
        F.add_node(vertex)
    # add all of F edges
    for e1, e2 in H.edges():
        F.add_edge(e1 + shift, e2 + shift)
    # join the two sets of nodes: every vertex of G to every (shifted)
    # vertex of H.
    for v1 in G.nodes():
        for v2 in H.nodes():
            F.add_edge(v1, v2 + shift)
    return F
def convert_to_networkx(g):
    '''
    convert_to_networkx
    converts a {'nodes':[n1],'edges':[[n1,n2]]} dictionary into a networkx
    Graph object
    Parameters:
        g: the python dictionary repesentation of a graph (dictionary)
    Returns:
        graph: the graph G (networkx)
    '''
    graph = nx.Graph()
    graph.add_nodes_from(g['nodes'])
    for u, v in g['edges']:
        graph.add_edge(u, v)
    return graph
def convert_to_d3(g):
    '''
    convert_to_d3
    converts a networkx Graph object into a {'nodes':[n1],'edges':[[n1,n2]]}
    dictionary; edges are stored exactly as g.edges() yields them
    Parameters:
        g: the graph G (networkx)
    Returns:
        graph: python dictionary representation of a graph (dictionary)
    '''
    graph = {'nodes': [], 'edges': []}
    graph['nodes'].extend(g.nodes())
    graph['edges'].extend(g.edges())
    return graph
def text_to_d3(lines):
    '''
    text_to_d3
    parses adjacency-list text lines of the form "v: a,b,c" into a d3 graph
    dictionary
    Parameters:
        lines: a list of lines from the text file (list)
    Returns:
        graph: a d3 representation of the graph
    Note:
        an unparsable node id becomes None (kept out of 'nodes'); an edge is
        skipped only if its REVERSED form is already recorded, or it is a
        self-loop
    '''
    graph = {'nodes': [], 'edges': []}
    for line in lines:
        parts = line.split(":")
        try:
            node = int(parts[0])
        except Exception:
            node = None
        if len(parts) > 1:
            for token in parts[1].replace(" ", "").split(","):
                if token == '':
                    continue
                neighbour = int(token)
                if [neighbour, node] not in graph['edges'] and node != neighbour:
                    graph['edges'].append([node, neighbour])
        if node is not None:
            graph['nodes'].append(node)
    return graph
def d3_to_text(g):
    '''
    d3_to_text
    renders a d3 graph dictionary back to "node:neighbour,neighbour" lines
    Parameters:
        g: the d3 graph representation
    Returns:
        graph: list of text representation the graph (list)
    '''
    rendered = []
    for node in g['nodes']:
        neighbours = []
        # Collect the opposite endpoint of every edge touching this node.
        for u, v in g['edges']:
            if node == u:
                neighbours.append(str(v))
            elif node == v:
                neighbours.append(str(u))
        rendered.append(str(node) + ":" + ",".join(neighbours))
    return rendered
def complement(g):
    '''
    complement
    a thin wrapper that returns the complement graph of g
    Parameters:
        g: the graph (networkx)
    Returns:
        co_g: the complement graph (networkx)
    Note:
        does not have a unittest since not needed (written by someone else)
    '''
    return nx.complement(g)
def make_co_twin_c5():
    '''
    make_co_twin_c5
    assembles a co-Twin-C5: the cycle C5 plus vertex 5 adjacent to 0, 2, 1
    Parameters:
        None
    Returns:
        g: the graph g (networkx)
    '''
    g = make_cycle(5)
    g.add_node(5)
    for neighbour in (0, 2, 1):
        g.add_edge(5, neighbour)
    return g
if __name__ == "__main__":
# import sys;sys.argv = ['', 'Test.testName']
unittest.main()
|
{"hexsha": "9d81f6ebf5d50aa404086d76b76a02f69d588483", "size": 7579, "ext": "py", "lang": "Python", "max_stars_repo_path": "inducer/helper.py", "max_stars_repo_name": "fras2560/InducedSubgraph", "max_stars_repo_head_hexsha": "be06a444a2ef0d244831ee74152a8ef2711cdbe3", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2015-10-21T20:40:48.000Z", "max_stars_repo_stars_event_max_datetime": "2015-10-21T20:40:48.000Z", "max_issues_repo_path": "inducer/helper.py", "max_issues_repo_name": "fras2560/InducedSubgraph", "max_issues_repo_head_hexsha": "be06a444a2ef0d244831ee74152a8ef2711cdbe3", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 4, "max_issues_repo_issues_event_min_datetime": "2019-10-08T17:47:58.000Z", "max_issues_repo_issues_event_max_datetime": "2021-01-18T05:03:36.000Z", "max_forks_repo_path": "inducer/helper.py", "max_forks_repo_name": "fras2560/InducedSubgraph", "max_forks_repo_head_hexsha": "be06a444a2ef0d244831ee74152a8ef2711cdbe3", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2016-05-03T00:47:48.000Z", "max_forks_repo_forks_event_max_datetime": "2016-08-31T12:54:44.000Z", "avg_line_length": 22.6916167665, "max_line_length": 79, "alphanum_fraction": 0.5482253595, "include": true, "reason": "import networkx", "num_tokens": 1990}
|
# ========== Transforms On A Group Of Signals ==========
# ----- SDWT on a set of signals -----
"""
sdwtall(x, wt[, L])
Computes the stationary discrete wavelet transform (SDWT) on each slice of signal.
# Arguments
- `x::AbstractArray{T} where T<:Number`: Input `N-1`-D signals, where each signal is sliced
at dimension `N`.
- `wt::OrthoFilter`: Orthogonal wavelet filter.
- `L::Integer`: (Default: `Wavelets.maxtransformlevels(xᵢ)`) Number of levels of wavelet
transforms.
# Returns
- `::Array{T}`: Slices of transformed signals.
# Examples
```julia
using Wavelets, WaveletsExt
# Generate random signals
x = randn(32, 5)
# Create wavelet
wt = wavelet(WT.db4)
# SDWT on all signals in x
xw = sdwtall(x, wt)
```
**See also:** [`sdwt`](@ref)
"""
function sdwtall(x::AbstractArray{T},
                 wt::OrthoFilter,
                 L::Integer = minimum(size(x)[1:end-1]) |> maxtransformlevels) where
                 T<:Number
    @assert 2 ≤ ndims(x) ≤ 3
    # Output holds L+1 (1-D signals) or 3L+1 (2-D signals) coefficient
    # slices per signal; signals are stacked along the last dimension.
    sig_size = size(x)[1:(end-1)]
    n_coefs = ndims(x) == 2 ? L+1 : 3*L+1
    n_signals = size(x)[end]
    xw = Array{T}(undef, (sig_size..., n_coefs, n_signals))
    # Transform each signal slice independently, writing in place.
    @inbounds @views for (out_slice, in_slice) in
            zip(eachslice(xw, dims=ndims(xw)), eachslice(x, dims=ndims(x)))
        sdwt!(out_slice, in_slice, wt, L)
    end
    return xw
end
"""
    isdwtall(xw, wt)
isdwtall(xw, wt, sm)
Computes the inverse stationary discrete wavelet transform (ISDWT) on each slice of signal.
# Arguments
- `xw::AbstractArray{T} where T<:Number`: SDWT-transformed signal.
- `wt::OrthoFilter`: (Default: `nothing`) Orthogonal wavelet filter.
- `sm::Integer`: If `sm` is included as an argument, the `sm`-shifted inverse transform will
be computed. This results in significantly faster computation, but fails to fully utilize
the strength of redundant wavelet transforms.
# Returns
- `::Array{T}`: Slices of reconstructed signals.
# Examples
```julia
using Wavelets, WaveletsExt
# Generate random signals
x = randn(32, 5)
# Create wavelet
wt = wavelet(WT.db4)
# SDWT on all signals in x
xw = sdwtall(x, wt)
# ISDWT on all signals in xw
x̂ = isdwtall(xw, wt)
```
**See also:** [`isdwt`](@ref)
"""
function isdwtall(xw::AbstractArray{T}, wt::OrthoFilter) where T<:Number
    @assert 3 ≤ ndims(xw) ≤ 4
    # One reconstructed signal per slice along the last dimension; drop the
    # coefficient dimension from the output shape.
    sig_size = size(xw)[1:(end-2)]
    n_signals = size(xw)[end]
    x = Array{T}(undef, (sig_size..., n_signals))
    # Invert each signal's coefficients in place.
    @inbounds @views for (rec, coefs) in
            zip(eachslice(x, dims=ndims(x)), eachslice(xw, dims=ndims(xw)))
        isdwt!(rec, coefs, wt)
    end
    return x
end
function isdwtall(xw::AbstractArray{T}, wt::OrthoFilter, sm::Integer) where T<:Number
    @assert 3 ≤ ndims(xw) ≤ 4
    # Shifted variant: reconstructs only the sm-shifted transform (faster,
    # but uses less of the redundant representation).
    sig_size = size(xw)[1:(end-2)]
    n_signals = size(xw)[end]
    x = Array{T}(undef, (sig_size..., n_signals))
    @inbounds @views for (rec, coefs) in
            zip(eachslice(x, dims=ndims(x)), eachslice(xw, dims=ndims(xw)))
        isdwt!(rec, coefs, wt, sm)
    end
    return x
end
# ----- SWPT on a set of signals -----
"""
swptall(x, wt[, L])
Computes the stationary wavelet packet transform (SWPT) on each slice of signal.
# Arguments
- `x::AbstractArray{T} where T<:Number`: Input `N-1`-D signals, where each signal is sliced
at dimension `N`.
- `wt::OrthoFilter`: Orthogonal wavelet filter.
- `L::Integer`: (Default: `Wavelets.maxtransformlevels(xᵢ)`) Number of levels of wavelet
transforms.
# Returns
- `::Array{T}`: Slices of transformed signals.
# Examples
```julia
using Wavelets, WaveletsExt
# Generate random signals
x = randn(32, 5)
# Create wavelet
wt = wavelet(WT.db4)
# SWPT on all signals in x
xw = swptall(x, wt)
```
**See also:** [`swpt`](@ref)
"""
function swptall(x::AbstractArray{T},
                 wt::OrthoFilter,
                 L::Integer = minimum(size(x)[1:end-1]) |> maxtransformlevels) where
                 T<:Number
    @assert 2 ≤ ndims(x) ≤ 3
    # Full packet transform keeps 2^L (1-D) or 4^L (2-D) leaf slices.
    sig_size = size(x)[1:(end-1)]
    n_coefs = ndims(x) == 2 ? 1<<L : Int(4^L)
    n_signals = size(x)[end]
    xw = Array{T}(undef, (sig_size..., n_coefs, n_signals))
    # Transform each signal slice independently, writing in place.
    @inbounds @views for (out_slice, in_slice) in
            zip(eachslice(xw, dims=ndims(xw)), eachslice(x, dims=ndims(x)))
        swpt!(out_slice, in_slice, wt, L)
    end
    return xw
end
"""
    iswptall(xw, wt)
iswptall(xw, wt, sm)
Computes the inverse stationary wavelet packet transform (ISWPT) on each slice of signal.
# Arguments
- `xw::AbstractArray{T} where T<:Number`: SWPT-transformed signal.
- `wt::Union{OrthoFilter, Nothing}`: (Default: `nothing`) Orthogonal wavelet filter.
- `sm::Integer`: If `sm` is included as an argument, the `sm`-shifted inverse transform will
be computed. This results in significantly faster computation, but fails to fully utilize
the strength of redundant wavelet transforms.
# Returns
- `::Array{T}`: Slices of reconstructed signals.
# Examples
```julia
using Wavelets, WaveletsExt
# Generate random signals
x = randn(32, 5)
# Create wavelet
wt = wavelet(WT.db4)
# SWPT on all signals in x
xw = swptall(x, wt)
# ISWPT on all signals in xw
x̂ = iswptall(xw, wt)
```
**See also:** [`iswpt`](@ref)
"""
function iswptall(xw::AbstractArray{T}, wt::OrthoFilter) where T<:Number
    @assert 3 ≤ ndims(xw) ≤ 4
    # One reconstructed signal per slice along the last dimension.
    sig_size = size(xw)[1:(end-2)]
    n_signals = size(xw)[end]
    x = Array{T}(undef, (sig_size..., n_signals))
    @inbounds @views for (rec, coefs) in
            zip(eachslice(x, dims=ndims(x)), eachslice(xw, dims=ndims(xw)))
        iswpt!(rec, coefs, wt)
    end
    return x
end
function iswptall(xw::AbstractArray{T}, wt::OrthoFilter, sm::Integer) where T<:Number
    @assert 3 ≤ ndims(xw) ≤ 4
    # Shifted variant: reconstructs only the sm-shifted transform.
    sig_size = size(xw)[1:(end-2)]
    n_signals = size(xw)[end]
    x = Array{T}(undef, (sig_size..., n_signals))
    @inbounds @views for (rec, coefs) in
            zip(eachslice(x, dims=ndims(x)), eachslice(xw, dims=ndims(xw)))
        iswpt!(rec, coefs, wt, sm)
    end
    return x
end
# ----- SWPD on a set of signals -----
"""
swpdall(x, wt[, L])
Computes the stationary wavelet packet decomposition (SWPD) on each slice of signal.
# Arguments
- `x::AbstractArray{T} where T<:Number`: Input `N-1`-D signals, where each signal is sliced
at dimension `N`.
- `wt::OrthoFilter`: Orthogonal wavelet filter.
- `L::Integer`: (Default: `Wavelets.maxtransformlevels(xᵢ)`) Number of levels of wavelet
transforms.
# Returns
- `::Array{T}`: Slices of transformed signals.
# Examples
```julia
using Wavelets, WaveletsExt
# Generate random signals
x = randn(32, 5)
# Create wavelet
wt = wavelet(WT.db4)
# SWPD on all signals in x
xw = swpdall(x, wt)
```
**See also:** [`swpd`](@ref)
"""
function swpdall(x::AbstractArray{T},
                 wt::OrthoFilter,
                 L::Integer = minimum(size(x)[1:end-1]) |> maxtransformlevels) where T<:Number
    @assert 2 ≤ ndims(x) ≤ 3
    signalsize = size(x)[1:(end-1)]
    # Node count of a full decomposition: binary tree (1D) or quadtree (2D) of depth L
    nnodes = ndims(x)==2 ? 1<<(L+1)-1 : sum(4 .^(0:L))
    nsignals = size(x)[end]
    xw = Array{T}(undef, (signalsize..., nnodes, nsignals))
    # Decompose each slice along the last dimension
    @inbounds @views for (xwᵢ, xᵢ) in zip(eachslice(xw, dims=ndims(xw)), eachslice(x, dims=ndims(x)))
        swpd!(xwᵢ, xᵢ, wt, L)
    end
    return xw
end
"""
iswpdall(xw, wt, L, sm)
iswpdall(xw, wt[, L])
iswpdall(xw, wt, tree, sm)
iswpdall(xw, wt, tree)
Computes the inverse stationary wavelet packet decomposition (ISWPD) on each slice of
signal.
# Arguments
- `xw::AbstractArray{T} where T<:Number`: SWPD-transformed signal.
- `wt::OrthoFilter`: Orthogonal wavelet filter.
- `L::Integer`: (Default: `minimum(size(xw)[1:end-2]) |> maxtransformlevels`) Number of
levels of wavelet transforms.
- `tree::BitVector`: Binary tree for inverse transform to be computed accordingly.
- `sm::Integer`: If `sm` is included as an argument, the `sm`-shifted inverse transform will
be computed. This results in significantly faster computation, but fails to fully utilize
the strength of redundant wavelet transforms.
# Returns
- `::Array{T}`: Slices of reconstructed signals.
# Examples
```julia
using Wavelets, WaveletsExt
# Generate random signals
x = randn(32, 5)
# Create wavelet
wt = wavelet(WT.db4)
# SWPD on all signals in x
xw = swpdall(x, wt)
# ISWPD on all signals in xw
x̂ = iswpdall(xw, wt)
x̂ = iswpdall(xw, wt, maketree(x))
x̂ = iswpdall(xw, wt, 5)
```
**See also:** [`iswpd`](@ref)
"""
# Level-based variants: build a full tree of depth L and delegate to the
# tree-based methods below.
function iswpdall(xw::AbstractArray{T},
                  wt::OrthoFilter,
                  L::Integer = minimum(size(xw)[1:end-2]) |> maxtransformlevels) where T<:Number
    tree = maketree(size(xw)[1:(end-2)]..., L)
    return iswpdall(xw, wt, tree)
end
function iswpdall(xw::AbstractArray{T}, wt::OrthoFilter, L::Integer, sm::Integer) where T<:Number
    tree = maketree(size(xw)[1:(end-2)]..., L)
    return iswpdall(xw, wt, tree, sm)
end
function iswpdall(xw::AbstractArray{T}, wt::OrthoFilter, tree::BitVector) where T<:Number
    @assert 3 ≤ ndims(xw) ≤ 4
    # Output drops the tree-node axis: (signal dims..., number of signals)
    signalsize = size(xw)[1:(end-2)]
    nsignals = size(xw)[end]
    x = Array{T}(undef, (signalsize..., nsignals))
    # Invert each slice according to the supplied decomposition tree
    @inbounds @views for (xᵢ, xwᵢ) in zip(eachslice(x, dims=ndims(x)), eachslice(xw, dims=ndims(xw)))
        iswpd!(xᵢ, xwᵢ, wt, tree)
    end
    return x
end
function iswpdall(xw::AbstractArray{T}, wt::OrthoFilter, tree::BitVector, sm::Integer) where T<:Number
    @assert 3 ≤ ndims(xw) ≤ 4
    # Output drops the tree-node axis: (signal dims..., number of signals)
    signalsize = size(xw)[1:(end-2)]
    nsignals = size(xw)[end]
    x = Array{T}(undef, (signalsize..., nsignals))
    # Invert each slice with the sm-shifted transform along the given tree
    @inbounds @views for (xᵢ, xwᵢ) in zip(eachslice(x, dims=ndims(x)), eachslice(xw, dims=ndims(xw)))
        iswpd!(xᵢ, xwᵢ, wt, tree, sm)
    end
    return x
end
|
{"hexsha": "28e9139efecfe62d1d6954a0cd7f1af0ab99e7aa", "size": 10649, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/mod/swt_all.jl", "max_stars_repo_name": "UCD4IDS/WaveletsExt.jl", "max_stars_repo_head_hexsha": "cfde3ae0cea370b7da0e8d5723f0793bb87beb68", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 4, "max_stars_repo_stars_event_min_datetime": "2021-05-03T23:33:03.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-13T07:34:47.000Z", "max_issues_repo_path": "src/mod/swt_all.jl", "max_issues_repo_name": "UCD4IDS/WaveletsExt.jl", "max_issues_repo_head_hexsha": "cfde3ae0cea370b7da0e8d5723f0793bb87beb68", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 11, "max_issues_repo_issues_event_min_datetime": "2021-04-21T22:06:58.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-05T04:44:26.000Z", "max_forks_repo_path": "src/mod/swt_all.jl", "max_forks_repo_name": "UCD4IDS/WaveletsExt.jl", "max_forks_repo_head_hexsha": "cfde3ae0cea370b7da0e8d5723f0793bb87beb68", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 5, "max_forks_repo_forks_event_min_datetime": "2021-05-19T00:46:39.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-25T09:26:50.000Z", "avg_line_length": 27.1658163265, "max_line_length": 92, "alphanum_fraction": 0.6226875763, "num_tokens": 3516}
|
"""
GANTT Chart with Matplotlib
Sukhbinder
Inspired from
Gantt Charts in Matplotlib — The Clowers Group:
http://www.clowersresearch.com/main/gantt-charts-in-matplotlib/
"""
import sys
import datetime as dt
import matplotlib.pyplot as plt
import matplotlib.font_manager as font_manager
import matplotlib.dates
from matplotlib.dates import WEEKLY,MONTHLY, DateFormatter, rrulewrapper, RRuleLocator
import numpy as np
#
# Main
#
def main():
    """Entry point: read the project label from argv and build its Gantt chart.

    Exits with a usage message instead of an uncaught IndexError when the
    label argument is missing.
    """
    if len(sys.argv) < 2:
        sys.exit("usage: gantt.plot.py <project-label>")
    label = sys.argv[1]
    step = '.gantt'
    CreateGanttChart(label, step)
#
# Functions
#
def CreateGanttChart(projectLabel, planningStep):
    """
    Create a Gantt chart with matplotlib.

    Reads '<projectLabel><planningStep>.txt', a comma-separated file with one
    task per line ('label,startdate,enddate,absenddate', dates as YYYY-MM-DD;
    lines starting with '#' are skipped) and saves the chart to
    '<projectLabel>.gantt.pdf'.
    """
    # read input Gantt data file:
    fname = projectLabel + planningStep + '.txt'
    ylabels = []
    customDates = []
    try:
        # 'with' guarantees the handle is closed (the original leaked it)
        with open(fname) as fh:
            textlist = fh.readlines()
    except OSError:
        # keep the original best-effort behaviour (skip an unreadable file),
        # but no longer swallow unrelated exceptions the bare 'except:' hid
        print("[WARNING] could not read {}".format(fname))
        return
    #
    for tx in textlist:
        if not tx.startswith('#'):
            ylabel, startdate, enddate, absenddate = tx.split(',')
            ylabels.append(ylabel.replace('\n', ''))
            customDates.append([_create_date(startdate.replace('\n', '')),
                                _create_date(enddate.replace('\n', '')),
                                _create_date(absenddate.replace('\n', ''))])
    # create the plot:
    ilen = len(ylabels)
    pos = np.arange(0.5, ilen*0.5+0.5, 0.5)
    task_dates = {}
    for i, task in enumerate(ylabels):
        task_dates[task] = customDates[i]
    fig = plt.figure(figsize=(20, 8))
    ax = fig.add_subplot(111)
    for i in range(len(ylabels)):
        start_date, end_date, absend_date = task_dates[ylabels[i]]
        # red bar spans up to the absolute end; green (planned) is drawn on top
        ax.barh((i*0.5)+0.5, absend_date - start_date, left=start_date, height=0.3, align='center', color='red', alpha=0.8)
        ax.barh((i*0.5)+0.5, end_date - start_date, left=start_date, height=0.3, align='center', color='green', alpha=0.8)
    locsy, labelsy = plt.yticks(pos, ylabels)
    plt.setp(labelsy, fontsize=14)
    # ax.axis('tight')
    ax.set_ylim(ymin=-0.1, ymax=ilen*0.5+0.5)
    ax.grid(color='grey', linestyle=':')
    ax.xaxis_date()
    # choose weeks or months as base x-axis resolution:
    # rule = rrulewrapper(WEEKLY, interval=1)
    rule = rrulewrapper(MONTHLY, interval=1)
    loc = RRuleLocator(rule)
    formatter = DateFormatter("%d-%b '%y")
    # formatter = DateFormatter("%d-%b")
    ax.xaxis.set_major_locator(loc)
    ax.xaxis.set_major_formatter(formatter)
    labelsx = ax.get_xticklabels()
    plt.setp(labelsx, rotation=30, fontsize=10)
    font = font_manager.FontProperties(size='small')
    # NOTE(review): no artist carries a label, so this legend renders empty —
    # confirm whether labels on the barh calls were intended
    ax.legend(loc=1, prop=font)
    ax.invert_yaxis()
    fig.autofmt_xdate()
    plt.savefig(projectLabel + '.gantt.pdf')
    # plt.show()
def _create_date(datetxt):
    """Convert a 'YYYY-MM-DD' string into a Matplotlib date number."""
    year, month, day = (int(part) for part in datetxt.split('-'))
    return matplotlib.dates.date2num(dt.datetime(year, month, day))
#
# Boilerplate
#
# Run only when executed as a script, not when imported as a module.
if __name__ == '__main__':
    main()
|
{"hexsha": "6376cb1af729c668f9be0d3da4ca2a2fe3fbe64d", "size": 4817, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/gantt.plot.py", "max_stars_repo_name": "jackmo375/Planner2", "max_stars_repo_head_hexsha": "61bf816113fbd31604c5bd5d686c62d4f3764892", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/gantt.plot.py", "max_issues_repo_name": "jackmo375/Planner2", "max_issues_repo_head_hexsha": "61bf816113fbd31604c5bd5d686c62d4f3764892", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/gantt.plot.py", "max_forks_repo_name": "jackmo375/Planner2", "max_forks_repo_head_hexsha": "61bf816113fbd31604c5bd5d686c62d4f3764892", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 46.7669902913, "max_line_length": 1996, "alphanum_fraction": 0.6719950176, "include": true, "reason": "import numpy", "num_tokens": 1362}
|
# -*- coding: utf-8 -*-
"""Simulation Task3.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1hujTQ6qyEX9-NPn1bD_p4D8ZpGZ0t7rw
"""
from collections import deque
import pandas as pd
import numpy as np
import scipy.stats as st
import matplotlib.pyplot as plt
pd.set_option('display.max_columns', None)
# Interactive configuration of the experiment (read once at startup).
mRT_IAT = int(input("Input mean inter-arrival time of RT messages: "))
#nonRT_IAT = int(input("Input mean inter-arrival time of non RT messages: "))
mRT_ST = int(input("Input mean ST time of an RT message: "))
mnonRT_ST = int(input("Input mean ST time of a nonRT message: "))
batch_size = int(input("Input batch size: "))
total_batches = int(input("Input the total number of batches: "))
# nonRT mean inter-arrival times swept over by the result-calculation loop below
mnonRT_IAT = [10, 15, 20, 25, 30, 35, 40]
class Simulator:
    """Event-driven simulation of one server shared by real-time (RT) and
    non-real-time (nonRT) messages.  An arriving RT message preempts a nonRT
    message in service; response times are accumulated per class in batches
    of size `batch_size` (batch-means method)."""
    def __init__(self, n_RT=0, n_nonRT=0, s=0, SCL=4, MC=0, RTCL=3, nonRTCL=5, preempted_ST = -1, \
                 mRT_IAT=7, mnonRT_IAT=6, mRT_ST=3, mnonRT_ST=3, batch_size = 1000, total_batches = 51):
        self.n_RT = n_RT #number of items in RT queue
        self.n_nonRT = n_nonRT #number of items in non RT queue
        self.s = s #sever status, 0: ideal, 1: servicing RT msg, 2: servicing nonRT msg
        self.SCL = SCL #service clock
        self.MC = MC #master clock
        self.RTCL = RTCL #next RT packet arrival time
        self.nonRTCL = nonRTCL #next non RT packet arrival time
        self.preempted_ST = preempted_ST #pre-empted service time
        self.mRT_IAT = mRT_IAT #RT msg inter-arrival time mean
        self.mnonRT_IAT = mnonRT_IAT #nonRT msg inter-arrival time mean
        self.mRT_ST = mRT_ST #RT service time mean
        self.mnonRT_ST = mnonRT_ST #nonRT service time mean
        self.RT_arrivalq = deque([]) #store the arrival time of RT msg
        self.nonRT_arrivalq = deque([])
        # event entries are [time, type]: 0 = RT arrival, 1 = nonRT arrival, 2 = service completion
        self.event_list = [[RTCL, 0], [nonRTCL, 1], [SCL, 2]]
        self.df = pd.DataFrame(columns = ['MC', 'RTCL', 'nonRTCL', 'n_RT', 'n_nonRT', 'SCL', 's', 'preempted_ST'])
        # completed response times of the current (still-filling) batch
        self.RT_completionq = deque([])
        self.nonRT_completionq = deque([])
        # per-batch summary statistics, one entry per completed batch
        self.RT_batch_mean = []
        self.nonRT_batch_mean = []
        self.RT_batch_percentile = []
        self.nonRT_batch_percentile = []
        self.msg_AT = 0#supposed to store the arrival of the current msg being processed
        self.batch_size = batch_size
        self.total_batches = total_batches
    # Exponential random variates via inverse-transform sampling
    def RT_IA_time(self): return -self.mRT_IAT*np.log(np.random.uniform())
    def nonRT_IA_time(self): return -self.mnonRT_IAT*np.log(np.random.uniform())
    def RT_S_time(self): return -self.mRT_ST*np.log(np.random.uniform())
    def nonRT_S_time(self): return -self.mnonRT_ST*np.log(np.random.uniform())
    def start_simulation(self):
        """Advance the event loop until both classes have produced
        `total_batches` batches of response-time statistics."""
        while len(self.RT_batch_mean) < self.total_batches or len(self.nonRT_batch_mean) < self.total_batches:
            if self.SCL == 0:
                # server idle: only the two arrival events are eligible
                event = min(self.event_list[:2])
            else:
                event = min(self.event_list)
            self.MC = event[0]
            if event[1] == 0:
                self.RT_arrival()
            elif event[1] == 1:
                self.nonRT_arrival()
            elif event[1] == 2:
                self.ST_completion()
    def RT_arrival(self):
        """Handle an RT arrival: enqueue it, schedule the next RT arrival and,
        if no RT message is in service, seize the server (preempting nonRT)."""
        self.RT_arrivalq.append(self.RTCL)
        self.n_RT += 1
        self.RTCL = self.MC + self.RT_IA_time()
        self.event_list[0][0] = self.RTCL
        if self.n_RT == 1 and self.s!=1:
            if self.s == 2:
                # preempt the nonRT message in service; remember its remaining time
                self.preempted_ST = self.SCL - self.MC
                if self.preempted_ST > 0:
                    self.n_nonRT += 1
                    # put the preempted message back at the head of its queue
                    self.nonRT_arrivalq.appendleft(self.msg_AT)
                elif self.preempted_ST == 0:
                    self.preempted_ST = -1
                    self.nonRT_completionq.append(self.MC - self.msg_AT) #non RT msg completes service so is not added back, add to comp q
            self.msg_AT = self.RT_arrivalq.popleft()
            self.SCL = self.MC + self.RT_S_time()
            self.event_list[2][0] = self.SCL
            self.n_RT -= 1
            self.s = 1
    def nonRT_arrival(self):
        """Handle a nonRT arrival: enqueue it, schedule the next nonRT
        arrival, and start service only if the server is idle."""
        self.nonRT_arrivalq.append(self.nonRTCL)
        self.n_nonRT += 1
        self.nonRTCL = self.MC + self.nonRT_IA_time()
        self.event_list[1][0] = self.nonRTCL
        if self.n_nonRT == 1:
            if self.s == 0:
                self.msg_AT = self.nonRT_arrivalq.popleft()
                self.SCL = self.MC + self.nonRT_S_time()
                self.event_list[2][0] = self.SCL
                self.s = 2
                self.n_nonRT -= 1
    def ST_completion(self):
        """Record the response time of the finished message, close out a full
        batch, then dispatch the next message (RT queue has priority)."""
        #on event completion add the elapsed time to the queue
        if self.s == 1:
            self.RT_completionq.append(self.MC - self.msg_AT)
            if len(self.RT_completionq) == self.batch_size:
                #one batch is done
                self.RT_batch_mean.append(np.mean(self.RT_completionq))
                self.RT_batch_percentile.append(np.percentile(self.RT_completionq, 95))
                self.RT_completionq = deque([]) #re-initialize the list
        else:
            self.nonRT_completionq.append(self.MC - self.msg_AT)
            if len(self.nonRT_completionq) == self.batch_size:
                self.nonRT_batch_mean.append(np.mean(self.nonRT_completionq))
                self.nonRT_batch_percentile.append(np.percentile(self.nonRT_completionq, 95))
                self.nonRT_completionq = deque([])
        #set up the next SCL
        if len(self.RT_arrivalq) > 0:
            self.SCL = self.MC + self.RT_S_time()
            self.s = 1
            self.n_RT -= 1
            self.msg_AT = self.RT_arrivalq.popleft()
            self.event_list[2][0] = self.SCL
        elif len(self.nonRT_arrivalq) > 0:
            self.msg_AT = self.nonRT_arrivalq.popleft()
            self.n_nonRT -= 1
            self.s = 2
            if self.preempted_ST > 0:
                # resume the preempted nonRT message with its remaining service time
                self.SCL = self.MC + self.preempted_ST
                self.preempted_ST = -1
            else:
                self.SCL = self.MC + self.nonRT_S_time()
            self.event_list[2][0] = self.SCL
        else:
            # both queues empty: server goes idle until the next arrival
            self.s = 0
            self.SCL = 0
            self.event_list[2][0] = 0
    def simulator_data(self):
        """Return the current state as a list matching self.df's columns."""
        data = [self.MC, self.RTCL, self.nonRTCL, self.n_RT, self.n_nonRT, self.SCL, self.s, self.preempted_ST]
        return data
    def write_to_file(self, file_path):
        """Dump the trace DataFrame to CSV (self.df is not filled anywhere in this file)."""
        self.df.to_csv(file_path, index=False)
# Summary tables, one row per nonRT mean inter-arrival time:
# *_mbatch hold statistics over batch means, *_pbatch over batch 95th percentiles.
df_RT_mbatch = pd.DataFrame(columns=['MIAT(nonRT)', 'mean', '95th percentile', 'confidence interval', 'error'])
df_nonRT_mbatch = pd.DataFrame(columns=['MIAT(nonRT)', 'mean', '95th percentile', 'confidence interval', 'error'])
df_RT_pbatch = pd.DataFrame(columns=['MIAT(nonRT)', 'mean', '95th percentile', 'confidence interval', 'error'])
df_nonRT_pbatch = pd.DataFrame(columns=['MIAT(nonRT)', 'mean', '95th percentile', 'confidence interval', 'error'])
"""Result calculation"""
for mean_time in mnonRT_IAT:
np.random.seed(0)
simulator = Simulator(n_RT=0, n_nonRT=0, s=2, SCL=4, MC=0, RTCL=3, nonRTCL=5, preempted_ST=-1, \
mRT_IAT=mRT_IAT, mnonRT_IAT=mean_time, mRT_ST=mRT_ST, \
mnonRT_ST=mnonRT_ST, batch_size=batch_size, total_batches=total_batches)
simulator.start_simulation()
simulator.RT_batch_mean = simulator.RT_batch_mean[1:51]
simulator.RT_batch_percentile = simulator.RT_batch_percentile[1:51]
simulator.nonRT_batch_mean = simulator.nonRT_batch_mean[1:51]
simulator.nonRT_batch_percentile = simulator.nonRT_batch_percentile[1:51]
#for mean batch RT
rt_mean = np.mean(simulator.RT_batch_mean)
rt_percentile = np.percentile(simulator.RT_batch_mean, 95)
rt_confidence_interval = st.t.interval(alpha=0.95, df=len(simulator.RT_batch_mean)-1, loc=rt_mean, scale=st.sem(simulator.RT_batch_mean))
rt_error = rt_confidence_interval[1] - rt_confidence_interval[0]
df_RT_mbatch = df_RT_mbatch.append(pd.Series([mean_time, rt_mean, rt_percentile, rt_confidence_interval, rt_error], index=df_RT_mbatch.columns), ignore_index=True)
#for mean batcb non RT
nonrt_mean = np.mean(simulator.nonRT_batch_mean)
nonrt_percentile = np.percentile(simulator.nonRT_batch_mean, 95)
nonrt_confidence_interval = st.t.interval(0.95, len(simulator.nonRT_batch_mean)-1, nonrt_mean, st.sem(simulator.nonRT_batch_mean))
nonrt_error = nonrt_confidence_interval[1] - nonrt_confidence_interval[0]
df_nonRT_mbatch = df_nonRT_mbatch.append(pd.Series([mean_time, nonrt_mean, nonrt_percentile, nonrt_confidence_interval, nonrt_error], index=df_nonRT_mbatch.columns), ignore_index=True)
#for percentile batch RT
rt_mean = np.mean(simulator.RT_batch_percentile)
rt_percentile = np.percentile(simulator.RT_batch_percentile, 95)
rt_confidence_interval = st.t.interval(alpha=0.95, df=len(simulator.RT_batch_percentile)-1, loc=rt_mean, scale=st.sem(simulator.RT_batch_percentile))
rt_error = rt_confidence_interval[1] - rt_confidence_interval[0]
df_RT_pbatch = df_RT_pbatch.append(pd.Series([mean_time, rt_mean, rt_percentile, rt_confidence_interval, rt_error], index=df_RT_pbatch.columns), ignore_index=True)
#for percentile batch non RT
nonrt_mean = np.mean(simulator.nonRT_batch_percentile)
nonrt_percentile = np.percentile(simulator.nonRT_batch_percentile, 95)
nonrt_confidence_interval = st.t.interval(0.95, len(simulator.nonRT_batch_percentile)-1, nonrt_mean, st.sem(simulator.nonRT_batch_percentile))
nonrt_error = nonrt_confidence_interval[1] - nonrt_confidence_interval[0]
df_nonRT_pbatch = df_nonRT_pbatch.append(pd.Series([mean_time, nonrt_mean, nonrt_percentile, nonrt_confidence_interval, nonrt_error], index=df_nonRT_pbatch.columns), ignore_index=True)
print("Observations for RT messages (Mean batches)")
print(df_RT_mbatch)
print("Observations for nonRT messages (Mean batches)")
print(df_nonRT_mbatch)
fig, ax = plt.subplots(figsize=(7,6))
x = np.arange(len(mnonRT_IAT))
width = 0.4
rect1 = ax.bar(x-width/2, height=df_RT_mbatch['mean'], width=width, yerr=df_RT_mbatch['error'], alpha=0.5, ecolor='black', capsize=10, label='RT msgs')
rect2 = ax.bar(x+width/2, height=df_nonRT_mbatch['mean'], width=width, yerr=df_nonRT_mbatch['error'], alpha=0.5, ecolor='black', capsize=10, label='nonRT msgs' )
ax.set_ylabel('Mean Response Time')
ax.set_xlabel('NonRT mean IAT')
ax.set_title('RT vs NonRT Response Time Mean')
ax.set_xticks(x)
ax.set_xticklabels(mnonRT_IAT)
ax.legend()
fig.tight_layout()
plt.show()
print("Observations for RT messages (Percentile batches)")
print(df_RT_pbatch)
print("Observations for nonRT messages (Percentile batches)")
print(df_nonRT_pbatch)
fig, ax = plt.subplots(figsize=(7,6))
x = np.arange(len(mnonRT_IAT))
width = 0.4
rect1 = ax.bar(x-width/2, height=df_RT_pbatch['mean'], width=width, yerr=df_RT_pbatch['error'], alpha=0.5, ecolor='black', capsize=10, label='RT msgs')
rect2 = ax.bar(x+width/2, height=df_nonRT_pbatch['mean'], width=width, yerr=df_nonRT_pbatch['error'], alpha=0.5, ecolor='black', capsize=10, label='nonRT msgs' )
ax.set_ylabel('Mean Response Time')
ax.set_xlabel('NonRT mean IAT')
ax.set_title('RT vs NonRT Response Time Mean')
ax.set_xticks(x)
ax.set_xticklabels(mnonRT_IAT)
ax.legend()
fig.tight_layout()
plt.show()
|
{"hexsha": "4a5004dfb8a58465aa5037c6002089fdbcadaeef", "size": 10781, "ext": "py", "lang": "Python", "max_stars_repo_path": "Simulation Task/Task 3/simulation_task3.py", "max_stars_repo_name": "ayush-bisht/IoT-Analytics", "max_stars_repo_head_hexsha": "edd41f9fe6749e7d7b0ce5b15f0e7b7e0db3e66a", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Simulation Task/Task 3/simulation_task3.py", "max_issues_repo_name": "ayush-bisht/IoT-Analytics", "max_issues_repo_head_hexsha": "edd41f9fe6749e7d7b0ce5b15f0e7b7e0db3e66a", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Simulation Task/Task 3/simulation_task3.py", "max_forks_repo_name": "ayush-bisht/IoT-Analytics", "max_forks_repo_head_hexsha": "edd41f9fe6749e7d7b0ce5b15f0e7b7e0db3e66a", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 39.4908424908, "max_line_length": 186, "alphanum_fraction": 0.7029960115, "include": true, "reason": "import numpy,import scipy", "num_tokens": 3185}
|
#!/usr/bin/env python
import rospy
import sys
import numpy as np
import math
from geometry_msgs.msg import Vector3
from std_msgs.msg import Float64
from dynamixel_msgs.msg import JointState
PI = 3.14159265359
class TiltController:
    """Sweeps a tilt joint back and forth between +/-1.65 rad by publishing
    the opposite extreme whenever the measured position passes +/-1.6 rad."""
    def __init__(self):
        # Track the measured joint position via the controller's state topic
        self.leftMotorSubscriber = rospy.Subscriber("/tilt_controller/state", JointState, self.leftAngleCallback)
        # Position-command publisher (latched so the initial command sticks)
        self.leftMotorPublisher = rospy.Publisher("/tilt_controller/command", Float64, queue_size=5, latch=True)
        self.start = 0.0
        self.motorPublish = Float64()
        self.currentPosition = 1.65
        self.motorPublish.data = self.currentPosition
        self.leftMotorPublisher.publish(self.motorPublish)
        self.state = True
        # give the controller time to reach the initial position
        rospy.sleep(2)

    def deg2rad(self, value):
        """Convert degrees to radians.

        Uses math.radians (the module is already imported) instead of the
        original hand-rolled multiplication by a truncated PI constant.
        """
        return math.radians(value)

    def leftAngleCallback(self, msg):
        # Cache the latest measured joint position reported by the controller
        self.currentPosition = msg.current_pos
        print (self.currentPosition)

    def startLoop(self):
        """At 10 Hz, command the opposite extreme once an endpoint is passed."""
        rate = rospy.Rate(10) # 10hz
        while not rospy.is_shutdown():
            rate.sleep()
            if self.currentPosition >= 1.6 :
                self.motorPublish.data = -1.65
                self.leftMotorPublisher.publish(self.motorPublish)
            elif self.currentPosition <= -1.6:
                self.motorPublish.data = 1.65
                self.leftMotorPublisher.publish(self.motorPublish)
            else:
                print ("Do nothing")
def main():
    """Initialise the ROS node and run the tilt sweep until shutdown."""
    rospy.init_node('TiltController', anonymous = True)
    tiltController = TiltController()
    try:
        tiltController.startLoop()
        # rospy.spin()
    except KeyboardInterrupt:
        # print() call is valid under both Python 2 and 3; the original used
        # a Python-2-only print statement
        print("Shutting ArucoFinder node down")
# Start the node only when executed directly, not when imported.
if __name__ == '__main__':
    main()
|
{"hexsha": "427f2f48bd507d97a3166970f7fdee55099f75fe", "size": 1854, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/platform_controller/scripts/tiltController.py", "max_stars_repo_name": "ahmohamed1/activeStereoVisionPlatform", "max_stars_repo_head_hexsha": "6c928ca242e4de68c7b15a8748bff1d9f7fa1382", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/platform_controller/scripts/tiltController.py", "max_issues_repo_name": "ahmohamed1/activeStereoVisionPlatform", "max_issues_repo_head_hexsha": "6c928ca242e4de68c7b15a8748bff1d9f7fa1382", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/platform_controller/scripts/tiltController.py", "max_forks_repo_name": "ahmohamed1/activeStereoVisionPlatform", "max_forks_repo_head_hexsha": "6c928ca242e4de68c7b15a8748bff1d9f7fa1382", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 27.671641791, "max_line_length": 113, "alphanum_fraction": 0.6510248112, "include": true, "reason": "import numpy", "num_tokens": 417}
|
"""
AbstractInput
Abstract supertype for all input types.
### Notes
The input types defined here implement an iterator interface, such that other methods
can build upon the behavior of inputs which are either constant or varying.
Iteration is supported with an index number called *iterator state*.
The iteration function `Base.iterate` takes and returns a tuple (`input`, `state`),
where `input` represents the value of the input, and `state` is an index which
counts the number of times this iterator was called.
A convenience function `nextinput(input, n)` is also provided and it returns the
first `n` elements of `input`.
"""
abstract type AbstractInput end
"""
ConstantInput{UT} <: AbstractInput
Type representing an input that remains constant in time.
### Fields
- `U` -- input set
### Examples
The constant input holds a single element and its length is infinite.
To access the field `U`, you can use Base's `iterate` given a state, or the method
`nextinput` given the number of desired input elements:
```jldoctest constant_input
julia> c = ConstantInput(-1//2)
ConstantInput{Rational{Int64}}(-1//2)
julia> iterate(c, 1)
(-1//2, nothing)
julia> iterate(c, 2)
(-1//2, nothing)
julia> collect(nextinput(c, 4))
4-element Array{Rational{Int64},1}:
-1//2
-1//2
-1//2
-1//2
```
The elements of this input are rational numbers:
```jldoctest constant_input
julia> eltype(c)
Rational{Int64}
```
To transform a constant input, you can use `map` as in:
```jldoctest constant_input
julia> map(x->2*x, c)
ConstantInput{Rational{Int64}}(-1//1)
```
"""
struct ConstantInput{UT} <: AbstractInput
    U::UT # the single input set, returned for every iteration state
end
# Iterator interface: a constant input behaves as an infinite stream of `U`.
function Base.eltype(::Type{ConstantInput{UT}}) where {UT}
    return UT
end
function Base.iterate(input::ConstantInput, state::Union{Int, Nothing}=nothing)
    return (input.U, nothing)
end
Base.IteratorSize(::Type{<:ConstantInput}) = Base.IsInfinite()
Base.IteratorEltype(::Type{<:ConstantInput}) = Base.HasEltype()
# Apply `f` to the wrapped set, producing a new constant input.
function Base.map(f::Function, c::ConstantInput)
    return ConstantInput(f(c.U))
end
"""
nextinput(input::ConstantInput, n::Int=1)
Returns the first `n` elements of this input.
### Input
- `input` -- a constant input
- `n` -- (optional, default: `1`) the number of desired elements
### Output
A repeated iterator that generates `n` equal samples of this input.
"""
nextinput(input::ConstantInput, n::Int=1) = Base.Iterators.repeated(input.U, n)
"""
VaryingInput{UT, VUT<:AbstractVector{UT}} <: AbstractInput
Type representing an input that may vary with time.
### Fields
- `U` -- vector of input sets
### Examples
The varying input holds a vector and its length equals the number
of elements in the vector. Consider an input given by a vector of rational numbers:
```jldoctest varying_input
julia> v = VaryingInput([-1//2, 1//2])
VaryingInput{Rational{Int64},Array{Rational{Int64},1}}(Rational{Int64}[-1//2, 1//2])
julia> length(v)
2
julia> eltype(v)
Rational{Int64}
```
Base's `iterate` method receives the input and an integer state and returns the
input element and the next iteration state:
```jldoctest varying_input
julia> iterate(v, 1)
(-1//2, 2)
julia> iterate(v, 2)
(1//2, 3)
```
The method `nextinput` receives a varying input and an integer `n` and returns
an iterator over the first `n` elements of this input (where `n=1` by default):
```jldoctest varying_input
julia> typeof(nextinput(v))
Base.Iterators.Take{VaryingInput{Rational{Int64},Array{Rational{Int64},1}}}
julia> collect(nextinput(v, 1))
1-element Array{Rational{Int64},1}:
-1//2
julia> collect(nextinput(v, 2))
2-element Array{Rational{Int64},1}:
-1//2
1//2
```
You can collect the inputs in an array, or equivalently use list comprehension,
(or use a `for` loop):
```jldoctest varying_input
julia> collect(v)
2-element Array{Rational{Int64},1}:
-1//2
1//2
julia> [2*vi for vi in v]
2-element Array{Rational{Int64},1}:
-1//1
1//1
```
Since this input type is finite, querying more elements than its length returns
the full vector:
```jldoctest varying_input
julia> collect(nextinput(v, 4))
2-element Array{Rational{Int64},1}:
-1//2
1//2
```
To transform a varying input, you can use `map` as in:
```jldoctest varying_input
julia> map(x->2*x, v)
VaryingInput{Rational{Int64},Array{Rational{Int64},1}}(Rational{Int64}[-1//1, 1//1])
```
"""
struct VaryingInput{UT, VUT<:AbstractVector{UT}} <: AbstractInput
    U::VUT # input sequence, one set per iteration state
end
# Iterator interface: a varying input is a finite sequence of input sets.
Base.eltype(::Type{VaryingInput{UT, VUT}}) where {UT, VUT} = UT
# Return the element at `state` and advance, or `nothing` once exhausted.
function Base.iterate(input::VaryingInput, state::Int=1)
    state > length(input.U) && return nothing
    return (input.U[state], state + 1)
end
Base.length(input::VaryingInput) = length(input.U)
Base.IteratorSize(::Type{<:VaryingInput}) = Base.HasLength()
Base.IteratorEltype(::Type{<:VaryingInput}) = Base.HasEltype()
# Apply `f` elementwise over the sequence, producing a new varying input.
Base.map(f::Function, v::VaryingInput) = VaryingInput(f.(v.U))
"""
nextinput(input::VaryingInput, n::Int=1)
Returns the first `n` elements of this input.
### Input
- `input` -- varying input
- `n` -- (optional, default: `1`) number of desired elements
### Output
An iterator of type `Base.Iterators.Take` that represents at most the first
`n` elements of this input.
"""
nextinput(input::VaryingInput, n::Int=1) = Base.Iterators.take(input, n)
|
{"hexsha": "2dfd8441c471ea6546d92e87e0704f03c1437e42", "size": 5233, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/inputs.jl", "max_stars_repo_name": "UnofficialJuliaMirrorSnapshots/MathematicalSystems.jl-d14a8603-c872-5ed3-9ece-53e0e82e39da", "max_stars_repo_head_hexsha": "af916edc33ab9d27f064a8be5c96a5cb954558b4", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-02-23T15:21:47.000Z", "max_stars_repo_stars_event_max_datetime": "2020-02-23T15:21:47.000Z", "max_issues_repo_path": "src/inputs.jl", "max_issues_repo_name": "sthagen/MathematicalSystems.jl", "max_issues_repo_head_hexsha": "403c2e5d64a10054d7d8a678e6e04acb7020d7f4", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/inputs.jl", "max_forks_repo_name": "sthagen/MathematicalSystems.jl", "max_forks_repo_head_hexsha": "403c2e5d64a10054d7d8a678e6e04acb7020d7f4", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 24.1152073733, "max_line_length": 91, "alphanum_fraction": 0.7089623543, "num_tokens": 1552}
|
import numpy as np
from .PSpecCls import PSpecCls
def CombinePSpecCls(A):
    '''
    Merge an array/list/tuple of PSpecCls objects into a single PSpecCls.

    All axis labels and scaling options are assumed identical across the
    inputs; the configuration of the first element is used for the output.

    Input
    =====
    A : array/list/tuple
        Each element should be a PSpecCls object

    Returns
    =======
    PSpecCls
    '''
    total = np.size(A)
    # Seed the output with the configuration of the first element
    first = A[0]
    out = PSpecCls(SpecType=first.SpecType,
                   xlabel=first.xlabel,
                   ylabele=first.ylabele,
                   ylabelv=first.ylabelv,
                   zlabelp=first.zlabelp,
                   zlabelf=first.zlabelf,
                   ylog=first._ylog,
                   zlog=first._zlog,
                   ScaleType=first._ScaleType,
                   nStd=first._nStd)
    # Copy every stored element of every input object into the output
    for i in range(total):
        spec = A[i]
        for j in range(spec.n):
            out.AddData(spec.Date[j], spec.ut[j], spec.Epoch[j], spec.Energy[j],
                        spec.Spec[j], ew=spec.ew[j], dt=spec.dt[j],
                        Meta=spec.Meta[j], Label=spec.Label[j])
    return out
|
{"hexsha": "64b5f47f0c6ff572589715a588d4e0089330d85c", "size": 958, "ext": "py", "lang": "Python", "max_stars_repo_path": "Arase/Tools/CombinePSpecCls.py", "max_stars_repo_name": "mattkjames7/Arase", "max_stars_repo_head_hexsha": "996167be35a13bbb1fdddfbe75e3a06d124b1d25", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Arase/Tools/CombinePSpecCls.py", "max_issues_repo_name": "mattkjames7/Arase", "max_issues_repo_head_hexsha": "996167be35a13bbb1fdddfbe75e3a06d124b1d25", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2021-06-10T22:51:09.000Z", "max_issues_repo_issues_event_max_datetime": "2021-06-10T22:51:09.000Z", "max_forks_repo_path": "Arase/Tools/CombinePSpecCls.py", "max_forks_repo_name": "mattkjames7/Arase", "max_forks_repo_head_hexsha": "996167be35a13bbb1fdddfbe75e3a06d124b1d25", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 22.2790697674, "max_line_length": 147, "alphanum_fraction": 0.6409185804, "include": true, "reason": "import numpy", "num_tokens": 327}
|
#import uuid
import argparse
import glob
import os
import tifffile
import numpy as np
# Example usage:
# python im2npy.py --source_dir="C:\...\dataset\PAN\tif" --save_to="C:\...\dataset\PAN\all_pan"
parse = argparse.ArgumentParser(description= 'Converts image into numpy array.')
# --source_dir was read below via args.source_dir but never declared: declare it.
parse.add_argument('--source_dir', help = '[DIRECTORY] - Folder that contains the source images.')
parse.add_argument('--save_to', help = '[DIRECTORY] - Where to save the numpy array?')
# NOTE: --extension was registered twice in the original, which makes argparse
# raise ArgumentError at startup; it is registered exactly once here.  The
# stray `args = pare.parse_args()` (NameError: `pare`) is removed as well.
parse.add_argument('--extension', help = 'Image extension', default = 'tif')
args = parse.parse_args()
def get_image_ID_generator(source_dir, extension):
    """Yield the growing list of image IDs found in *source_dir*.

    An ID is the last two '_'-separated fields of a file's base name
    (e.g. ``pan_001_002.tif`` -> ``001_002``).  The accumulating list is
    yielded once per matching image, so callers doing ``next(...)[-1]``
    get the ID of the image being processed.

    Fixes over the original:
    - glob once instead of re-globbing on every loop iteration;
    - iterate over the matched images instead of ``range(len(os.listdir(...)))``,
      which counted *all* directory entries and raised IndexError whenever the
      directory contained files that do not match the extension;
    - build the pattern with ``os.path.join`` so it also works on POSIX
      (a literal backslash in the pattern only matched on Windows).
    """
    images = glob.glob(os.path.join(source_dir, "*.{}".format(extension)))
    IDs = []
    for image_path in images:
        base_name = os.path.splitext(image_path)[0]
        parts = base_name.split("_")
        IDs.append(parts[-2] + "_" + parts[-1])
        yield IDs
def im2npy(source_dir, save_to, image_extension):
    """Read every ``*.{image_extension}`` image in *source_dir* with tifffile
    and save it as ``np-<ID>.npy`` inside *save_to*.

    Raises
    ------
    NameError
        If *save_to* does not exist.  NOTE(review): NameError is an odd
        exception type for a missing directory, but it is kept so existing
        callers that catch it keep working.
    """
    if not os.path.exists(save_to):
        raise NameError("[WARNING] Could not find the target directory : [save_to]. Check if the directory is readable")
    # os.path.join keeps the original Windows behaviour and also works on POSIX.
    images = glob.glob(os.path.join(source_dir, '*.{}'.format(image_extension)))
    print("\n[INFO] There are {} images in the folder that meets the searching criteria.".format(len(images)))
    IDs = get_image_ID_generator(source_dir = source_dir, extension = image_extension)
    iter_IDs = iter(IDs)
    count = 0  # fixes NameError on the final print when `images` is empty
    for i, image in enumerate(images):
        image = tifffile.imread(image)  # added later; make sure the image is opened and read correctly!
        # The generator yields its accumulating ID list; ID[-1] is this image's ID.
        ID = next(iter_IDs)
        np_img = np.asarray(image)
        np.save(os.path.join(save_to, 'np-' + ID[-1] + '.npy'), np_img)
        count = i + 1
    print("[INFO] ENDED. {} numpy arrrays are created in total.".format(count))
# Run the conversion only when executed as a script, not when imported.
if __name__ == "__main__":
    im2npy(source_dir = args.source_dir, save_to = args.save_to, image_extension = args.extension)
|
{"hexsha": "3fcef12abdb6d6a1ba10b2c5b9846a2e8cb5521a", "size": 2137, "ext": "py", "lang": "Python", "max_stars_repo_path": "Script/im2npy.py", "max_stars_repo_name": "ataozarslan/GEO_tutorial", "max_stars_repo_head_hexsha": "c02c8bcfbdf89deabaaea259195e57d669ad3f69", "max_stars_repo_licenses": ["Unlicense"], "max_stars_count": 4, "max_stars_repo_stars_event_min_datetime": "2020-11-29T19:01:44.000Z", "max_stars_repo_stars_event_max_datetime": "2021-08-04T12:57:18.000Z", "max_issues_repo_path": "Script/im2npy.py", "max_issues_repo_name": "yunusdasdelen/GEO_tutorial", "max_issues_repo_head_hexsha": "0b8bdc897dd8e0276577e7f6e4a22ebd2d4ff382", "max_issues_repo_licenses": ["Unlicense"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2020-11-22T19:44:13.000Z", "max_issues_repo_issues_event_max_datetime": "2020-11-23T13:16:06.000Z", "max_forks_repo_path": "Script/im2npy.py", "max_forks_repo_name": "yunusdasdelen/GEO_tutorial", "max_forks_repo_head_hexsha": "0b8bdc897dd8e0276577e7f6e4a22ebd2d4ff382", "max_forks_repo_licenses": ["Unlicense"], "max_forks_count": 17, "max_forks_repo_forks_event_min_datetime": "2020-11-23T13:34:10.000Z", "max_forks_repo_forks_event_max_datetime": "2021-01-16T11:42:57.000Z", "avg_line_length": 38.1607142857, "max_line_length": 241, "alphanum_fraction": 0.679457183, "include": true, "reason": "import numpy", "num_tokens": 534}
|
#!/usr/bin/python
# -*- coding: iso-8859-1 -*-
from numpy import array
def dY_dt(Y,t,p):
    """Right-hand side of the cell-cycle ODE system (odeint-style signature).

    Parameters
    ----------
    Y : sequence of 9 floats
        Current state vector.  Judging by the per-row modifier names, the
        ordering appears to be (mCLN, Cln, B_R, B_Am, B_Ad, mB_R, mB_A,
        mCLB, Clb) -- assumption, TODO confirm against the caller.
    t : float
        Time; unused here but required by scipy-style integrators.
    p : sequence of >= 12 floats
        Rate parameters p[0]..p[11].

    Returns
    -------
    numpy.ndarray
        The 9 time derivatives dY/dt.

    NOTE(review): the k__* modifier factors are free (global) names that are
    not defined in this file as shown -- confirm where they are injected.
    NOTE(review): row 3 uses Y[5] while rows 4-5 use Y[6]; looks deliberate
    (different mRNA species) but worth double-checking.
    """
    return array([
    (- p[0] * Y[0]) * ( k__mCLN ),
    (p[10] * p[1] * Y[0]/p[11] * Y[2]) * ( k__Cln_plus ) - (p[2] * Y[1]) * ( k__Cln_minus ),
    (p[6] * p[10] * Y[5]/p[11] * (p[3]/(p[3]+p[4]+p[5])) * Y[2]) * ( k__B_R ),
    (p[6] * p[10] * Y[6]/p[11] * (p[4]/(p[3]+p[4]+p[5])) * Y[2]) * ( k__B_Am ),
    (p[6] * p[10] * Y[6]/p[11] * (p[5]/(p[3]+p[4]+p[5])) * Y[2]) * ( k__B_Ad ),
    (0) * ( k__mB_R ),
    (0) * ( k__mB_A ),
    (- p[7] * Y[7]) * ( k__mCLB ),
    ((p[10] * p[8] * Y[7]/p[11] * Y[2]) * (Y[4]**1.5/(Y[4]**1.5 + Y[3]**1.5))) * ( k__Clb_plus ) - (p[9] * Y[8]) * ( k__Clb_minus )
    ])
|
{"hexsha": "da19b9f9f504b02a89227a8caf4a88f34d1ac17a", "size": 674, "ext": "py", "lang": "Python", "max_stars_repo_path": "core_eq_system_w_modifiers.py", "max_stars_repo_name": "thomasspiesser/MYpop", "max_stars_repo_head_hexsha": "aa26659af75e99189e77c4f4e046985e536918b1", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "core_eq_system_w_modifiers.py", "max_issues_repo_name": "thomasspiesser/MYpop", "max_issues_repo_head_hexsha": "aa26659af75e99189e77c4f4e046985e536918b1", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "core_eq_system_w_modifiers.py", "max_forks_repo_name": "thomasspiesser/MYpop", "max_forks_repo_head_hexsha": "aa26659af75e99189e77c4f4e046985e536918b1", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 39.6470588235, "max_line_length": 129, "alphanum_fraction": 0.409495549, "include": true, "reason": "from numpy", "num_tokens": 387}
|
function fill_cpmodel!(opt::Optimizer)
    # Populate the CP model from the MOI model, in dependency order:
    bridge_variables!(opt)    # variables first
    bridge_affines!(opt)      # then the affine helper variables
    bridge_constraints!(opt)  # then the constraints
    bridge_objective!(opt)    # and finally the objective
    return opt
end
function bridge_objective!(opt::Optimizer)
    # No objective declared in the MOI model: nothing to transfer.
    oid = opt.moimodel.objective_identifier
    isnothing(oid) && return
    SeaPearl.addObjective!(opt.cpmodel, get_cp_variable(opt, oid))
end
function get_cp_variable(opt::Optimizer, index::MOI.VariableIndex)
    # Resolve a MOI variable index to its SeaPearl variable through the stored id.
    return opt.cpmodel.variables[opt.moimodel.variables[index.value].cp_identifier]
end
function get_cp_variable(opt::Optimizer, index::AffineIndex)
    # Resolve an affine-function index to the CP variable that represents it.
    return opt.cpmodel.variables[opt.moimodel.affines[index.value].cp_identifier]
end
function bridge_constraints!(opt::Optimizer)
    # Convert each MOI constraint; ones without a CP counterpart are skipped.
    for moiconstraint in opt.moimodel.constraints
        cpconstraint = create_CPConstraint(moiconstraint, opt)
        isnothing(cpconstraint) || push!(opt.cpmodel.constraints, cpconstraint)
    end
end
function bridge_variables!(optimizer::Optimizer)
    # Create one SeaPearl.IntVar per MOI variable.  SeaPearl works on finite
    # integer domains, so every variable must carry explicit bounds.
    # (The original kept a counter `i` that was incremented but never read;
    # it has been removed as dead code.)
    for x in optimizer.moimodel.variables
        @assert !isnothing(x.min) "Every variable must have a lower bound"
        @assert !isnothing(x.max) "Every variable must have an upper bound"
        # Fresh CP identifier: one past the number of already-registered variables.
        x.cp_identifier = string(length(keys(optimizer.cpmodel.variables)) + 1)
        newvariable = SeaPearl.IntVar(x.min, x.max, x.cp_identifier, optimizer.cpmodel.trailer)
        SeaPearl.addVariable!(optimizer.cpmodel, newvariable)
    end
end
function build_affine_term!(optimizer::Optimizer, vi::MOI.VariableIndex, coeff::Float64)
    # Build (or reuse) a CP variable representing coeff * vi.
    @assert isinteger(coeff) "You can't give a float coefficient."
    intCoeff = convert(Int, coeff)
    @assert intCoeff != 0 "Coefficient cannot be null."
    # Coefficient 1: the CP variable itself, no view needed.
    intCoeff == 1 && return get_cp_variable(optimizer, vi)
    if intCoeff < 0
        # Negative coefficient: build |coeff| * x, then wrap it in an opposite view.
        inner = build_affine_term!(optimizer, vi, -coeff)
        opp_id = string(length(keys(optimizer.cpmodel.variables)) + 1)
        opp = IntVarViewOpposite(inner, opp_id)
        addVariable!(optimizer.cpmodel, opp)
        return opp
    end
    # Positive coefficient > 1: a multiplication view over the bare variable.
    base = build_affine_term!(optimizer, vi, 1.)
    mul_id = string(length(keys(optimizer.cpmodel.variables)) + 1)
    mul = IntVarViewMul(base, intCoeff, mul_id)
    addVariable!(optimizer.cpmodel, mul)
    return mul
end
function build_affine!(optimizer::Optimizer, aff_function::MOIAffineFunction)
    # Materialise an affine function sum(coeff_i * x_i) + c as a single CP
    # variable `sum`, linked by a SumToZero constraint over the term views and
    # the opposite of `sum`.  Returns the CP identifier of that variable.
    # Already bridged: reuse the memoised identifier.
    if !isnothing(aff_function.cp_identifier)
        return aff_function.cp_identifier
    end
    # One slot per term, plus one for the opposite of the sum variable (filled at the end).
    vars = Vector{AbstractIntVar}(undef, length(aff_function.content.terms) + 1)
    for i in 1:length(aff_function.content.terms)
        vi = aff_function.content.terms[i].variable_index
        coeff = aff_function.content.terms[i].coefficient
        x = build_affine_term!(optimizer, vi, coeff)
        vars[i] = x
    end
    # Dealing with the constant
    c = aff_function.content.constant
    c = convert(Int, c)
    if c != 0
        # NOTE(review): the constant is folded into the LAST term via an offset
        # view; if an affine function had zero terms and a non-zero constant,
        # vars[0] would be indexed here — confirm that case cannot occur upstream.
        x = vars[length(aff_function.content.terms)]
        new_id = string(length(keys(optimizer.cpmodel.variables)) + 1)
        new_var = IntVarViewOffset(x, c, new_id)
        addVariable!(optimizer.cpmodel, new_var)
        vars[length(aff_function.content.terms)] = new_var
    end
    # Creating the variable that will be equal to the affine function
    # (its domain bounds are the sums of the term-wise domain bounds).
    minSum, maxSum = 0, 0
    for i in 1:length(aff_function.content.terms)
        minSum += minimum(vars[i].domain)
        maxSum += maximum(vars[i].domain)
    end
    new_id = string(length(keys(optimizer.cpmodel.variables)) + 1)
    sum = IntVar(minSum, maxSum, new_id, optimizer.cpmodel.trailer)
    addVariable!(optimizer.cpmodel, sum)
    # Creating the constraint that will make it equal:
    # sum(terms) + (-sum) == 0  <=>  sum == affine function.
    new_id = string(length(keys(optimizer.cpmodel.variables)) + 1)
    lastForSumToZero = IntVarViewOpposite(sum, new_id)
    addVariable!(optimizer.cpmodel, lastForSumToZero)
    vars[end] = lastForSumToZero
    constraint = SumToZero(vars, optimizer.cpmodel.trailer)
    push!(optimizer.cpmodel.constraints, constraint)
    # Returning the CP identifier of the variable that is equal to the affine function (and storing it, a bit ugly)
    aff_function.cp_identifier = sum.id
    sum.id
end
function bridge_affines!(optimizer::Optimizer)
    # Materialise every affine function as a CP variable plus its constraint.
    foreach(aff -> build_affine!(optimizer, aff), optimizer.moimodel.affines)
end
|
{"hexsha": "8c88f56f3b1b107d908edb9b6dea41154ceb8a9c", "size": 4792, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/MOI_wrapper/homemade_bridging.jl", "max_stars_repo_name": "pitmonticone/SeaPearl.jl", "max_stars_repo_head_hexsha": "0c0ca5ec5cce81515acd202ea2d87c985c0c3fea", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 44, "max_stars_repo_stars_event_min_datetime": "2021-04-20T16:29:52.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-31T07:17:03.000Z", "max_issues_repo_path": "src/MOI_wrapper/homemade_bridging.jl", "max_issues_repo_name": "pitmonticone/SeaPearl.jl", "max_issues_repo_head_hexsha": "0c0ca5ec5cce81515acd202ea2d87c985c0c3fea", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 65, "max_issues_repo_issues_event_min_datetime": "2021-04-23T17:20:56.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-22T23:42:24.000Z", "max_forks_repo_path": "src/MOI_wrapper/homemade_bridging.jl", "max_forks_repo_name": "pitmonticone/SeaPearl.jl", "max_forks_repo_head_hexsha": "0c0ca5ec5cce81515acd202ea2d87c985c0c3fea", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 6, "max_forks_repo_forks_event_min_datetime": "2021-05-10T23:32:49.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-15T02:44:34.000Z", "avg_line_length": 35.2352941176, "max_line_length": 119, "alphanum_fraction": 0.6938647746, "num_tokens": 1121}
|
#include <CGAL/Exact_predicates_inexact_constructions_kernel.h>
#include <CGAL/Polyhedron_3.h>
#include <CGAL/point_generators_3.h>
#include <CGAL/Side_of_triangle_mesh.h>
#include <vector>
#include <fstream>
#include <limits>
#include <boost/foreach.hpp>
typedef CGAL::Exact_predicates_inexact_constructions_kernel K;
typedef K::Point_3 Point;
typedef CGAL::Polyhedron_3<K> Polyhedron;
// Return the largest coordinate value (over x, y and z) of any vertex of `poly`.
// BUG FIX: the accumulator used to start at (std::numeric_limits<double>::min)(),
// which is the smallest POSITIVE normalised double (~2.2e-308), not the most
// negative value — wrong for meshes whose coordinates are all negative.
// Start from -max() instead (the most negative finite double).
double max_coordinate(const Polyhedron& poly)
{
  double max_coord = -(std::numeric_limits<double>::max)();
  BOOST_FOREACH(Polyhedron::Vertex_handle v, vertices(poly))
  {
    Point p = v->point();
    max_coord = (std::max)(max_coord, p.x());
    max_coord = (std::max)(max_coord, p.y());
    max_coord = (std::max)(max_coord, p.z());
  }
  return max_coord;
}
// Load a triangle mesh (OFF file; default data/eight.off) and classify random
// points of its bounding cube as inside / on boundary / outside the mesh.
int main(int argc, char* argv[])
{
  const char* filename = (argc > 1) ? argv[1] : "data/eight.off";
  std::ifstream input(filename);
  Polyhedron poly;
  // Reject unreadable, empty or non-triangulated inputs.
  if (!input || !(input >> poly) || poly.empty()
      || !CGAL::is_triangle_mesh(poly))
  {
    std::cerr << "Not a valid input file." << std::endl;
    return 1;
  }
  // Functor answering point-in-mesh queries for the polyhedron.
  CGAL::Side_of_triangle_mesh<Polyhedron, K> inside(poly);
  double size = max_coordinate(poly);
  // Draw query points uniformly in the cube [-size, size]^3.
  unsigned int nb_points = 100;
  std::vector<Point> points;
  points.reserve(nb_points);
  CGAL::Random_points_in_cube_3<Point> gen(size);
  for (unsigned int i = 0; i < nb_points; ++i)
    points.push_back(*gen++);
  std::cout << "Test " << nb_points << " random points in cube "
            << "[-" << size << "; " << size <<"]" << std::endl;
  // Count how each query point is classified.
  int nb_inside = 0;
  int nb_boundary = 0;
  for (std::size_t i = 0; i < nb_points; ++i)
  {
    CGAL::Bounded_side res = inside(points[i]);
    if (res == CGAL::ON_BOUNDED_SIDE) { ++nb_inside; }
    if (res == CGAL::ON_BOUNDARY) { ++nb_boundary; }
  }
  std::cerr << "Total query size: " << points.size() << std::endl;
  std::cerr << "  " << nb_inside << " points inside " << std::endl;
  std::cerr << "  " << nb_boundary << " points on boundary " << std::endl;
  std::cerr << "  " << points.size() - nb_inside - nb_boundary << " points outside " << std::endl;
  return 0;
}
|
{"hexsha": "ca3e886360f6e61ac57163cbf27fbeabf1a1cfa8", "size": 2103, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "ext/libigl/external/cgal/src/CGAL_Project/examples/Polygon_mesh_processing/point_inside_example.cpp", "max_stars_repo_name": "liminchen/OptCuts", "max_stars_repo_head_hexsha": "cb85b06ece3a6d1279863e26b5fd17a5abb0834d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 187.0, "max_stars_repo_stars_event_min_datetime": "2019-01-23T04:07:11.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-27T03:44:58.000Z", "max_issues_repo_path": "ext/libigl/external/cgal/src/CGAL_Project/examples/Polygon_mesh_processing/point_inside_example.cpp", "max_issues_repo_name": "xiaoxie5002/OptCuts", "max_issues_repo_head_hexsha": "1f4168fc867f47face85fcfa3a572be98232786f", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 8.0, "max_issues_repo_issues_event_min_datetime": "2019-03-22T13:27:38.000Z", "max_issues_repo_issues_event_max_datetime": "2020-06-18T13:23:23.000Z", "max_forks_repo_path": "ext/libigl/external/cgal/src/CGAL_Project/examples/Polygon_mesh_processing/point_inside_example.cpp", "max_forks_repo_name": "xiaoxie5002/OptCuts", "max_forks_repo_head_hexsha": "1f4168fc867f47face85fcfa3a572be98232786f", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 34.0, "max_forks_repo_forks_event_min_datetime": "2019-02-13T01:11:12.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-28T03:29:40.000Z", "avg_line_length": 28.8082191781, "max_line_length": 98, "alphanum_fraction": 0.637660485, "num_tokens": 623}
|
(* *********************************************************************)
(* *)
(* The Compcert verified compiler *)
(* *)
(* Xavier Leroy, INRIA Paris-Rocquencourt *)
(* *)
(* Copyright Institut National de Recherche en Informatique et en *)
(* Automatique. All rights reserved. This file is distributed *)
(* under the terms of the INRIA Non-Commercial License Agreement. *)
(* *)
(* *********************************************************************)
(** Correctness proof for common subexpression elimination. *)
Require Import Coqlib.
Require Import Maps.
Require Import AST.
Require Import Errors.
Require Import Values.
Require Import Memory.
Require Import Events.
Require Import Globalenvs.
Require Import Smallstep.
Require Import Op.
Require Import Registers.
Require Import RTL.
Require Import RTLtyping.
Require Import Kildall.
Require Import CombineOp.
Require Import CombineOpproof.
Require Import CSE.
Require Import Integers.
Require Import Axioms.
Require Import mem_lemmas.
(*Require Import core_semantics.
Require Import core_semantics_lemmas.*)
Require Import semantics.
Require Import structured_injections.
Require Import reach.
Require Import effect_semantics.
Require Import simulations.
Require Import effect_properties.
Require Import simulations_lemmas.
Require Import RTL_coop.
Require Import BuiltinEffects.
Require Import RTL_eff.
(** * Semantic properties of value numberings *)
(** ** Well-formedness of numberings *)
(** A numbering is well-formed if all registers mentioned in equations
are less than the ``next'' register number given in the numbering.
This guarantees that the next register is fresh with respect to
the equations. *)
(* A right-hand side is well formed w.r.t. [next] when every value number it
   mentions is strictly below [next]. *)
Definition wf_rhs (next: valnum) (rh: rhs) : Prop :=
  match rh with
  | Op op vl => forall v, In v vl -> Plt v next
  | Load chunk addr vl => forall v, In v vl -> Plt v next
  end.
(* An equation [vr = rh] is well formed when both sides only use value
   numbers below [next]. *)
Definition wf_equation (next: valnum) (vr: valnum) (rh: rhs) : Prop :=
  Plt vr next /\ wf_rhs next rh.
(* Well-formedness of a whole numbering: all equations are well formed,
   the register map only targets live value numbers, and the reverse map
   [num_val] is consistent with [num_reg]. *)
Inductive wf_numbering (n: numbering) : Prop :=
  wf_numbering_intro
    (EQS: forall v rh,
          In (v, rh) n.(num_eqs) -> wf_equation n.(num_next) v rh)
    (REG: forall r v,
          PTree.get r n.(num_reg) = Some v -> Plt v n.(num_next))
    (VAL: forall r v,
          In r (PMap.get v n.(num_val)) -> PTree.get r n.(num_reg) = Some v).
(* The empty numbering is trivially well formed. *)
Lemma wf_empty_numbering:
  wf_numbering empty_numbering.
Proof.
  unfold empty_numbering; split; simpl; intros.
  contradiction.
  rewrite PTree.gempty in H. congruence.
  rewrite PMap.gi in H. contradiction.
Qed.
(* Well-formedness bounds are monotone in the [next] value number. *)
Lemma wf_rhs_increasing:
  forall next1 next2 rh,
  Ple next1 next2 ->
  wf_rhs next1 rh -> wf_rhs next2 rh.
Proof.
  intros; destruct rh; simpl; intros; apply Plt_Ple_trans with next1; auto.
Qed.
Lemma wf_equation_increasing:
  forall next1 next2 vr rh,
  Ple next1 next2 ->
  wf_equation next1 vr rh -> wf_equation next2 vr rh.
Proof.
  intros. destruct H0. split.
  apply Plt_Ple_trans with next1; auto.
  apply wf_rhs_increasing with next1; auto.
Qed.
(** We now show that all operations over numberings
preserve well-formedness. *)
(* [valnum_reg] preserves well-formedness; the returned value number is live
   and [num_next] only grows. *)
Lemma wf_valnum_reg:
  forall n r n' v,
  wf_numbering n ->
  valnum_reg n r = (n', v) ->
  wf_numbering n' /\ Plt v n'.(num_next) /\ Ple n.(num_next) n'.(num_next).
Proof.
  intros until v. unfold valnum_reg. intros WF VREG. inversion WF.
  destruct ((num_reg n)!r) as [v'|] eqn:?.
(* found *)
  inv VREG. split. auto. split. eauto. apply Ple_refl.
(* not found *)
  inv VREG. split.
  split; simpl; intros.
  apply wf_equation_increasing with (num_next n). apply Ple_succ. auto.
  rewrite PTree.gsspec in H. destruct (peq r0 r).
  inv H. apply Plt_succ.
  apply Plt_trans_succ; eauto.
  rewrite PMap.gsspec in H. destruct (peq v (num_next n)).
  subst v. simpl in H. destruct H. subst r0. apply PTree.gss. contradiction.
  rewrite PTree.gso. eauto. exploit VAL; eauto. congruence.
  simpl. split. apply Plt_succ. apply Ple_succ.
Qed.
(* Same property, lifted to lists of registers. *)
Lemma wf_valnum_regs:
  forall rl n n' vl,
  wf_numbering n ->
  valnum_regs n rl = (n', vl) ->
  wf_numbering n' /\
  (forall v, In v vl -> Plt v n'.(num_next)) /\
  Ple n.(num_next) n'.(num_next).
Proof.
  induction rl; intros.
  simpl in H0. inversion H0. subst n'; subst vl.
  simpl. intuition.
  simpl in H0.
  caseEq (valnum_reg n a). intros n1 v1 EQ1.
  caseEq (valnum_regs n1 rl). intros ns vs EQS.
  rewrite EQ1 in H0; rewrite EQS in H0; simpl in H0.
  inversion H0. subst n'; subst vl.
  generalize (wf_valnum_reg _ _ _ _ H EQ1); intros [A1 [B1 C1]].
  generalize (IHrl _ _ _ A1 EQS); intros [As [Bs Cs]].
  split. auto.
  split. simpl; intros. elim H1; intro.
  subst v. eapply Plt_Ple_trans; eauto.
  auto.
  eapply Ple_trans; eauto.
Qed.
(* Searching the equation list is sound: a hit really is an equation. *)
Lemma find_valnum_rhs_correct:
  forall rh vn eqs,
  find_valnum_rhs rh eqs = Some vn -> In (vn, rh) eqs.
Proof.
  induction eqs; simpl.
  congruence.
  case a; intros v r'. case (eq_rhs rh r'); intro.
  intro. left. congruence.
  intro. right. auto.
Qed.
Lemma find_valnum_num_correct:
  forall rh vn eqs,
  find_valnum_num vn eqs = Some rh -> In (vn, rh) eqs.
Proof.
  induction eqs; simpl.
  congruence.
  destruct a as [v' r']. destruct (peq vn v'); intros.
  left. congruence.
  right. auto.
Qed.
(* Membership in [List.remove]: kept iff distinct from the removed element. *)
Remark in_remove:
  forall (A: Type) (eq: forall (x y: A), {x=y}+{x<>y}) x y l,
  In y (List.remove eq x l) <-> x <> y /\ In y l.
Proof.
  induction l; simpl.
  tauto.
  destruct (eq x a).
  subst a. rewrite IHl. tauto.
  simpl. rewrite IHl. intuition congruence.
Qed.
(* After [forget_reg n rd], register [rd] no longer appears in the reverse map. *)
Lemma wf_forget_reg:
  forall n rd r v,
  wf_numbering n ->
  In r (PMap.get v (forget_reg n rd)) -> r <> rd /\ PTree.get r n.(num_reg) = Some v.
Proof.
  unfold forget_reg; intros. inversion H.
  destruct ((num_reg n)!rd) as [vd|] eqn:?.
  rewrite PMap.gsspec in H0. destruct (peq v vd).
  subst vd. rewrite in_remove in H0. destruct H0. split. auto. eauto.
  split; eauto. exploit VAL; eauto. congruence.
  split; eauto. exploit VAL; eauto. congruence.
Qed.
(* [update_reg] keeps the reverse map consistent with the updated [num_reg]. *)
Lemma wf_update_reg:
  forall n rd vd r v,
  wf_numbering n ->
  In r (PMap.get v (update_reg n rd vd)) -> PTree.get r (PTree.set rd vd n.(num_reg)) = Some v.
Proof.
  unfold update_reg; intros. inversion H. rewrite PMap.gsspec in H0.
  destruct (peq v vd).
  subst v; simpl in H0. destruct H0.
  subst r. apply PTree.gss.
  exploit wf_forget_reg; eauto. intros [A B]. rewrite PTree.gso; eauto.
  exploit wf_forget_reg; eauto. intros [A B]. rewrite PTree.gso; eauto.
Qed.
(* Adding a well-formed right-hand side preserves well-formedness, whether the
   rhs is already known (reuse) or fresh (allocate a new value number). *)
Lemma wf_add_rhs:
  forall n rd rh,
  wf_numbering n ->
  wf_rhs n.(num_next) rh ->
  wf_numbering (add_rhs n rd rh).
Proof.
  intros. inversion H. unfold add_rhs.
  destruct (find_valnum_rhs rh n.(num_eqs)) as [vres|] eqn:?.
(* found *)
  exploit find_valnum_rhs_correct; eauto. intros IN.
  split; simpl; intros.
  auto.
  rewrite PTree.gsspec in H1. destruct (peq r rd).
  inv H1. exploit EQS; eauto. intros [A B]. auto.
  eauto.
  eapply wf_update_reg; eauto.
(* not found *)
  split; simpl; intros.
  destruct H1.
  inv H1. split. apply Plt_succ. apply wf_rhs_increasing with n.(num_next). apply Ple_succ. auto.
  apply wf_equation_increasing with n.(num_next). apply Ple_succ. auto.
  rewrite PTree.gsspec in H1. destruct (peq r rd).
  inv H1. apply Plt_succ.
  apply Plt_trans_succ. eauto.
  eapply wf_update_reg; eauto.
Qed.
(* Each numbering-update operation of the analysis preserves well-formedness. *)
Lemma wf_add_op:
  forall n rd op rs,
  wf_numbering n ->
  wf_numbering (add_op n rd op rs).
Proof.
  intros. unfold add_op. destruct (is_move_operation op rs) as [r|] eqn:?.
(* move *)
  destruct (valnum_reg n r) as [n' v] eqn:?.
  exploit wf_valnum_reg; eauto. intros [A [B C]]. inversion A.
  constructor; simpl; intros.
  eauto.
  rewrite PTree.gsspec in H0. destruct (peq r0 rd). inv H0. auto. eauto.
  eapply wf_update_reg; eauto.
(* not a move *)
  destruct (valnum_regs n rs) as [n' vs] eqn:?.
  exploit wf_valnum_regs; eauto. intros [A [B C]].
  eapply wf_add_rhs; eauto.
Qed.
Lemma wf_add_load:
  forall n rd chunk addr rs,
  wf_numbering n ->
  wf_numbering (add_load n rd chunk addr rs).
Proof.
  intros. unfold add_load.
  caseEq (valnum_regs n rs). intros n' vl EQ.
  generalize (wf_valnum_regs _ _ _ _ H EQ). intros [A [B C]].
  apply wf_add_rhs; auto.
Qed.
Lemma wf_add_unknown:
  forall n rd,
  wf_numbering n ->
  wf_numbering (add_unknown n rd).
Proof.
  intros. inversion H. unfold add_unknown. constructor; simpl; intros.
  eapply wf_equation_increasing; eauto. auto with coqlib.
  rewrite PTree.gsspec in H0. destruct (peq r rd).
  inv H0. auto with coqlib.
  apply Plt_trans_succ; eauto.
  exploit wf_forget_reg; eauto. intros [A B]. rewrite PTree.gso; eauto.
Qed.
(* Equations surviving [kill_eqs] were present before and fail the predicate. *)
Remark kill_eqs_in:
  forall pred v rhs eqs,
  In (v, rhs) (kill_eqs pred eqs) -> In (v, rhs) eqs /\ pred rhs = false.
Proof.
  induction eqs; simpl; intros.
  tauto.
  destruct (pred (snd a)) eqn:?.
  exploit IHeqs; eauto. tauto.
  simpl in H; destruct H. subst a. auto. exploit IHeqs; eauto. tauto.
Qed.
Lemma wf_kill_equations:
  forall pred n, wf_numbering n -> wf_numbering (kill_equations pred n).
Proof.
  intros. inversion H. unfold kill_equations; split; simpl; intros.
  exploit kill_eqs_in; eauto. intros [A B]. auto.
  eauto.
  eauto.
Qed.
Lemma wf_add_store:
  forall n chunk addr rargs rsrc,
  wf_numbering n -> wf_numbering (add_store n chunk addr rargs rsrc).
Proof.
  intros. unfold add_store.
  destruct (valnum_regs n rargs) as [n1 vargs] eqn:?.
  exploit wf_valnum_regs; eauto. intros [A [B C]].
  assert (wf_numbering (kill_equations (filter_after_store chunk addr vargs) n1)).
  apply wf_kill_equations. auto.
  destruct chunk; auto; apply wf_add_rhs; auto.
Qed.
(* The dataflow transfer function preserves well-formedness at every program point. *)
Lemma wf_transfer:
  forall f pc n, wf_numbering n -> wf_numbering (transfer f pc n).
Proof.
  intros. unfold transfer.
  destruct (f.(fn_code)!pc); auto.
  destruct i; auto.
  apply wf_add_op; auto.
  apply wf_add_load; auto.
  apply wf_add_store; auto.
  apply wf_empty_numbering.
  apply wf_empty_numbering.
  destruct e; (apply wf_empty_numbering ||
               apply wf_add_unknown; auto; apply wf_kill_equations; auto).
Qed.
(** As a consequence, the numberings computed by the static analysis
are well formed. *)
(* Well-formedness is an invariant of the fixpoint solver, hence holds for
   every numbering produced by the static analysis. *)
Theorem wf_analyze:
  forall f approx pc, analyze f = Some approx -> wf_numbering approx!!pc.
Proof.
  unfold analyze; intros.
  eapply Solver.fixpoint_invariant with (P := wf_numbering); eauto.
  exact wf_empty_numbering.
  intros. eapply wf_transfer; eauto.
Qed.
(** ** Properties of satisfiability of numberings *)
(* Decidable equality on value numbers, packaged for the EMap functor. *)
Module ValnumEq.
  Definition t := valnum.
  Definition eq := peq.
End ValnumEq.
(* Extensional maps from value numbers to values. *)
Module VMap := EMap(ValnumEq).
Section SATISFIABILITY.
Variable ge: genv.
Variable sp: val.
(** Agremment between two mappings from value numbers to values
up to a given value number. *)
(* Two valuations agree up to [upto] when they coincide on all smaller
   value numbers. *)
Definition valu_agree (valu1 valu2: valnum -> val) (upto: valnum) : Prop :=
  forall v, Plt v upto -> valu2 v = valu1 v.
(* [valu_agree] is reflexive and transitive (along growing bounds). *)
Lemma valu_agree_refl:
  forall valu upto, valu_agree valu valu upto.
Proof.
  intros; red; auto.
Qed.
Lemma valu_agree_trans:
  forall valu1 valu2 valu3 upto12 upto23,
  valu_agree valu1 valu2 upto12 ->
  valu_agree valu2 valu3 upto23 ->
  Ple upto12 upto23 ->
  valu_agree valu1 valu3 upto12.
Proof.
  intros; red; intros. transitivity (valu2 v).
  apply H0. apply Plt_Ple_trans with upto12; auto.
  apply H; auto.
Qed.
(* Agreement extends pointwise to lists of value numbers below the bound. *)
Lemma valu_agree_list:
  forall valu1 valu2 upto vl,
  valu_agree valu1 valu2 upto ->
  (forall v, In v vl -> Plt v upto) ->
  map valu2 vl = map valu1 vl.
Proof.
  intros. apply list_map_exten. intros. symmetry. apply H. auto.
Qed.
(** The [numbering_holds] predicate (defined in file [CSE]) is
extensional with respect to [valu_agree]. *)
(* [numbering_holds] only depends on the valuation below [num_next], so it is
   stable under [valu_agree]. *)
Lemma numbering_holds_exten:
  forall valu1 valu2 n rs m,
  valu_agree valu1 valu2 n.(num_next) ->
  wf_numbering n ->
  numbering_holds valu1 ge sp rs m n ->
  numbering_holds valu2 ge sp rs m n.
Proof.
  intros. inversion H0. inversion H1. split; intros.
  exploit EQS; eauto. intros [A B]. red in B.
  generalize (H2 _ _ H4).
  unfold equation_holds; destruct rh.
  rewrite (valu_agree_list valu1 valu2 n.(num_next)).
  rewrite H. auto. auto. auto. auto.
  rewrite (valu_agree_list valu1 valu2 n.(num_next)).
  rewrite H. auto. auto. auto. auto.
  rewrite H. auto. eauto.
Qed.
(** [valnum_reg] and [valnum_regs] preserve the [numbering_holds] predicate.
Moreover, it is always the case that the returned value number has
the value of the given register in the final assignment of values to
value numbers. *)
(* [valnum_reg] preserves [numbering_holds]; the (possibly fresh) value number
   maps to the register's current value, and the old valuation is untouched
   below the old [num_next]. *)
Lemma valnum_reg_holds:
  forall valu1 rs n r n' v m,
  wf_numbering n ->
  numbering_holds valu1 ge sp rs m n ->
  valnum_reg n r = (n', v) ->
  exists valu2,
    numbering_holds valu2 ge sp rs m n' /\
    valu2 v = rs#r /\
    valu_agree valu1 valu2 n.(num_next).
Proof.
  intros until v. unfold valnum_reg.
  caseEq (n.(num_reg)!r).
(* Register already has a value number *)
  intros. inversion H2. subst n'; subst v0.
  inversion H1.
  exists valu1. split. auto.
  split. symmetry. auto.
  apply valu_agree_refl.
(* Register gets a fresh value number *)
  intros. inversion H2. subst n'. subst v. inversion H1.
  set (valu2 := VMap.set n.(num_next) rs#r valu1).
  assert (AG: valu_agree valu1 valu2 n.(num_next)).
  red; intros. unfold valu2. apply VMap.gso.
  auto with coqlib.
  destruct (numbering_holds_exten _ _ _ _ _ AG H0 H1) as [A B].
  exists valu2.
  split. split; simpl; intros. auto.
  unfold valu2, VMap.set, ValnumEq.eq.
  rewrite PTree.gsspec in H5. destruct (peq r0 r).
  inv H5. rewrite peq_true. auto.
  rewrite peq_false. auto.
  assert (Plt vn (num_next n)). inv H0. eauto.
  red; intros; subst; eapply Plt_strict; eauto.
  split. unfold valu2. rewrite VMap.gss. auto.
  auto.
Qed.
(* Same property, lifted to lists of registers. *)
Lemma valnum_regs_holds:
  forall rs rl valu1 n n' vl m,
  wf_numbering n ->
  numbering_holds valu1 ge sp rs m n ->
  valnum_regs n rl = (n', vl) ->
  exists valu2,
    numbering_holds valu2 ge sp rs m n' /\
    List.map valu2 vl = rs##rl /\
    valu_agree valu1 valu2 n.(num_next).
Proof.
  induction rl; simpl; intros.
(* base case *)
  inversion H1; subst n'; subst vl.
  exists valu1. split. auto. split. auto. apply valu_agree_refl.
(* inductive case *)
  caseEq (valnum_reg n a); intros n1 v1 EQ1.
  caseEq (valnum_regs n1 rl); intros ns vs EQs.
  rewrite EQ1 in H1; rewrite EQs in H1. inversion H1. subst vl; subst n'.
  generalize (valnum_reg_holds _ _ _ _ _ _ _ H H0 EQ1).
  intros [valu2 [A [B C]]].
  generalize (wf_valnum_reg _ _ _ _ H EQ1). intros [D [E F]].
  generalize (IHrl _ _ _ _ _ D A EQs).
  intros [valu3 [P [Q R]]].
  exists valu3.
  split. auto.
  split. simpl. rewrite R. congruence. auto.
  eapply valu_agree_trans; eauto.
Qed.
(** A reformulation of the [equation_holds] predicate in terms
of the value to which a right-hand side of an equation evaluates. *)
(* [rhs_evals_to valu rh m v]: under valuation [valu] and memory [m], the
   right-hand side [rh] evaluates to value [v]. *)
Definition rhs_evals_to
    (valu: valnum -> val) (rh: rhs) (m: mem) (v: val) : Prop :=
  match rh with
  | Op op vl =>
      eval_operation ge sp op (List.map valu vl) m = Some v
  | Load chunk addr vl =>
      exists a,
      eval_addressing ge sp addr (List.map valu vl) = Some a /\
      Mem.loadv chunk m a = Some v
  end.
(* If an equation for [vres] holds and its rhs evaluates to [v], then the
   valuation already assigns [v] to [vres] (evaluation is deterministic). *)
Lemma equation_evals_to_holds_1:
  forall valu rh v vres m,
  rhs_evals_to valu rh m v ->
  equation_holds valu ge sp m vres rh ->
  valu vres = v.
Proof.
  intros until m. unfold rhs_evals_to, equation_holds.
  destruct rh. congruence.
  intros [a1 [A1 B1]] [a2 [A2 B2]]. congruence.
Qed.
(* Conversely, assigning [v] to a fresh [vres] makes the equation hold:
   well-formedness guarantees the rhs does not mention [vres] itself. *)
Lemma equation_evals_to_holds_2:
  forall valu rh v vres m,
  wf_rhs vres rh ->
  rhs_evals_to valu rh m v ->
  equation_holds (VMap.set vres v valu) ge sp m vres rh.
Proof.
  intros until m. unfold wf_rhs, rhs_evals_to, equation_holds.
  rewrite VMap.gss.
  assert (forall vl: list valnum,
          (forall v, In v vl -> Plt v vres) ->
          map (VMap.set vres v valu) vl = map valu vl).
  intros. apply list_map_exten. intros.
  symmetry. apply VMap.gso. apply Plt_ne. auto.
  destruct rh; intros; rewrite H; auto.
Qed.
(** The numbering obtained by adding an equation [rd = rhs] is satisfiable
in a concrete register set where [rd] is set to the value of [rhs]. *)
(* General form: if [rhs] evaluates to the value of [rd] in the new register
   set [rs'] (which agrees with [rs] on all other registers), then the
   numbering extended with [rd = rhs] is satisfiable in [rs']. *)
Lemma add_rhs_satisfiable_gen:
forall n rh valu1 rs rd rs' m,
wf_numbering n ->
wf_rhs n.(num_next) rh ->
numbering_holds valu1 ge sp rs m n ->
rhs_evals_to valu1 rh m (rs'#rd) ->
(forall r, r <> rd -> rs'#r = rs#r) ->
numbering_satisfiable ge sp rs' m (add_rhs n rd rh).
Proof.
intros. unfold add_rhs.
caseEq (find_valnum_rhs rh n.(num_eqs)).
(* RHS found: reuse the existing value number; the old valuation works. *)
intros vres FINDVN. inversion H1.
exists valu1. split; simpl; intros.
auto.
rewrite PTree.gsspec in H6.
destruct (peq r rd).
inv H6.
symmetry. eapply equation_evals_to_holds_1; eauto.
apply H4. apply find_valnum_rhs_correct. congruence.
rewrite H3; auto.
(* RHS not found: allocate a fresh value number and extend the valuation
   with it, mapping it to the new value of [rd]. *)
intro FINDVN.
set (valu2 := VMap.set n.(num_next) (rs'#rd) valu1).
assert (AG: valu_agree valu1 valu2 n.(num_next)).
red; intros. unfold valu2. apply VMap.gso.
auto with coqlib.
elim (numbering_holds_exten _ _ _ _ _ AG H H1); intros.
exists valu2. split; simpl; intros.
destruct H6.
inv H6. unfold valu2. eapply equation_evals_to_holds_2; eauto. auto.
rewrite PTree.gsspec in H6. destruct (peq r rd).
unfold valu2. inv H6. rewrite VMap.gss. auto.
rewrite H3; auto.
Qed.
(* Specialization of [add_rhs_satisfiable_gen] to the common case where the
   new register set is [rs] with [rd] updated to the value [v] of [rh]. *)
Lemma add_rhs_satisfiable:
forall n rh valu1 rs rd v m,
wf_numbering n ->
wf_rhs n.(num_next) rh ->
numbering_holds valu1 ge sp rs m n ->
rhs_evals_to valu1 rh m v ->
numbering_satisfiable ge sp (rs#rd <- v) m (add_rhs n rd rh).
Proof.
intros. eapply add_rhs_satisfiable_gen; eauto.
rewrite Regmap.gss; auto.
intros. apply Regmap.gso; auto.
Qed.
(** [add_op] returns a numbering that is satisfiable in the register
set after execution of the corresponding [Iop] instruction. *)
(* [add_op] preserves satisfiability across an [Iop] instruction that
   computes [v] into [dst]. *)
Lemma add_op_satisfiable:
forall n rs op args dst v m,
wf_numbering n ->
numbering_satisfiable ge sp rs m n ->
eval_operation ge sp op rs##args m = Some v ->
numbering_satisfiable ge sp (rs#dst <- v) m (add_op n dst op args).
Proof.
intros. inversion H0.
unfold add_op.
(* Case split: a move is recorded as an alias (same value number for
   [dst] and the source), a genuine operation goes through [add_rhs]. *)
caseEq (@is_move_operation reg op args).
intros arg EQ.
destruct (is_move_operation_correct _ _ EQ) as [A B]. subst op args.
caseEq (valnum_reg n arg). intros n1 v1 VL.
generalize (valnum_reg_holds _ _ _ _ _ _ _ H H2 VL). intros [valu2 [A [B C]]].
generalize (wf_valnum_reg _ _ _ _ H VL). intros [D [E F]].
elim A; intros. exists valu2; split; simpl; intros.
auto. rewrite Regmap.gsspec. rewrite PTree.gsspec in H5.
destruct (peq r dst). simpl in H1. congruence. auto.
intro NEQ. caseEq (valnum_regs n args). intros n1 vl VRL.
generalize (valnum_regs_holds _ _ _ _ _ _ _ H H2 VRL). intros [valu2 [A [B C]]].
generalize (wf_valnum_regs _ _ _ _ H VRL). intros [D [E F]].
apply add_rhs_satisfiable with valu2; auto.
simpl. congruence.
Qed.
(** [add_load] returns a numbering that is satisfiable in the register
set after execution of the corresponding [Iload] instruction. *)
(* [add_load] preserves satisfiability across an [Iload] instruction that
   loads [v] from address [a] into [dst]. *)
Lemma add_load_satisfiable:
forall n rs chunk addr args dst a v m,
wf_numbering n ->
numbering_satisfiable ge sp rs m n ->
eval_addressing ge sp addr rs##args = Some a ->
Mem.loadv chunk m a = Some v ->
numbering_satisfiable ge sp (rs#dst <- v) m (add_load n dst chunk addr args).
Proof.
intros. inversion H0.
unfold add_load.
caseEq (valnum_regs n args). intros n1 vl VRL.
generalize (valnum_regs_holds _ _ _ _ _ _ _ H H3 VRL). intros [valu2 [A [B C]]].
generalize (wf_valnum_regs _ _ _ _ H VRL). intros [D [E F]].
apply add_rhs_satisfiable with valu2; auto.
simpl. exists a; split; congruence.
Qed.
(** [add_unknown] returns a numbering that is satisfiable in the
register set after setting the target register to any value. *)
(* [add_unknown] preserves satisfiability for any value [v] written to
   [dst]: the fresh value number is simply mapped to [v]. *)
Lemma add_unknown_satisfiable:
forall n rs dst v m,
wf_numbering n ->
numbering_satisfiable ge sp rs m n ->
numbering_satisfiable ge sp (rs#dst <- v) m (add_unknown n dst).
Proof.
intros. destruct H0 as [valu A].
(* Extend the valuation with the fresh value number [num_next := v]. *)
set (valu' := VMap.set n.(num_next) v valu).
assert (numbering_holds valu' ge sp rs m n).
eapply numbering_holds_exten; eauto.
unfold valu'; red; intros. apply VMap.gso. auto with coqlib.
destruct H0 as [B C].
exists valu'; split; simpl; intros.
eauto.
rewrite PTree.gsspec in H0. rewrite Regmap.gsspec.
destruct (peq r dst). inversion H0. unfold valu'. rewrite VMap.gss; auto.
eauto.
Qed.
(** Satisfiability of [kill_equations]. *)
(* [kill_equations pred n] keeps only equations with [pred r = false]; it
   therefore holds in the new memory [m'] provided every surviving equation
   transfers from [m] to [m']. *)
Lemma kill_equations_holds:
forall pred valu n rs m m',
(forall v r,
equation_holds valu ge sp m v r -> pred r = false -> equation_holds valu ge sp m' v r) ->
numbering_holds valu ge sp rs m n ->
numbering_holds valu ge sp rs m' (kill_equations pred n).
Proof.
intros. destruct H0 as [A B]. red; simpl. split; intros.
exploit kill_eqs_in; eauto. intros [C D]. eauto.
auto.
Qed.
(** [kill_loads] preserves satisfiability. Moreover, the resulting numbering
is satisfiable in any concrete memory state. *)
(* [kill_loads] removes all load equations; what remains (memory-independent
   operations) is satisfiable in any memory state [m']. *)
Lemma kill_loads_satisfiable:
forall n rs m m',
numbering_satisfiable ge sp rs m n ->
numbering_satisfiable ge sp rs m' (kill_loads n).
Proof.
intros. destruct H as [valu A]. exists valu. eapply kill_equations_holds with (m := m); eauto.
(* Surviving [Op] equations do not depend on memory. *)
intros. destruct r; simpl in *. rewrite <- H. apply op_depends_on_memory_correct; auto.
congruence.
Qed.
(** [add_store] returns a numbering that is satisfiable in the memory state
after execution of the corresponding [Istore] instruction. *)
(* [add_store] preserves satisfiability across an [Istore]: equations that
   may alias the store are killed, and (for chunks where the stored value
   round-trips through [Val.load_result]) a new load equation for [src]
   is recorded. *)
Lemma add_store_satisfiable:
forall n rs chunk addr args src a m m',
wf_numbering n ->
numbering_satisfiable ge sp rs m n ->
eval_addressing ge sp addr rs##args = Some a ->
Mem.storev chunk m a (rs#src) = Some m' ->
Val.has_type (rs#src) (type_of_chunk_use chunk) ->
numbering_satisfiable ge sp rs m' (add_store n chunk addr args src).
Proof.
intros. unfold add_store. destruct H0 as [valu A].
destruct (valnum_regs n args) as [n1 vargs] eqn:?.
exploit valnum_regs_holds; eauto. intros [valu' [B [C D]]].
exploit wf_valnum_regs; eauto. intros [U [V W]].
(* Step 1: surviving equations still hold after the store, because they
   are either memory-independent or provably disjoint from the store. *)
set (n2 := kill_equations (filter_after_store chunk addr vargs) n1).
assert (numbering_holds valu' ge sp rs m' n2).
apply kill_equations_holds with (m := m); auto.
intros. destruct r; simpl in *.
rewrite <- H0. apply op_depends_on_memory_correct; auto.
destruct H0 as [a' [P Q]].
destruct (eq_list_valnum vargs l); simpl in H4; try congruence. subst l.
rewrite negb_false_iff in H4.
exists a'; split; auto.
destruct a; simpl in H2; try congruence.
destruct a'; simpl in Q; try congruence.
simpl. rewrite <- Q.
rewrite C in P. eapply Mem.load_store_other; eauto.
exploit addressing_separated_sound; eauto. intuition congruence.
assert (N2: numbering_satisfiable ge sp rs m' n2).
exists valu'; auto.
(* Step 2: add the equation [src = Load chunk addr vargs], valid whenever
   the stored value is unchanged by [Val.load_result]. *)
set (n3 := add_rhs n2 src (Load chunk addr vargs)).
assert (N3: Val.load_result chunk (rs#src) = rs#src -> numbering_satisfiable ge sp rs m' n3).
intro EQ. unfold n3. apply add_rhs_satisfiable_gen with valu' rs.
apply wf_kill_equations; auto.
red. auto. auto.
red. exists a; split. congruence.
rewrite <- EQ. destruct a; simpl in H2; try discriminate. simpl.
eapply Mem.load_store_same; eauto.
auto.
(* Per-chunk case analysis: use N3 where [load_result] is the identity,
   relying on the typing hypothesis H3 for the remaining chunks. *)
destruct chunk; auto; apply N3.
simpl in H3. destruct (rs#src); auto || contradiction.
simpl in H3. destruct (rs#src); auto || contradiction.
simpl in H3. destruct (rs#src); auto || contradiction.
simpl in H3. destruct (rs#src); auto || contradiction.
Qed.
(** Correctness of [reg_valnum]: if it returns a register [r],
that register correctly maps back to the given value number. *)
(* If [reg_valnum] picks register [r] for value number [v], then the
   numbering indeed maps [r] to [v]. *)
Lemma reg_valnum_correct:
forall n v r, wf_numbering n -> reg_valnum n v = Some r -> n.(num_reg)!r = Some v.
Proof.
unfold reg_valnum; intros. inv H.
destruct ((num_val n)#v) as [| r1 rl] eqn:?; inv H0.
eapply VAL. rewrite Heql. auto with coqlib.
Qed.
(** Correctness of [find_rhs]: if successful and in a
satisfiable numbering, the returned register does contain the
result value of the operation or memory load. *)
(* If [find_rhs] returns register [r] in a holding numbering, then [rh]
   evaluates to the current contents of [r]. *)
Lemma find_rhs_correct:
forall valu rs m n rh r,
wf_numbering n ->
numbering_holds valu ge sp rs m n ->
find_rhs n rh = Some r ->
rhs_evals_to valu rh m rs#r.
Proof.
intros until r. intros WF NH.
unfold find_rhs.
caseEq (find_valnum_rhs rh n.(num_eqs)); intros.
(* Found: the equation for the value number plus the register mapping
   give the evaluation. *)
exploit find_valnum_rhs_correct; eauto. intros.
exploit reg_valnum_correct; eauto. intros.
inversion NH.
generalize (H3 _ _ H1). rewrite (H4 _ _ H2).
destruct rh; simpl; auto.
discriminate.
Qed.
(** Correctness of operator reduction *)
(* Soundness of the generic operator-strength-reduction machinery: [f] is a
   one-step rewriting function assumed sound w.r.t. the semantics [sem];
   [reduce] iterates it up to a bounded number of times. *)
Section REDUCE.
Variable A: Type.
Variable f: (valnum -> option rhs) -> A -> list valnum -> option (A * list valnum).
Variable V: Type.
Variable rs: regset.
Variable m: mem.
Variable sem: A -> list val -> option V.
(* Each rewriting step performed by [f] preserves the semantics, given
   access to the (sound) equations of the numbering. *)
Hypothesis f_sound:
forall eqs valu op args op' args',
(forall v rhs, eqs v = Some rhs -> equation_holds valu ge sp m v rhs) ->
f eqs op args = Some(op', args') ->
sem op' (map valu args') = sem op (map valu args).
Variable n: numbering.
Variable valu: valnum -> val.
Hypothesis n_holds: numbering_holds valu ge sp rs m n.
Hypothesis n_wf: wf_numbering n.
(* Mapping a list of value numbers back to registers yields registers whose
   contents are the valuation of those value numbers. *)
Lemma regs_valnums_correct:
forall vl rl, regs_valnums n vl = Some rl -> rs##rl = map valu vl.
Proof.
induction vl; simpl; intros.
inv H. auto.
destruct (reg_valnum n a) as [r1|] eqn:?; try discriminate.
destruct (regs_valnums n vl) as [rx|] eqn:?; try discriminate.
inv H. simpl; decEq; auto.
eapply (proj2 n_holds); eauto. eapply reg_valnum_correct; eauto.
Qed.
(* [reduce_rec] performs at most [niter] rewriting steps; by induction each
   intermediate form has the same semantics as the original. *)
Lemma reduce_rec_sound:
forall niter op args op' rl' res,
reduce_rec A f n niter op args = Some(op', rl') ->
sem op (map valu args) = Some res ->
sem op' (rs##rl') = Some res.
Proof.
induction niter; simpl; intros.
discriminate.
destruct (f (fun v : valnum => find_valnum_num v (num_eqs n)) op args)
as [[op1 args1] | ] eqn:?.
assert (sem op1 (map valu args1) = Some res).
rewrite <- H0. eapply f_sound; eauto.
simpl; intros. apply (proj1 n_holds). eapply find_valnum_num_correct; eauto.
destruct (reduce_rec A f n niter op1 args1) as [[op2 rl2] | ] eqn:?.
inv H. eapply IHniter; eauto.
destruct (regs_valnums n args1) as [rl|] eqn:?.
inv H. erewrite regs_valnums_correct; eauto.
discriminate.
discriminate.
Qed.
(* Top-level [reduce]: if reduction fails, the original operator/arguments
   are returned unchanged, so the result is sound in either case. *)
Lemma reduce_sound:
forall op rl vl op' rl' res,
reduce A f n op rl vl = (op', rl') ->
map valu vl = rs##rl ->
sem op rs##rl = Some res ->
sem op' rs##rl' = Some res.
Proof.
unfold reduce; intros.
destruct (reduce_rec A f n 4%nat op vl) as [[op1 rl1] | ] eqn:?; inv H.
eapply reduce_rec_sound; eauto. congruence.
auto.
Qed.
End REDUCE.
End SATISFIABILITY.
(** The numberings associated to each instruction by the static analysis
are inductively satisfiable, in the following sense: the numbering
at the function entry point is satisfiable, and for any RTL execution
from [pc] to [pc'], satisfiability at [pc] implies
satisfiability at [pc']. *)
(* Step case of analysis correctness: along any CFG edge [pc -> pc'],
   satisfiability of the transfer of the numbering at [pc] implies
   satisfiability of the numbering at [pc']. *)
Theorem analysis_correct_1:
forall ge sp rs m f approx pc pc' i,
analyze f = Some approx ->
f.(fn_code)!pc = Some i -> In pc' (successors_instr i) ->
numbering_satisfiable ge sp rs m (transfer f pc approx!!pc) ->
numbering_satisfiable ge sp rs m approx!!pc'.
Proof.
intros.
(* The fixpoint property of the solver gives the ordering between the
   numbering at [pc'] and the transferred numbering at [pc]. *)
assert (Numbering.ge approx!!pc' (transfer f pc approx!!pc)).
eapply Solver.fixpoint_solution; eauto.
apply H3. auto.
Qed.
(* Base case: at the function entry point the analysis result is [top]
   (the empty numbering), which is trivially satisfiable. *)
Theorem analysis_correct_entry:
forall ge sp rs m f approx,
analyze f = Some approx ->
numbering_satisfiable ge sp rs m approx!!(f.(fn_entrypoint)).
Proof.
intros.
replace (approx!!(f.(fn_entrypoint))) with Solver.L.top.
apply empty_numbering_satisfiable.
symmetry. eapply Solver.fixpoint_entry; eauto.
Qed.
(*LENB: copied the notion of register agreement from TailcallproofEFF*)
(* Pointwise value injection between two register sets under injection [j]. *)
Definition regset_inject j (rs rs': regset) : Prop :=
forall r, val_inject j (rs#r) (rs'#r).
(* Reading a list of registers from related register sets yields
   injection-related value lists. *)
Lemma regset_get_list j:
forall rs rs' l,
regset_inject j rs rs' -> val_list_inject j (rs##l) (rs'##l).
Proof.
induction l; simpl; intros; constructor; auto.
Qed.
(* [regset_inject] is preserved by updating both register sets at the same
   register with injection-related values. *)
Lemma regset_set j:
forall rs rs' v v' r,
regset_inject j rs rs' -> val_inject j v v' ->
regset_inject j (rs#r <- v) (rs'#r <- v').
Proof.
intros; red; intros. repeat rewrite PMap.gsspec. destruct (peq r0 r); auto.
Qed.
(* Initializing formal parameters with injection-related argument lists
   produces injection-related register sets (unset registers are [Vundef],
   which injects into anything). *)
Lemma regset_init_regs j:
forall params vl vl',
val_list_inject j vl vl' ->
regset_inject j (init_regs vl params) (init_regs vl' params).
Proof.
induction params; intros.
simpl. red; intros. rewrite Regmap.gi. constructor.
simpl. inv H. red; intros. rewrite Regmap.gi. constructor.
apply regset_set. auto. auto.
Qed.
(* [regset_inject] is monotone in the injection. *)
Lemma regset_incr j:
forall rs rs' j' (RI: regset_inject j rs rs')
(INC: inject_incr j j'), regset_inject j' rs rs'.
Proof.
intros. red; intros.
eapply val_inject_incr; eauto.
Qed.
(* [RHS_max mx rh]: all value numbers occurring in [rh] are strictly
   below [mx]. *)
Definition RHS_max mx (rh:rhs):Prop :=
match rh with
Op _ l => forall a, In a l -> Plt a mx
| Load _ _ l => forall a, In a l -> Plt a mx
end.
(** * Semantic preservation *)
Section PRESERVATION.
(* Source program, transformed program, and the hypothesis that the
   transformation succeeded; [ge]/[tge] are the corresponding global
   environments. *)
Variable prog: program.
Variable tprog : program.
Hypothesis TRANSF: transf_program prog = OK tprog.
Let ge := Genv.globalenv prog.
Let tge := Genv.globalenv tprog.
(* Symbol lookup is unchanged by the transformation. *)
Lemma symbols_preserved:
forall (s: ident), Genv.find_symbol tge s = Genv.find_symbol ge s.
Proof (Genv.find_symbol_transf_partial transf_fundef prog TRANSF).
(* Variable info lookup is unchanged by the transformation. *)
Lemma varinfo_preserved:
forall b, Genv.find_var_info tge b = Genv.find_var_info ge b.
Proof (Genv.find_var_info_transf_partial transf_fundef prog TRANSF).
(* Every function found by value in [ge] has a translated counterpart
   in [tge]. *)
Lemma functions_translated:
forall (v: val) (f: RTL.fundef),
Genv.find_funct ge v = Some f ->
exists tf, Genv.find_funct tge v = Some tf /\ transf_fundef f = OK tf.
Proof (Genv.find_funct_transf_partial transf_fundef prog TRANSF).
(* Every function found by block pointer in [ge] has a translated
   counterpart in [tge]. *)
Lemma funct_ptr_translated:
forall (b: block) (f: RTL.fundef),
Genv.find_funct_ptr ge b = Some f ->
exists tf, Genv.find_funct_ptr tge b = Some tf /\ transf_fundef f = OK tf.
Proof (Genv.find_funct_ptr_transf_partial transf_fundef prog TRANSF).
(* The transformation preserves function signatures. *)
Lemma sig_preserved:
forall f tf, transf_fundef f = OK tf -> funsig tf = funsig f.
Proof.
unfold transf_fundef; intros. destruct f; monadInv H; auto.
unfold transf_function in EQ. destruct (type_function f); try discriminate.
destruct (analyze f); try discriminate. inv EQ; auto.
Qed.
(* [find_function] (by register or by symbol) commutes with the
   transformation, given identical register sets. *)
Lemma find_function_translated:
forall ros rs f,
find_function ge ros rs = Some f ->
exists tf, find_function tge ros rs = Some tf /\ transf_fundef f = OK tf.
Proof.
intros until f; destruct ros; simpl.
intro. apply functions_translated; auto.
rewrite symbols_preserved. destruct (Genv.find_symbol ge i); intro.
apply funct_ptr_translated; auto.
discriminate.
Qed.
(* Variant of [find_function_translated] for injection-related register
   sets: global function pointers inject onto themselves (GFP), so the
   lookup in [tge] with [rs'] still finds the translated function. *)
Lemma find_function_preserved' mu rs rs' ros fd tf: forall
(FIND: find_function ge ros rs = Some fd)
(GFP : globalfunction_ptr_inject ge (as_inj mu))
(RI : regset_inject (restrict (as_inj mu) (vis mu)) rs rs')
(TF : transf_fundef fd = OK tf),
find_function tge ros rs' = Some tf.
Proof. intros.
unfold find_function, Genv.find_funct in FIND.
unfold find_function, Genv.find_funct.
destruct ros.
(* Call by register: use GFP to show the injected pointer has offset 0
   and the same block image. *)
specialize (RI r). destruct (rs # r); inv FIND. inv RI.
destruct (Int.eq_dec i Int.zero); inv H0.
destruct (GFP _ _ H1).
destruct (restrictD_Some _ _ _ _ _ H3).
rewrite H4 in H; inv H. rewrite Int.add_zero.
destruct (funct_ptr_translated _ _ H1) as [? [? ?]].
rewrite H. rewrite H6 in TF; inv TF.
destruct (Int.eq_dec Int.zero Int.zero); trivial. elim n; trivial.
(* Call by symbol: symbols are preserved. *)
rewrite symbols_preserved.
destruct (Genv.find_symbol ge i); inv FIND.
destruct (funct_ptr_translated _ _ H0) as [? [? ?]].
rewrite H1 in TF; inv TF. trivial.
Qed.
(* The source and target global environments have the same block domains
   (symbols, variable infos, and function pointers). *)
Lemma GDE_lemma: genvs_domain_eq ge tge.
Proof.
unfold genvs_domain_eq, genv2blocks.
simpl; split; intros.
split; intros; destruct H as [id Hid].
rewrite <- symbols_preserved in Hid.
exists id; trivial.
rewrite symbols_preserved in Hid.
exists id; trivial.
split. intros. rewrite varinfo_preserved. intuition.
intros. split.
intros [f H].
apply funct_ptr_translated in H.
destruct H as [? [? _]].
eexists; eassumption.
intros [f H].
apply (@Genv.find_funct_ptr_rev_transf_partial
_ _ _ transf_fundef prog _ TRANSF) in H.
destruct H as [? [? _]]. eexists; eassumption.
Qed.
(* The transformed function: identical to [f] except that its code is
   rewritten according to the analysis result [approxs]. *)
Definition transf_function' (f: function) (approxs: PMap.t numbering) : function :=
mkfunction
f.(fn_sig)
f.(fn_params)
f.(fn_stacksize)
(transf_code approxs f.(fn_code))
f.(fn_entrypoint).
(** The proof of semantic preservation is a simulation argument using
diagrams of the following form:
<<
st1 --------------- st2
| |
t| |t
| |
v v
st1'--------------- st2'
>>
Left: RTL execution in the original program. Right: RTL execution in
the optimized program. Precondition (top) and postcondition (bottom):
agreement between the states, including the fact that
the numbering at [pc] (returned by the static analysis) is satisfiable.
*)
(* The two stack pointers are zero-offset pointers into blocks related by
   the local part of [mu]. *)
Definition SPlocal mu sp sp':=
exists spb spb', sp=Vptr spb Int.zero /\
sp'=Vptr spb' Int.zero /\
local_of mu spb = Some(spb',0).
(* Frame-by-frame matching of source and target call stacks under [mu]:
   same function (transformed on the target side), related registers and
   stack pointers, and a numbering that stays satisfiable for any value
   returned into [res]. The [typ] index is the expected return type. *)
Inductive match_stackframes mu:
list stackframe -> list stackframe -> typ -> Prop :=
| match_stackframes_nil:
forall ty, match_stackframes mu nil nil ty (*WAS: ty=Tint*)
| match_stackframes_cons:
forall res sp pc rs f approx tyenv s s' ty sp' rs'
(ANALYZE: analyze f = Some approx)
(WTF: wt_function f tyenv)
(WTREGS: wt_regset tyenv rs)
(TYRES: subtype ty (tyenv res) = true)
(SAT: forall v m, numbering_satisfiable ge sp
(rs#res <- v) m approx!!pc)
(RI: regset_inject (restrict (as_inj mu) (vis mu)) rs rs')
(STACKS: match_stackframes mu s s' (proj_sig_res (fn_sig f)))
(SP: SPlocal mu sp sp'),
match_stackframes mu
(Stackframe res f sp pc rs :: s)
(Stackframe res (transf_function' f approx) sp' pc rs' :: s')
ty.
(* [match_stackframes] is stable under internal extension of [mu]. *)
Lemma match_stackframes_intern_incr mu s s' ty mu': forall
(MS: match_stackframes mu s s' ty)
(INC: intern_incr mu mu') (WD': SM_wd mu'),
match_stackframes mu' s s' ty.
Proof. intros.
induction MS; econstructor; eauto.
eapply regset_incr; try eassumption.
eapply intern_incr_restrict; eassumption.
destruct SP as [spb [spb' [B [B' SP]]]].
exists spb, spb'. eapply INC in SP. auto.
Qed.
(* [match_stackframes] is stable under external extension of [mu];
   external extension leaves the local part unchanged. *)
Lemma match_stackframes_extern_incr mu s s' ty mu': forall
(MS: match_stackframes mu s s' ty)
(INC: extern_incr mu mu') (WD': SM_wd mu'),
match_stackframes mu' s s' ty.
Proof. intros.
induction MS; econstructor; eauto.
eapply regset_incr; try eassumption.
eapply extern_incr_restrict; eassumption.
destruct SP as [spb [spb' [B [B' SP]]]].
exists spb, spb'. rewrite <- (extern_incr_local _ _ INC). auto.
Qed.
(* [replace_locals] only changes the public-block predicates, so stack
   matching is preserved unconditionally. *)
Lemma match_stackframes_replace_locals mu ty: forall s s'
(MS : match_stackframes mu s s' ty) pubS pubT,
match_stackframes (replace_locals mu pubS pubT) s s' ty.
Proof.
intros.
induction MS; econstructor; eauto.
rewrite replace_locals_as_inj, replace_locals_vis. assumption.
destruct SP as [spb [spb' [SP [SP' LOC]]]].
exists spb, spb'. rewrite replace_locals_local. auto.
Qed.
(* [replace_externs] preserves stack matching provided the new foreign
   predicate [frgnS] covers the old foreign blocks (visibility can only
   grow). *)
Lemma match_stackframes_replace_externs mu ty s s': forall
(MS: match_stackframes mu s s' ty) frgnS frgnT
(HfrgnSrc: forall b, frgnBlocksSrc mu b = true -> frgnS b = true),
match_stackframes (replace_externs mu frgnS frgnT) s s' ty.
Proof.
intros.
induction MS; econstructor; eauto.
rewrite replace_externs_as_inj, replace_externs_vis.
eapply regset_incr; eauto.
red; intros. destruct (restrictD_Some _ _ _ _ _ H).
apply restrictI_Some; trivial.
unfold vis in H1. destruct (locBlocksSrc mu b); simpl in *; trivial.
apply HfrgnSrc; trivial.
destruct SP as [spb [spb' [SP [SP' LOC]]]].
exists spb, spb'. rewrite replace_externs_local. auto.
Qed.
(* Core simulation relation between source and target RTL states under the
   structured injection [mu]: one case per state kind (regular, call,
   return). *)
Inductive match_states mu: RTL_core -> mem -> RTL_core -> mem -> Prop :=
| match_states_intro:
forall s sp pc rs m s' f approx tyenv sp' rs' m'
(ANALYZE: analyze f = Some approx)
(WTF: wt_function f tyenv)
(WTREGS: wt_regset tyenv rs)
(* The analysis numbering at the current point is satisfiable in the
   source state. *)
(SAT: numbering_satisfiable ge sp rs m approx!!pc)
(RI: regset_inject (restrict (as_inj mu) (vis mu)) rs rs')
(SP: SPlocal mu sp sp')
(STACKS: match_stackframes mu s s' (proj_sig_res (fn_sig f))),
match_states mu (RTL_State s f sp pc rs) m
(RTL_State s' (transf_function' f approx) sp' pc rs') m'
| match_states_call:
forall s f tf args m s' args' m',
match_stackframes mu s s' (proj_sig_res (funsig f)) ->
Val.has_type_list args (sig_args (funsig f)) ->
transf_fundef f = OK tf ->
val_list_inject (restrict (as_inj mu) (vis mu)) args args' ->
match_states mu (RTL_Callstate s f args) m
(RTL_Callstate s' tf args') m'
| match_states_return:
forall s s' ty v m v' m',
match_stackframes mu s s' ty ->
Val.has_type v ty ->
val_inject (restrict (as_inj mu) (vis mu)) v v' ->
match_states mu (RTL_Returnstate s v) m
(RTL_Returnstate s' v') m'.
(* Full match invariant: state matching under the visible restriction of
   [mu], plus the standard structured-simulation side conditions
   (reach-closure, global preservation, validity, well-formedness,
   memory injection). *)
Definition MATCH mu c1 m1 c2 m2:Prop :=
match_states (restrict_sm mu (vis mu)) c1 m1 c2 m2 /\
REACH_closed m1 (vis mu) /\
meminj_preserves_globals ge (as_inj mu) /\
globalfunction_ptr_inject ge (as_inj mu) /\
(forall b, isGlobalBlock ge b = true -> frgnBlocksSrc mu b = true) /\
sm_valid mu m1 m2 /\ SM_wd mu /\ Mem.inject (as_inj mu) m1 m2.
(* Projection: MATCH implies the injection is well formed. *)
Lemma MATCH_wd: forall mu c1 m1 c2 m2
(MC: MATCH mu c1 m1 c2 m2), SM_wd mu.
Proof. intros. eapply MC. Qed.
(* Projection: MATCH implies reach-closure of the source memory. *)
Lemma MATCH_RC: forall mu c1 m1 c2 m2
(MC: MATCH mu c1 m1 c2 m2), REACH_closed m1 (vis mu).
Proof. intros. eapply MC. Qed.
(* MATCH is stable under restricting [mu] to any reach-closed predicate [X]
   that contains the visible blocks. *)
Lemma MATCH_restrict: forall mu c1 m1 c2 m2 X
(MC: MATCH mu c1 m1 c2 m2)
(HX: forall b : block, vis mu b = true -> X b = true)
(RX: REACH_closed m1 X),
MATCH (restrict_sm mu X) c1 m1 c2 m2.
Proof. intros.
destruct MC as [MS [RC [PG [GFP [Glob [SMV [WD MINJ]]]]]]].
assert (WDR: SM_wd (restrict_sm mu X)).
apply restrict_sm_WD; assumption.
(* Re-establish each conjunct of MATCH, pushing the restriction through
   with the restrict_sm_* rewriting lemmas. *)
split.
rewrite vis_restrict_sm, restrict_sm_nest; assumption.
split. unfold vis.
rewrite restrict_sm_locBlocksSrc, restrict_sm_frgnBlocksSrc.
apply RC.
split. clear -PG Glob HX.
eapply restrict_sm_preserves_globals; try eassumption.
unfold vis in HX. intuition.
split. rewrite restrict_sm_all.
eapply restrict_preserves_globalfun_ptr; try eassumption.
unfold vis in HX. intuition.
split.
rewrite restrict_sm_frgnBlocksSrc. apply Glob.
split.
destruct SMV.
split; intros.
rewrite restrict_sm_DOM in H1.
apply (H _ H1).
rewrite restrict_sm_RNG in H1.
apply (H0 _ H1).
split. assumption.
rewrite restrict_sm_all.
eapply inject_restrict; eassumption.
Qed.
(* Projection: MATCH implies validity of [mu] w.r.t. both memories. *)
Lemma MATCH_valid: forall mu c1 m1 c2 m2
(MC: MATCH mu c1 m1 c2 m2), sm_valid mu m1 m2.
Proof. intros. eapply MC. Qed.
(* MATCH implies global preservation for the external part of [mu], via the
   equivalence with preservation for the full injection. *)
Lemma MATCH_PG: forall mu c1 m1 c2 m2
(MC: MATCH mu c1 m1 c2 m2),
meminj_preserves_globals ge (extern_of mu) /\
(forall b : block, isGlobalBlock ge b = true -> frgnBlocksSrc mu b = true).
Proof.
intros.
assert (GF: forall b, isGlobalBlock ge b = true -> frgnBlocksSrc mu b = true).
apply MC.
split; trivial.
rewrite <- match_genv_meminj_preserves_extern_iff_all; trivial.
apply MC. apply MC.
Qed.
(*NEW*) (* 64-bit integer helper functions, used by the effect semantics. *) Variable hf : I64Helpers.helper_functions.
(* At an external call: the arguments are injection-related, the target
   state is also at-external on the same function, and MATCH survives
   [replace_locals] with the canonical public-block predicates; moreover
   the shared part of the resulting [nu] injects the memories. *)
Lemma MATCH_atExternal: forall (mu : SM_Injection) (c1 : RTL_core) (m1 : mem) (c2 : RTL_core)
(m2 : mem) (e : external_function) (vals1 : list val) (ef_sig : signature),
MATCH mu c1 m1 c2 m2 ->
at_external (rtl_eff_sem hf) c1 = Some (e, ef_sig, vals1) ->
Mem.inject (as_inj mu) m1 m2 /\
(exists vals2 : list val,
Forall2 (val_inject (restrict (as_inj mu) (vis mu))) vals1 vals2 /\
at_external (rtl_eff_sem hf) c2 = Some (e, ef_sig, vals2) /\
(forall pubSrc' pubTgt' : block -> bool,
pubSrc' =
(fun b : block => locBlocksSrc mu b && REACH m1 (exportedSrc mu vals1) b) ->
pubTgt' =
(fun b : block => locBlocksTgt mu b && REACH m2 (exportedTgt mu vals2) b) ->
forall nu : SM_Injection,
nu = replace_locals mu pubSrc' pubTgt' ->
MATCH nu c1 m1 c2 m2 /\ Mem.inject (shared_of nu) m1 m2)).
Proof. intros.
destruct H as [MC [RC [PG [GFP [Glob [SMV [WDmu MINJ]]]]]]].
simpl in *. inv MC; simpl in *; inv H0.
destruct f; inv H5.
split; trivial.
rewrite vis_restrict_sm, restrict_sm_all, restrict_nest in *; trivial.
simpl.
monadInv H2.
destruct (observableEF_dec hf e0); inv H4.
eexists.
split. eapply val_list_inject_forall_inject. eassumption.
split. reflexivity.
intros.
(* [nu] is well formed: the new public predicates are backed by actual
   local mappings, obtained via REACH_local_REACH. *)
assert (WDnu: SM_wd nu).
subst.
eapply replace_locals_wd; eauto.
intros b Hb.
apply andb_true_iff in Hb. destruct Hb.
exploit (REACH_local_REACH _ WDmu); try eassumption.
eapply val_list_inject_forall_inject.
eapply val_list_inject_incr; try eassumption.
apply restrict_incr.
intros [b2 [d [loc R2]]].
exists b2, d.
rewrite loc, R2. destruct (local_DomRng _ WDmu _ _ _ loc). intuition.
intros b Hb. apply andb_true_iff in Hb. eapply Hb.
(* MATCH for [nu]: each conjunct reduces to the corresponding conjunct
   for [mu] via the replace_locals_* rewriting lemmas. *)
split. subst.
split.
econstructor; eauto; rewrite <- restrict_sm_replace_locals, replace_locals_vis.
eapply match_stackframes_replace_locals; eassumption.
rewrite replace_locals_as_inj, replace_locals_vis.
rewrite vis_restrict_sm, restrict_sm_all, restrict_nest; trivial.
rewrite replace_locals_as_inj, replace_locals_vis,
replace_locals_frgnBlocksSrc.
intuition.
split; intros b Hb.
rewrite replace_locals_DOM in Hb. eapply SMV; trivial.
rewrite replace_locals_RNG in Hb. eapply SMV; trivial.
(* Mem.inject (shared_of nu): first show reach-closure of the source
   memory under the shared map, then conclude by inject_mapped. *)
assert (MINJR: Mem.inject (restrict (as_inj mu) (vis mu)) m1 m2).
eapply inject_restrict; try eassumption.
assert (RCnu: REACH_closed m1 (mapped (shared_of nu))).
subst. rewrite replace_locals_shared.
clear MINJ.
red; intros b Hb. apply REACHAX in Hb. destruct Hb as [L HL].
generalize dependent b.
induction L; simpl; intros; inv HL. trivial.
specialize (IHL _ H4); clear H4.
destruct (mappedD_true _ _ IHL) as [[bb ofs] Hbb]. clear IHL.
apply mapped_charT.
assert (MV:= Mem.mi_memval _ _ _ (Mem.mi_inj _ _ _ MINJR)).
destruct (joinD_Some _ _ _ _ _ Hbb); clear Hbb.
(* Predecessor block is foreign-mapped. *)
exploit (MV b' z bb ofs).
eapply restrictI_Some. apply foreign_in_all; eassumption.
unfold vis. unfold foreign_of in H0. destruct mu. simpl in *.
destruct (frgnBlocksSrc b'); inv H0. intuition.
assumption.
clear MV; intros. rewrite H7 in H2. inv H2.
exists (b2, delta). apply joinI.
remember (locBlocksSrc mu b) as d.
destruct d; apply eq_sym in Heqd.
right; simpl.
split. eapply locBlocksSrc_foreignNone; eassumption.
destruct (restrictD_Some _ _ _ _ _ H8); clear H8.
destruct (joinD_Some _ _ _ _ _ H2).
destruct (extern_DomRng _ WDmu _ _ _ H6).
apply extBlocksSrc_locBlocksSrc in H8. rewrite H8 in Heqd; inv Heqd.
trivial.
destruct H6. rewrite H8.
assert (Rb: REACH m1 (exportedSrc mu vals1) b = true).
eapply REACH_cons; try eassumption.
eapply REACH_nil. unfold exportedSrc, sharedSrc. apply foreign_in_shared in H0. rewrite H0. intuition.
rewrite Rb; trivial.
left. eapply restrict_vis_foreign; try eassumption.
destruct (restrictD_Some _ _ _ _ _ H8).
rewrite (as_inj_locBlocks _ _ _ _ WDmu H2) in Heqd. trivial.
(* Predecessor block is public-locally mapped. *)
destruct H0. remember (locBlocksSrc mu b' && REACH m1 (exportedSrc mu vals1) b') as d.
destruct d; apply eq_sym in Heqd; inv H2.
apply andb_true_iff in Heqd; destruct Heqd.
exploit (MV b' z bb ofs).
eapply restrictI_Some. apply local_in_all; eassumption.
unfold vis. rewrite H2; trivial.
assumption.
clear MV; intros. rewrite H7 in H8. inv H8.
exists (b2, delta). apply joinI.
remember (locBlocksSrc mu b) as d.
destruct d; apply eq_sym in Heqd.
right; simpl. destruct (restrictD_Some _ _ _ _ _ H11); clear H11.
split. eapply locBlocksSrc_foreignNone; eassumption.
destruct (joinD_Some _ _ _ _ _ H8).
destruct (extern_DomRng _ WDmu _ _ _ H10).
apply extBlocksSrc_locBlocksSrc in H11. rewrite H11 in Heqd; inv Heqd.
trivial.
destruct H10. rewrite H11.
assert (REACH m1 (exportedSrc mu vals1) b = true).
eapply REACH_cons; try eassumption.
rewrite H12. trivial.
simpl. left. eapply restrict_vis_foreign; try eassumption.
destruct (restrictD_Some _ _ _ _ _ H11).
rewrite (as_inj_locBlocks _ _ _ _ WDmu H8) in Heqd. trivial.
eapply inject_mapped. eapply MINJ. eassumption.
subst. rewrite replace_locals_shared.
red; intros b b' delta Hb. destruct (joinD_Some _ _ _ _ _ Hb); clear Hb.
eapply foreign_in_all; eassumption.
destruct H0.
destruct (locBlocksSrc mu b && REACH m1 (exportedSrc mu vals1) b); inv H2.
rewrite H5; eapply local_in_all; eassumption.
Qed.
Lemma MATCH_afterExternal: forall (mu : SM_Injection) (st1 st2 : RTL_core) (m1 : mem)
(e : external_function) (vals1 : list val) (m2 : mem) (ef_sig : signature)
(vals2 : list val) (e' : external_function) (ef_sig' : signature)
(INJ: Mem.inject (as_inj mu) m1 m2)
(MTCH: MATCH mu st1 m1 st2 m2)
(AtExtSrc: at_external (rtl_eff_sem hf) st1 = Some (e, ef_sig, vals1))
(AtExtTgt: at_external (rtl_eff_sem hf) st2 = Some (e', ef_sig', vals2))
(ArgsInj: Forall2 (val_inject (restrict (as_inj mu) (vis mu))) vals1 vals2)
(pubSrc' : block -> bool)
(HpubSrc: pubSrc' = (fun b => locBlocksSrc mu b && REACH m1 (exportedSrc mu vals1) b))
(pubTgt' : block -> bool)
(HpubTgt: pubTgt' = (fun b => locBlocksTgt mu b && REACH m2 (exportedTgt mu vals2) b))
(nu : SM_Injection)
(Hnu: nu = replace_locals mu pubSrc' pubTgt')
(nu' : SM_Injection) (ret1 : val) (m1' : mem) (ret2 : val) (m2' : mem)
(Ret1TP: Val.has_type ret1 (proj_sig_res (AST.ef_sig e)))
(Ret2TP: Val.has_type ret2 (proj_sig_res (AST.ef_sig e')))
(INC: extern_incr nu nu')
(SEP: globals_separate tge nu nu')
(WDnu': SM_wd nu')
(SMVnu': sm_valid nu' m1' m2')
(INJnu': Mem.inject (as_inj nu') m1' m2')
(RetInj: val_inject (as_inj nu') ret1 ret2)
(FWD1: mem_forward m1 m1')
(FWD2: mem_forward m2 m2')
(frgnSrc' : block -> bool)
(HfrgnSrc: frgnSrc' = (fun b =>
DomSrc nu' b &&
(negb (locBlocksSrc nu' b) && REACH m1' (exportedSrc nu' (ret1 :: nil)) b)))
(frgnTgt' : block -> bool)
(HfrgnTgt: frgnTgt' = (fun b =>
DomTgt nu' b &&
(negb (locBlocksTgt nu' b) && REACH m2' (exportedTgt nu' (ret2 :: nil)) b)))
mu' (Hmu': mu' = replace_externs nu' frgnSrc' frgnTgt')
(UnchPrivSrc: Mem.unchanged_on (fun b z => locBlocksSrc nu b = true /\ pubBlocksSrc nu b = false) m1 m1')
(UnchLOOR: Mem.unchanged_on (local_out_of_reach nu m1) m2 m2'),
exists st1' st2' : RTL_core,
after_external (rtl_eff_sem hf) (Some ret1) st1 = Some st1' /\
after_external (rtl_eff_sem hf) (Some ret2) st2 = Some st2' /\
MATCH mu' st1' m1' st2' m2'.
Proof. intros. simpl.
destruct MTCH as [MC [RC [PG [GFP [Glob [VAL [WDmu MINJ]]]]]]].
simpl in *. inv MC; simpl in *; inv AtExtSrc.
destruct f; inv H4. simpl in *.
inv H1.
destruct (observableEF_dec hf e0); inv H5; inv AtExtTgt.
eexists. eexists.
split. reflexivity.
split. reflexivity.
(* inv TF.*)
assert (INCvisNu': inject_incr
(restrict (as_inj nu')
(vis
(replace_externs nu'
(fun b : Values.block =>
DomSrc nu' b &&
(negb (locBlocksSrc nu' b) &&
REACH m1' (exportedSrc nu' (ret1 :: nil)) b))
(fun b : Values.block =>
DomTgt nu' b &&
(negb (locBlocksTgt nu' b) &&
REACH m2' (exportedTgt nu' (ret2 :: nil)) b))))) (as_inj nu')).
unfold vis. rewrite replace_externs_frgnBlocksSrc, replace_externs_locBlocksSrc.
apply restrict_incr.
assert (RC': REACH_closed m1' (mapped (as_inj nu'))).
eapply inject_REACH_closed; eassumption.
assert (PGnu': meminj_preserves_globals (Genv.globalenv prog) (as_inj nu')).
eapply meminj_preserves_globals_extern_incr_separate. eassumption.
rewrite replace_locals_as_inj. assumption.
assumption.
specialize (genvs_domain_eq_isGlobal _ _ GDE_lemma). intros GL.
red. unfold ge in GL. rewrite GL. apply SEP.
assert (RR1: REACH_closed m1'
(fun b : Values.block =>
locBlocksSrc nu' b
|| DomSrc nu' b &&
(negb (locBlocksSrc nu' b) &&
REACH m1' (exportedSrc nu' (ret1 :: nil)) b))).
intros b Hb. rewrite REACHAX in Hb. destruct Hb as [L HL].
generalize dependent b.
induction L; simpl; intros; inv HL.
assumption.
specialize (IHL _ H4); clear H4.
apply orb_true_iff in IHL.
remember (locBlocksSrc nu' b') as l.
destruct l; apply eq_sym in Heql.
(*case locBlocksSrc nu' b' = true*)
clear IHL.
remember (pubBlocksSrc nu' b') as p.
destruct p; apply eq_sym in Heqp.
assert (Rb': REACH m1' (mapped (as_inj nu')) b' = true).
apply REACH_nil.
destruct (pubSrc _ WDnu' _ Heqp) as [bb2 [dd1 [PUB PT]]].
eapply mappedI_true.
apply (pub_in_all _ WDnu' _ _ _ PUB).
assert (Rb: REACH m1' (mapped (as_inj nu')) b = true).
eapply REACH_cons; try eassumption.
specialize (RC' _ Rb).
destruct (mappedD_true _ _ RC') as [[b2 d1] AI'].
remember (locBlocksSrc nu' b) as d.
destruct d; simpl; trivial.
apply andb_true_iff.
split. eapply as_inj_DomRng; try eassumption.
eapply REACH_cons; try eassumption.
apply REACH_nil. unfold exportedSrc.
rewrite (pubSrc_shared _ WDnu' _ Heqp). intuition.
destruct (UnchPrivSrc) as [UP UV]; clear UnchLOOR.
specialize (UP b' z Cur Readable).
specialize (UV b' z).
destruct INC as [_ [_ [_ [_ [LCnu' [_ [PBnu' [_ [FRGnu' _]]]]]]]]].
rewrite <- LCnu'. rewrite replace_locals_locBlocksSrc.
rewrite <- LCnu' in Heql. rewrite replace_locals_locBlocksSrc in *.
rewrite <- PBnu' in Heqp. rewrite replace_locals_pubBlocksSrc in *.
clear INCvisNu'.
rewrite Heql in *. simpl in *. intuition.
assert (VB: Mem.valid_block m1 b').
eapply VAL. unfold DOM, DomSrc. rewrite Heql. intuition.
apply (H1 VB) in H5.
rewrite (H3 H5) in H7. clear H1 H3.
remember (locBlocksSrc mu b) as q.
destruct q; simpl; trivial; apply eq_sym in Heqq.
assert (Rb : REACH m1 (vis mu) b = true).
eapply REACH_cons; try eassumption.
apply REACH_nil. unfold vis. rewrite Heql; trivial.
specialize (RC _ Rb). unfold vis in RC.
rewrite Heqq in RC; simpl in *.
rewrite replace_locals_frgnBlocksSrc in FRGnu'.
rewrite FRGnu' in RC.
apply andb_true_iff.
split. unfold DomSrc. rewrite (frgnBlocksSrc_extBlocksSrc _ WDnu' _ RC). intuition.
apply REACH_nil. unfold exportedSrc.
rewrite (frgnSrc_shared _ WDnu' _ RC). intuition.
(*case DomSrc nu' b' &&
(negb (locBlocksSrc nu' b') &&
REACH m1' (exportedSrc nu' (ret1 :: nil)) b') = true*)
destruct IHL. congruence.
apply andb_true_iff in H1. simpl in H1.
destruct H1 as [DomNu' Rb'].
clear INC SEP INCvisNu' UnchLOOR UnchPrivSrc.
remember (locBlocksSrc nu' b) as d.
destruct d; simpl; trivial. apply eq_sym in Heqd.
apply andb_true_iff.
split. assert (RET: Forall2 (val_inject (as_inj nu')) (ret1::nil) (ret2::nil)).
constructor. assumption. constructor.
destruct (REACH_as_inj _ WDnu' _ _ _ _ INJnu' RET
_ Rb' (fun b => true)) as [b2 [d1 [AI' _]]]; trivial.
assert (REACH m1' (mapped (as_inj nu')) b = true).
eapply REACH_cons; try eassumption.
apply REACH_nil. eapply mappedI_true; eassumption.
specialize (RC' _ H1).
destruct (mappedD_true _ _ RC') as [[? ?] ?].
eapply as_inj_DomRng; eassumption.
eapply REACH_cons; try eassumption.
assert (RRC: REACH_closed m1' (fun b : Values.block =>
mapped (as_inj nu') b &&
(locBlocksSrc nu' b
|| DomSrc nu' b &&
(negb (locBlocksSrc nu' b) &&
REACH m1' (exportedSrc nu' (ret1 :: nil)) b)))).
eapply REACH_closed_intersection; eassumption.
assert (GFnu': forall b, isGlobalBlock (Genv.globalenv prog) b = true ->
DomSrc nu' b &&
(negb (locBlocksSrc nu' b) && REACH m1' (exportedSrc nu' (ret1 :: nil)) b) = true).
intros. specialize (Glob _ H1).
assert (FSRC:= extern_incr_frgnBlocksSrc _ _ INC).
rewrite replace_locals_frgnBlocksSrc in FSRC.
rewrite FSRC in Glob.
rewrite (frgnBlocksSrc_locBlocksSrc _ WDnu' _ Glob).
apply andb_true_iff; simpl.
split.
unfold DomSrc. rewrite (frgnBlocksSrc_extBlocksSrc _ WDnu' _ Glob). intuition.
apply REACH_nil. unfold exportedSrc.
rewrite (frgnSrc_shared _ WDnu' _ Glob). intuition.
split. (* clear - WDnu' INC H INJnu' RetInj H0.*)
econstructor; try eassumption;
try rewrite <- restrict_sm_replace_externs, replace_externs_vis.
eapply match_stackframes_replace_externs.
eapply match_stackframes_extern_incr; try eapply INC; trivial.
eapply match_stackframes_replace_locals; eauto.
instantiate (1:= fun b =>
locBlocksTgt mu b && REACH m2 (exportedTgt mu vals2) b).
instantiate (1:= fun b =>
locBlocksSrc mu b && REACH m1 (exportedSrc mu vals1) b).
red in INC. red.
rewrite replace_locals_frgnBlocksSrc, replace_locals_frgnBlocksTgt,
replace_locals_pubBlocksSrc, replace_locals_pubBlocksTgt,
replace_locals_locBlocksSrc, replace_locals_locBlocksTgt,
replace_locals_extBlocksSrc, replace_locals_extBlocksTgt,
replace_locals_local, replace_locals_extern in *.
rewrite restrict_sm_frgnBlocksSrc, restrict_sm_frgnBlocksTgt,
restrict_sm_pubBlocksSrc, restrict_sm_pubBlocksTgt,
restrict_sm_locBlocksSrc, restrict_sm_locBlocksTgt,
restrict_sm_extBlocksSrc, restrict_sm_extBlocksTgt,
restrict_sm_local, restrict_sm_extern.
rewrite restrict_sm_frgnBlocksSrc, restrict_sm_frgnBlocksTgt,
restrict_sm_locBlocksSrc, restrict_sm_locBlocksTgt,
restrict_sm_extBlocksSrc, restrict_sm_extBlocksTgt,
restrict_sm_local, restrict_sm_extern.
intuition.
red; intros. destruct (restrictD_Some _ _ _ _ _ H11); clear H11.
apply restrictI_Some. apply H1; trivial.
unfold vis in H14. unfold DomSrc. rewrite H6 in H14.
destruct (locBlocksSrc nu' b); simpl in *; trivial.
rewrite H10 in H14.
rewrite (frgnBlocksSrc_extBlocksSrc _ WDnu' _ H14); simpl.
apply REACH_nil. unfold exportedSrc.
rewrite (frgnSrc_shared _ WDnu' _ H14). intuition.
rewrite H4. unfold restrict. extensionality b.
unfold vis, DomSrc. rewrite H10, H6.
remember (local_of nu' b) as d.
destruct d; apply eq_sym in Heqd.
destruct p.
destruct (local_DomRng _ WDnu' _ _ _ Heqd) as [lS _].
rewrite lS; simpl; trivial.
destruct (locBlocksSrc nu' b || frgnBlocksSrc nu' b).
destruct (locBlocksSrc nu' b
|| (locBlocksSrc nu' b || extBlocksSrc nu' b) &&
(negb (locBlocksSrc nu' b) &&
REACH m1' (exportedSrc nu' (ret1 :: nil)) b)); simpl; trivial.
destruct (locBlocksSrc nu' b
|| (locBlocksSrc nu' b || extBlocksSrc nu' b) &&
(negb (locBlocksSrc nu' b) &&
REACH m1' (exportedSrc nu' (ret1 :: nil)) b)); simpl; trivial.
apply restrict_sm_WD. assumption.
unfold vis, DomSrc. intros.
destruct (locBlocksSrc nu' b); simpl in *; trivial.
rewrite (frgnBlocksSrc_extBlocksSrc _ WDnu' _ H1); simpl.
apply REACH_nil. unfold exportedSrc.
rewrite (frgnSrc_shared _ WDnu' _ H1). intuition.
rewrite restrict_sm_frgnBlocksSrc.
unfold DomSrc. intros.
rewrite (frgnBlocksSrc_locBlocksSrc _ WDnu' _ H1); simpl.
rewrite (frgnBlocksSrc_extBlocksSrc _ WDnu' _ H1); simpl.
apply REACH_nil. unfold exportedSrc.
rewrite (frgnSrc_shared _ WDnu' _ H1). intuition.
unfold vis. rewrite replace_externs_as_inj.
rewrite replace_externs_frgnBlocksSrc, replace_externs_locBlocksSrc,
restrict_sm_locBlocksSrc, restrict_sm_all.
rewrite restrict_nest; trivial.
eapply restrict_val_inject; try eassumption.
intros.
destruct (getBlocks_inject (as_inj nu') (ret1::nil) (ret2::nil))
with (b:=b) as [bb [dd [JJ' GBbb]]]; try eassumption.
constructor. assumption. constructor.
remember (locBlocksSrc nu' b) as d.
destruct d; simpl; trivial. apply andb_true_iff.
split. eapply as_inj_DomRng; eassumption.
apply REACH_nil. unfold exportedSrc.
rewrite H1; trivial.
rewrite replace_externs_as_inj; trivial.
destruct (eff_after_check2 _ _ _ _ _ INJnu' RetInj
_ (eq_refl _) _ (eq_refl _) _ (eq_refl _) WDnu' SMVnu').
unfold vis in *.
rewrite replace_externs_locBlocksSrc,
replace_externs_frgnBlocksSrc in *.
intuition.
red; intros b fb Hb. destruct (GFP _ _ Hb). split; trivial.
eapply extern_incr_as_inj; try eassumption.
rewrite replace_locals_as_inj. assumption.
Qed.
Ltac TransfInstr :=
match goal with
| H1: (PTree.get ?pc ?c = Some ?instr), f: function, approx: PMap.t numbering |- _ =>
cut ((transf_function' f approx).(fn_code)!pc = Some(transf_instr approx!!pc instr));
[ simpl transf_instr
| unfold transf_function', transf_code; simpl; rewrite PTree.gmap;
unfold option_map; rewrite H1; reflexivity ]
end.
Lemma MATCH_effcore_diagram:
forall st1 m1 st1' m1' (U1 : block -> Z -> bool)
(CS: effstep (rtl_eff_sem hf) ge U1 st1 m1 st1' m1')
st2 mu m2
(MTCH: MATCH mu st1 m1 st2 m2),
exists st2' m2' (U2 : block -> Z -> bool),
effstep (rtl_eff_sem hf) tge U2 st2 m2 st2' m2'
/\ exists mu',
intern_incr mu mu' /\
globals_separate ge mu mu' /\
sm_locally_allocated mu mu' m1 m2 m1' m2' /\
MATCH mu' st1' m1' st2' m2' /\
SM_wd mu' /\
sm_valid mu' m1' m2' /\
(forall
(EffSrc: forall b ofs, U1 b ofs = true -> vis mu b = true)
b2 ofs, U2 b2 ofs = true ->
visTgt mu b2 = true /\
(locBlocksTgt mu b2 = false ->
exists (b1 : block) (delta1 : Z),
foreign_of mu b1 = Some (b2, delta1) /\
U1 b1 (ofs - delta1) = true /\
Mem.perm m1 b1 (ofs - delta1) Max Nonempty)).
Proof. intros.
assert (SymbPres := symbols_preserved).
induction CS;
destruct MTCH as [MSTATE PRE]; inv MSTATE.
{ (* Inop *)
exists (RTL_State s' (transf_function' f approx) sp' pc' rs'), m2.
eexists; split.
eapply rtl_effstep_exec_Inop; try TransfInstr; auto.
exists mu.
split. apply intern_incr_refl.
split. solve [apply gsep_refl].
split. apply sm_locally_allocatedChar.
repeat split; extensionality bb;
try rewrite (freshloc_irrefl); intuition.
split. 2: intuition.
split.
econstructor; eauto.
exploit analysis_correct_1; try eassumption.
instantiate (1:=pc'); left; trivial.
unfold transfer. rewrite H. eassumption.
trivial.
intuition. }
{ (* Iop *)
TransfInstr; intros C.
destruct PRE as [RC [PG [GFP [Glob [SMV [WD MINJ]]]]]].
destruct SP as [spb [spb' [B [B' SP]]]]. subst sp; subst sp'.
rewrite vis_restrict_sm, restrict_sm_all, restrict_nest in *; trivial.
assert (PGR: meminj_preserves_globals ge (restrict (as_inj mu) (vis mu))).
rewrite <- restrict_sm_all.
eapply restrict_sm_preserves_globals; try eassumption.
unfold vis. intuition.
assert (MInjR : Mem.inject (restrict (as_inj mu) (vis mu)) m m2).
eapply inject_restrict; assumption.
exploit eval_operation_inject; try eapply H0; eauto.
rewrite <- restrict_sm_all.
apply local_in_all.
apply restrict_sm_WD; trivial. eassumption.
eapply regset_get_list; eassumption.
rewrite eval_shift_stack_operation; simpl; rewrite Int.add_zero_l.
intros [v' [F G]].
destruct (is_trivial_op op) eqn:?.
(*case is_trivial_op op = true*)
exists (RTL_State s' (transf_function' f approx)
(Vptr spb' (Int.repr 0)) pc'
(rs'#res <- v')), m2.
eexists; split.
eapply rtl_effstep_exec_Iop'; eauto.
rewrite <- F. apply eval_operation_preserved.
exact symbols_preserved.
exists mu.
split. apply intern_incr_refl.
split. solve [apply gsep_refl].
split. apply sm_locally_allocatedChar.
repeat split; extensionality bb;
try rewrite (freshloc_irrefl); intuition.
split. 2: intuition.
split. 2: intuition.
econstructor; eauto.
eapply wt_exec_Iop; eauto. eapply wt_instr_at; eauto.
eapply analysis_correct_1; eauto. simpl; auto.
unfold transfer; rewrite H.
eapply add_op_satisfiable; eauto. eapply wf_analyze; eauto.
rewrite vis_restrict_sm, restrict_sm_all,
restrict_nest in *; trivial.
eapply regset_set; assumption.
exists spb, spb'; auto.
(*case is_trivial_op op = false*)
destruct (valnum_regs approx!!pc args) as [n1 vl] eqn:?.
assert (wf_numbering approx!!pc). eapply wf_analyze; eauto.
destruct SAT as [valu1 NH1].
exploit valnum_regs_holds; eauto. intros [valu2 [NH2 [EQ AG]]].
assert (wf_numbering n1). eapply wf_valnum_regs; eauto.
destruct (find_rhs n1 (Op op vl)) as [r|] eqn:?.
(* replaced by move *)
assert (EV: rhs_evals_to ge (Vptr spb Int.zero) valu2
(Op op vl) m rs#r).
eapply find_rhs_correct; eauto.
assert (RES: rs#r = v). red in EV. congruence.
eexists; eexists; eexists.
split. eapply rtl_effstep_exec_Iop'; eauto.
simpl. reflexivity.
exists mu.
split. apply intern_incr_refl.
split. solve [apply gsep_refl].
split. apply sm_locally_allocatedChar.
repeat split; extensionality bb;
try rewrite (freshloc_irrefl); intuition.
split. 2: intuition.
split. 2: intuition.
econstructor; eauto.
eapply wt_exec_Iop; eauto. eapply wt_instr_at; eauto.
eapply analysis_correct_1; eauto. simpl; auto.
unfold transfer; rewrite H.
eapply add_op_satisfiable; eauto.
exists valu1. assumption.
rewrite vis_restrict_sm, restrict_sm_all,
restrict_nest in *; trivial.
eapply regset_set; try assumption.
destruct v. constructor.
specialize (RI r); rewrite RES in RI. apply RI.
specialize (RI r); rewrite RES in RI. apply RI.
specialize (RI r); rewrite RES in RI. apply RI.
specialize (RI r); rewrite RES in RI. apply RI.
exists spb, spb'; auto.
(* possibly simplified *)
destruct (reduce operation combine_op n1 op args vl) as [op' args'] eqn:?.
assert (RES: eval_operation ge (Vptr spb Int.zero) op' rs##args' m = Some v).
eapply reduce_sound with (sem := fun op vl => eval_operation ge (Vptr spb Int.zero) op vl m); eauto.
intros; eapply combine_op_sound; eauto.
exploit eval_operation_inject; try eapply RES; eauto.
rewrite <- restrict_sm_all.
apply local_in_all.
apply restrict_sm_WD; trivial. eassumption.
eapply regset_get_list; eassumption.
rewrite eval_shift_stack_operation; simpl; rewrite Int.add_zero_l.
intros [v'' [F' G']].
eexists; eexists; eexists.
split. eapply rtl_effstep_exec_Iop'; eauto.
instantiate (1:=v'').
rewrite <- F'. apply eval_operation_preserved.
exact symbols_preserved.
exists mu.
split. apply intern_incr_refl.
split. solve [apply gsep_refl].
split. apply sm_locally_allocatedChar.
repeat split; extensionality bb;
try rewrite (freshloc_irrefl); intuition.
split. 2: intuition.
split. 2: intuition.
econstructor; eauto.
eapply wt_exec_Iop. 2: eapply H0. eapply wt_instr_at; eauto.
assumption.
eapply analysis_correct_1; eauto. simpl; auto.
unfold transfer; rewrite H.
eapply add_op_satisfiable; eauto.
exists valu1. assumption.
rewrite vis_restrict_sm, restrict_sm_all,
restrict_nest in *; trivial.
eapply regset_set; assumption.
exists spb, spb'; auto. }
{ (* Iload *)
TransfInstr; intros C.
destruct PRE as [RC [PG [GFP [Glob [SMV [WD MINJ]]]]]].
destruct SP as [spb [spb' [B [B' SP]]]]. subst sp; subst sp'.
rewrite vis_restrict_sm, restrict_sm_all, restrict_nest in *; trivial.
assert (PGR: meminj_preserves_globals ge (restrict (as_inj mu) (vis mu))).
rewrite <- restrict_sm_all.
eapply restrict_sm_preserves_globals; try eassumption.
unfold vis. intuition.
assert (MInjR : Mem.inject (restrict (as_inj mu) (vis mu)) m m2).
eapply inject_restrict; assumption.
(* exists (State s' (transf_function' f approx) sp pc' (rs#dst <- v) m); split.*)
destruct (valnum_regs approx!!pc args) as [n1 vl] eqn:?.
assert (wf_numbering approx!!pc). eapply wf_analyze; eauto.
destruct SAT as [valu1 NH1].
exploit valnum_regs_holds; eauto. intros [valu2 [NH2 [EQ AG]]].
assert (wf_numbering n1). eapply wf_valnum_regs; eauto.
destruct (find_rhs n1 (Load chunk addr vl)) as [r|] eqn:?.
{ (* replaced by move *)
assert (EV: rhs_evals_to ge (Vptr spb Int.zero) valu2
(Load chunk addr vl) m rs#r). eapply find_rhs_correct; eauto.
assert (RES: rs#r = v).
red in EV. destruct EV as [a' [EQ1 EQ2]]. congruence.
clear EV.
exploit eval_addressing_inject; try eexact H0; try eassumption.
rewrite <- restrict_sm_all.
apply local_in_all.
apply restrict_sm_WD; trivial. eassumption.
eapply regset_get_list; eassumption.
intros [a1' [F1 G1]].
exploit Mem.loadv_inject. eapply MInjR. eexact H1. eexact G1.
intros [v' [LD' ValInjV']].
eexists; eexists; eexists; split.
eapply rtl_effstep_exec_Iop'; eauto. simpl. reflexivity.
exists mu.
split. apply intern_incr_refl.
split. solve [apply gsep_refl].
split. apply sm_locally_allocatedChar.
repeat split; extensionality bb;
try rewrite (freshloc_irrefl); intuition.
split. 2: intuition.
split. 2: intuition.
econstructor; eauto.
eapply wt_exec_Iload; eauto. eapply wt_instr_at; eauto.
eapply analysis_correct_1; eauto. simpl; auto.
unfold transfer; rewrite H.
eapply add_load_satisfiable; eauto. exists valu1. assumption.
rewrite vis_restrict_sm, restrict_sm_all,
restrict_nest in *; trivial.
eapply regset_set; try assumption.
destruct v. constructor.
specialize (RI r); rewrite RES in RI. apply RI.
specialize (RI r); rewrite RES in RI. apply RI.
specialize (RI r); rewrite RES in RI. apply RI.
specialize (RI r); rewrite RES in RI. apply RI.
exists spb, spb'; auto. }
{ (* possibly simplified *)
destruct (reduce addressing combine_addr n1 addr args vl)
as [addr' args'] eqn:?.
assert (ADDR: eval_addressing ge (Vptr spb Int.zero)
addr' rs##args' = Some a).
eapply reduce_sound with (sem := fun addr vl => eval_addressing ge (Vptr spb Int.zero) addr vl); eauto.
intros; eapply combine_addr_sound; eauto.
exploit eval_addressing_inject; try eexact ADDR; try eassumption.
rewrite <- restrict_sm_all.
apply local_in_all.
apply restrict_sm_WD; trivial. eassumption.
eapply regset_get_list; eassumption.
rewrite eval_shift_stack_addressing; simpl; rewrite Int.add_zero_l.
intros [a1' [F1 G1]].
exploit Mem.loadv_inject. eapply MInjR. eexact H1. eexact G1.
intros [v' [LD' ValInjV']].
eexists; eexists; eexists; split.
eapply rtl_effstep_exec_Iload; eauto.
rewrite <- F1. apply eval_addressing_preserved.
exact symbols_preserved.
exists mu.
split. apply intern_incr_refl.
split. solve [apply gsep_refl].
split. apply sm_locally_allocatedChar.
repeat split; extensionality bb;
try rewrite (freshloc_irrefl); intuition.
split. 2: intuition.
split. 2: intuition.
econstructor; eauto.
eapply wt_exec_Iload; eauto. eapply wt_instr_at; eauto.
eapply analysis_correct_1; eauto. simpl; auto.
unfold transfer; rewrite H.
eapply add_load_satisfiable; eauto. exists valu1. assumption.
rewrite vis_restrict_sm, restrict_sm_all,
restrict_nest in *; trivial.
eapply regset_set; assumption.
exists spb, spb'; auto. } }
{ (* Istore *)
TransfInstr; intros C.
destruct PRE as [RC [PG [GFP [Glob [SMV [WD MINJ]]]]]].
destruct SP as [spb [spb' [B [B' SP]]]]. subst sp; subst sp'.
rewrite vis_restrict_sm, restrict_sm_all, restrict_nest in *; trivial.
assert (PGR: meminj_preserves_globals ge (restrict (as_inj mu) (vis mu))).
rewrite <- restrict_sm_all.
eapply restrict_sm_preserves_globals; try eassumption.
unfold vis. intuition.
assert (MInjR : Mem.inject (restrict (as_inj mu) (vis mu)) m m2).
eapply inject_restrict; assumption.
destruct (valnum_regs approx!!pc args) as [n1 vl] eqn:?.
assert (wf_numbering approx!!pc). eapply wf_analyze; eauto.
destruct SAT as [valu1 NH1].
exploit valnum_regs_holds; eauto. intros [valu2 [NH2 [EQ AG]]].
assert (wf_numbering n1). eapply wf_valnum_regs; eauto.
destruct (reduce addressing combine_addr n1 addr args vl) as [addr' args'] eqn:?.
assert (ADDR: eval_addressing ge (Vptr spb Int.zero) addr' rs##args' = Some a).
eapply reduce_sound with (sem := fun addr vl => eval_addressing ge (Vptr spb Int.zero) addr vl); eauto.
intros; eapply combine_addr_sound; eauto.
exploit eval_addressing_inject; try eexact ADDR; try eassumption.
rewrite <- restrict_sm_all.
apply local_in_all.
apply restrict_sm_WD; trivial. eassumption.
eapply regset_get_list; eassumption.
rewrite eval_shift_stack_addressing; simpl; rewrite Int.add_zero_l.
intros [a' [ADDR' G1]].
exploit (Mem.storev_mapped_inject (restrict (as_inj mu) (vis mu)));
try eassumption.
eapply RI.
intros [m'' [P Q]].
eexists; eexists; eexists; split.
eapply rtl_effstep_exec_Istore; eauto.
rewrite <- ADDR'. apply eval_addressing_preserved.
exact symbols_preserved.
assert (SMV': sm_valid mu m' m'').
split; intros;
eapply storev_valid_block_1; try eassumption;
eapply SMV; assumption.
exists mu.
split. apply intern_incr_refl.
split. solve [apply gsep_refl].
split. apply sm_locally_allocatedChar.
repeat split; extensionality bb;
try rewrite (storev_freshloc _ _ _ _ _ P);
try rewrite (storev_freshloc _ _ _ _ _ H1); intuition.
split.
split.
econstructor; eauto.
eapply analysis_correct_1; eauto. simpl; auto.
unfold transfer; rewrite H.
eapply add_store_satisfiable; eauto. exists valu1; assumption.
exploit wt_instr_at; eauto. intros WTI; inv WTI.
eapply Val.has_subtype; eauto.
rewrite vis_restrict_sm, restrict_sm_all,
restrict_nest in *; trivial.
exists spb, spb'; auto.
specialize (RI src).
intuition.
destruct a; inv H1.
eapply REACH_Store; try eassumption.
inv G1. destruct (restrictD_Some _ _ _ _ _ H6); trivial.
intros bb' Hbb'. rewrite getBlocks_char in Hbb'. destruct Hbb' as [off Hoff].
destruct Hoff; try contradiction. subst.
rewrite H1 in RI. inv RI.
destruct (restrictD_Some _ _ _ _ _ H8); trivial.
exploit (Mem.storev_mapped_inject (as_inj mu)). eassumption. eassumption.
eapply val_inject_incr; try eassumption. eapply restrict_incr.
eapply val_inject_incr; try eassumption. eapply restrict_incr.
intros [m2'' [ST2 INJ]]. rewrite ST2 in P. inv P. eassumption.
intuition.
apply StoreEffectD in H4. destruct H4 as [i [VADDR' _]]. subst.
inv G1; inv H1. eapply visPropagateR; eassumption.
eapply StoreEffect_PropagateLeft; eassumption. }
{ (* Icall *)
TransfInstr; intros C.
destruct PRE as [RC [PG [GFP [Glob [SMV [WD MINJ]]]]]].
destruct SP as [spb [spb' [B [B' SP]]]]. subst sp; subst sp'.
rewrite vis_restrict_sm, restrict_sm_all, restrict_nest in *; trivial.
exploit find_function_translated; eauto. intros [tf [FIND' TRANSF']].
exploit find_function_preserved'; eauto.
clear FIND'; intros FIND'.
eexists; eexists; eexists; split.
eapply rtl_effstep_exec_Icall; eauto.
apply sig_preserved; auto.
exploit wt_instr_at; eauto. intros WTI; inv WTI.
exists mu.
split. apply intern_incr_refl.
split. solve [apply gsep_refl].
split. apply sm_locally_allocatedChar.
repeat split; extensionality bb;
try rewrite (freshloc_irrefl); intuition.
split. 2: intuition.
split. 2: intuition.
econstructor; eauto.
econstructor; eauto.
intros. eapply analysis_correct_1; eauto. simpl; auto.
unfold transfer; rewrite H.
apply empty_numbering_satisfiable.
rewrite vis_restrict_sm, restrict_sm_all, restrict_nest in *; trivial.
exists spb, spb'; auto.
eapply Val.has_subtype_list; eauto. apply wt_regset_list; auto.
rewrite vis_restrict_sm, restrict_sm_all, restrict_nest in *; trivial.
apply regset_get_list; assumption. }
{ (* Itailcall *)
TransfInstr; intros C.
destruct PRE as [RC [PG [GFP [Glob [SMV [WD MINJ]]]]]].
destruct SP as [spb [spb' [B [B' SP]]]]. subst sp'. inv B.
rewrite vis_restrict_sm, restrict_sm_all, restrict_nest in *; trivial.
assert (PGR: meminj_preserves_globals ge (restrict (as_inj mu) (vis mu))).
rewrite <- restrict_sm_all.
eapply restrict_sm_preserves_globals; try eassumption.
unfold vis. intuition.
assert (GFPR: globalfunction_ptr_inject ge (restrict (as_inj mu) (vis mu))).
eapply restrict_GFP_vis; eassumption.
rewrite restrict_sm_local in SP.
destruct (restrictD_Some _ _ _ _ _ SP).
exploit (free_parallel_inject (as_inj mu)); eauto.
apply local_in_all; eassumption.
intros [m'' [P Q]]. simpl in *. rewrite Zplus_0_r in P.
exploit find_function_translated; eauto. intros [tf [FIND' TRANSF']].
exploit find_function_preserved'; eauto.
clear FIND'; intros FIND'.
eexists; eexists; eexists; split.
eapply rtl_effstep_exec_Itailcall; eauto.
apply sig_preserved; auto.
exists mu.
split. apply intern_incr_refl.
split. solve [apply gsep_refl].
split. apply sm_locally_allocatedChar.
rewrite (freshloc_free _ _ _ _ _ P).
rewrite (freshloc_free _ _ _ _ _ H2).
repeat split; extensionality bb; intuition.
assert (SMV': sm_valid mu m' m'').
split; intros;
eapply Mem.valid_block_free_1; try eassumption;
eapply SMV; assumption.
exploit wt_instr_at; eauto. intros WTI; inv WTI.
split.
split.
econstructor; eauto.
replace (proj_sig_res (funsig fd)) with (proj_sig_res (fn_sig f)). auto.
unfold proj_sig_res. rewrite H8; auto.
eapply Val.has_subtype_list; eauto. apply wt_regset_list; auto.
rewrite vis_restrict_sm, restrict_sm_all, restrict_nest in *; trivial.
apply regset_get_list; assumption.
intuition.
eapply REACH_closed_free; eassumption.
destruct (restrictD_Some _ _ _ _ _ SP).
apply local_in_all in H4; trivial.
intuition.
apply FreeEffectD in H6. destruct H6 as [? [VB Arith2]]; subst.
eapply visPropagate; eassumption.
simpl in H6.
eapply FreeEffect_PropagateLeft; eassumption. }
{ (* Ibuiltin *)
TransfInstr; intros C.
destruct SP as [spb [spb' [B [B' SP]]]]. subst sp. subst sp'.
rewrite vis_restrict_sm, restrict_sm_all, restrict_nest in *; trivial.
assert (ArgsInj:= regset_get_list _ _ _ args RI).
exploit (inlineable_extern_inject ge tge); try eapply PRE;
try eassumption; try apply GDE_lemma.
intros [mu' [v' [m'' [TEC [ResInj [MINJ' [UNMAPPED [LOOR
[INC [SEP [GSEP [LOCALLOC [WD' [SMV' RC']]]]]]]]]]]]]].
eexists; eexists; eexists; split.
eapply rtl_effstep_exec_Ibuiltin; eauto.
exists mu'.
split; trivial.
split; trivial.
split; trivial.
split.
split.
{ econstructor; eauto.
eapply wt_exec_Ibuiltin; eauto. eapply wt_instr_at; eauto.
eapply analysis_correct_1; eauto. simpl; auto.
unfold transfer; rewrite H.
assert (CASE1: numbering_satisfiable ge (Vptr spb Int.zero)
(rs#res <- v) m' empty_numbering).
{ apply empty_numbering_satisfiable. }
assert (CASE2: m' = m -> numbering_satisfiable ge
(Vptr spb Int.zero) (rs#res <- v) m'
(add_unknown approx#pc res)).
{ intros. subst m'. apply add_unknown_satisfiable.
eapply wf_analyze; eauto. auto. }
assert (CASE3: numbering_satisfiable ge (Vptr spb Int.zero)
(rs#res <- v) m'
(add_unknown (kill_loads approx#pc) res)).
{ apply add_unknown_satisfiable. apply wf_kill_equations.
eapply wf_analyze; eauto.
eapply kill_loads_satisfiable; eauto. }
destruct ef; auto; apply CASE2; inv H0; auto.
rewrite vis_restrict_sm, restrict_sm_all, restrict_nest in *; trivial.
eapply regset_set; try eassumption.
eapply regset_incr; try eassumption.
eapply intern_incr_restrict; eassumption.
exists spb, spb'. split; trivial. split; trivial.
rewrite restrict_sm_local in *.
destruct (restrictD_Some _ _ _ _ _ SP).
apply restrictI_Some.
eapply INC; eassumption.
eapply intern_incr_vis; eassumption.
eapply match_stackframes_intern_incr; try eassumption.
eapply restrict_sm_intern_incr; eassumption.
apply restrict_sm_WD; trivial. }
intuition.
apply intern_incr_as_inj in INC; trivial.
apply sm_inject_separated_mem in SEP; trivial.
eapply meminj_preserves_incr_sep; eassumption.
red; intros b fb Hb. destruct (H3 _ _ Hb).
split; trivial.
eapply intern_incr_as_inj; eassumption.
rewrite <- (intern_incr_frgnBlocksSrc _ _ INC). eauto.
split; trivial.
split; trivial.
destruct PRE as [RC [PG [GFP [Glob [SMV [WD INJ]]]]]].
intros. eapply BuiltinEffect_Propagate; eassumption. }
{ (* Icond *)
TransfInstr; intros C.
rewrite vis_restrict_sm, restrict_sm_all, restrict_nest in *; trivial.
destruct PRE as [RC [PG [GFP [Glob [SMV [WD INJ]]]]]].
destruct (valnum_regs approx!!pc args) as [n1 vl] eqn:?.
assert (wf_numbering approx!!pc). eapply wf_analyze; eauto.
elim SAT; intros valu1 NH1.
exploit valnum_regs_holds; eauto. intros [valu2 [NH2 [EQ AG]]].
assert (wf_numbering n1). eapply wf_valnum_regs; eauto.
destruct (reduce condition combine_cond n1 cond args vl) as [cond' args'] eqn:?.
assert (RES: eval_condition cond' rs##args' m = Some b).
eapply reduce_sound with (sem := fun cond vl => eval_condition cond vl m); eauto.
intros; eapply combine_cond_sound; eauto.
assert (ArgsInj:= regset_get_list _ _ _ args' RI).
assert (RES': eval_condition cond' rs'##args' m2 = Some b).
eapply eval_condition_inject; eauto.
eapply inject_restrict; eassumption.
clear RES.
eexists; eexists; eexists; split.
eapply rtl_effstep_exec_Icond; eauto.
exists mu.
split. apply intern_incr_refl.
split. solve [apply gsep_refl].
split. apply sm_locally_allocatedChar.
repeat split; extensionality bb;
try rewrite (freshloc_irrefl); intuition.
split. 2: intuition.
split. 2: intuition.
econstructor; eauto.
destruct b; eapply analysis_correct_1; eauto; simpl; auto;
unfold transfer; rewrite H; auto.
rewrite vis_restrict_sm, restrict_sm_all, restrict_nest in *; trivial. }
{ (* Ijumptable *)
TransfInstr; intros C.
eexists; eexists; eexists; split.
eapply rtl_effstep_exec_Ijumptable; eauto.
specialize (RI arg). rewrite H0 in RI; inv RI. trivial.
exists mu.
split. apply intern_incr_refl.
split. solve [apply gsep_refl].
split. apply sm_locally_allocatedChar.
repeat split; extensionality bb;
try rewrite (freshloc_irrefl); intuition.
split. 2: intuition.
split. 2: intuition.
econstructor; eauto.
eapply analysis_correct_1; eauto. simpl. eapply list_nth_z_in; eauto.
unfold transfer; rewrite H; auto. }
{ (* Ireturn *)
TransfInstr; intros C.
destruct PRE as [RC [PG [GFP [Glob [SMV [WD INJ]]]]]].
assert (MInjR : Mem.inject (restrict (as_inj mu) (vis mu)) m m2).
eapply inject_restrict; eassumption.
destruct SP as [spb [spb' [B [B' Rsp]]]]; subst. inv B.
rewrite restrict_sm_local in Rsp.
destruct (restrictD_Some _ _ _ _ _ Rsp) as [LocSp visSp].
assert (SP:= local_in_all _ WD _ _ _ LocSp).
exploit free_parallel_inject; eauto.
apply restrictI_Some; eassumption.
simpl; rewrite Zplus_0_r; intros [m'' [P Q]].
assert (SMV': sm_valid mu m' m'').
split; intros;
eapply Mem.valid_block_free_1; try eassumption;
eapply SMV; assumption.
eexists; eexists; eexists; split.
eapply rtl_effstep_exec_Ireturn; eauto.
exists mu.
split. apply intern_incr_refl.
split. solve [apply gsep_refl].
split. apply sm_locally_allocatedChar.
repeat split; extensionality bb;
try rewrite (freshloc_free _ _ _ _ _ P);
try rewrite (freshloc_free _ _ _ _ _ H0); intuition.
split.
split.
econstructor; eauto.
exploit wt_instr_at; eauto. intros WTI; inv WTI; simpl. auto.
unfold proj_sig_res; rewrite H2. eapply Val.has_subtype; eauto.
destruct or; simpl. apply RI. constructor.
intuition.
eapply REACH_closed_free; eassumption.
eapply free_free_inject; try eassumption.
simpl. rewrite Zplus_0_r. apply P.
intuition.
eapply FreeEffectD in H1. destruct H1 as [? [VB Arith]]; subst.
eapply visPropagate; eassumption.
eapply FreeEffect_PropagateLeft; eassumption. }
{ (* internal function *)
monadInv H8. unfold transf_function in EQ.
destruct (type_function f) as [tyenv|] eqn:?; try discriminate.
destruct (analyze f) as [approx|] eqn:?; inv EQ.
assert (WTF: wt_function f tyenv). apply type_function_correct; auto.
rewrite vis_restrict_sm, restrict_sm_all, restrict_nest in *; trivial.
destruct PRE as [RC [PG [GFP [Glob [SMV [WD INJ]]]]]].
assert (PGR: meminj_preserves_globals ge (restrict (as_inj mu) (vis mu))).
rewrite <- restrict_sm_all.
eapply restrict_sm_preserves_globals; try eassumption.
unfold vis. intuition.
exploit (alloc_parallel_intern mu); try eassumption. apply Zle_refl. apply Zle_refl.
intros [mu' [m2' [stk' [Alloc' [INJ' [IntInc' [HA [HB [HC [HD [HE [HF HG]]]]]]]]]]]].
eexists; eexists; eexists; split.
eapply rtl_effstep_exec_function_internal; eauto.
exists mu'.
split. assumption.
split. solve[eapply intern_incr_globals_separate; eauto].
split. assumption.
assert (IncVis: inject_incr (restrict (as_inj mu) (vis mu)) (restrict (as_inj mu') (vis mu'))).
intros ? ? ? Rb. destruct (restrictD_Some _ _ _ _ _ Rb).
eapply restrictI_Some.
eapply intern_incr_as_inj; try eassumption.
eapply intern_incr_vis; eassumption.
assert (DomSP:= alloc_DomSrc _ _ _ SMV _ _ _ _ H).
assert (TgtB2: DomTgt mu stk' = false).
remember (DomTgt mu stk') as d.
destruct d; trivial; apply eq_sym in Heqd.
elim (Mem.fresh_block_alloc _ _ _ _ _ Alloc').
apply SMV. assumption.
assert (locSP: local_of mu' stk = Some (stk', 0)).
destruct (joinD_Some _ _ _ _ _ HA) as [EXT | [EXT LOC]]; trivial.
rewrite <- (intern_incr_extern _ _ IntInc') in EXT.
assert (DomSrc mu stk = true). eapply extern_DomRng'; eassumption.
congruence.
split.
split; simpl.
econstructor; eauto.
apply wt_init_regs. inv WTF. eapply Val.has_subtype_list; eauto.
apply analysis_correct_entry; auto.
rewrite vis_restrict_sm, restrict_sm_all, restrict_nest in *; trivial.
eapply regset_init_regs.
eapply val_list_inject_incr; eassumption.
exists stk, stk'. rewrite restrict_sm_local'; auto.
eapply match_stackframes_intern_incr; try eassumption.
eapply restrict_sm_intern_incr; eassumption.
apply restrict_sm_WD; trivial.
intuition.
eapply meminj_preserves_incr_sep_vb with (m0:=m)(tm:=m2). eapply PG.
intros. apply as_inj_DomRng in H1.
split; eapply SMV; eapply H1.
assumption.
apply intern_incr_as_inj; eassumption.
apply sm_inject_separated_mem. assumption.
assumption.
red; intros. destruct (GFP _ _ H1). split; trivial.
eapply intern_incr_as_inj; eassumption.
rewrite <- (intern_incr_frgnBlocksSrc _ _ IntInc'). auto.
intuition. }
{ (* external function : only nonobservables*)
monadInv H8.
specialize (EFhelpers _ _ OBS); intros OBS'.
rewrite vis_restrict_sm, restrict_sm_all, restrict_nest in *; trivial.
exploit (inlineable_extern_inject ge tge); try eapply PRE; try eassumption; try apply GDE_lemma.
intros [mu' [v' [m'' [TEC [ResInj [MINJ' [UNMAPPED [LOOR [INC [SEP [GSEP [LOCALLOC [WD' [SMV' RC']]]]]]]]]]]]]].
destruct PRE as [RC [PG [GFP [Glob [SMV [WD INJ]]]]]].
eexists; eexists; eexists; split.
eapply rtl_effstep_exec_function_external; eauto.
exists mu'.
split. assumption.
split. assumption.
split. assumption.
split.
split.
econstructor; eauto.
eapply match_stackframes_intern_incr; try eassumption.
eapply restrict_sm_intern_incr; eassumption.
apply restrict_sm_WD; trivial.
simpl. eapply external_call_well_typed; eauto.
rewrite vis_restrict_sm, restrict_sm_all, restrict_nest in *; trivial.
intuition.
eapply meminj_preserves_incr_sep_vb with (m0:=m)(tm:=m2). eapply PG.
intros ? ? ? AI. apply as_inj_DomRng in AI.
split; eapply SMV; eapply AI.
assumption.
apply intern_incr_as_inj; eassumption.
apply sm_inject_separated_mem. assumption.
assumption.
intros ? ? Hb. destruct (GFP _ _ Hb). split; trivial.
eapply intern_incr_as_inj; eassumption.
rewrite <- (intern_incr_frgnBlocksSrc _ _ INC). auto.
split. trivial. split. trivial.
intros. eapply BuiltinEffect_Propagate; try eassumption. }
{ (* return *)
inv H1.
eexists; eexists; eexists; split.
eapply rtl_effstep_exec_return; eauto.
exists mu.
split. apply intern_incr_refl.
split. solve [apply gsep_refl].
split. apply sm_locally_allocatedChar.
repeat split; extensionality b';
try rewrite (freshloc_irrefl); intuition.
split. 2: intuition.
split. 2: intuition.
econstructor; eauto.
apply wt_regset_assign; auto. eapply Val.has_subtype; eauto.
eapply regset_set; assumption. }
Qed.
(* MATCH_initial: if the source effect semantics produces an initial core c1
   for entry point v and arguments vals1, then the target semantics produces
   a matching initial core c2 for the injected arguments vals2, and the two
   cores are related by MATCH under the initial structured injection built
   from j, DomS, DomT and the REACH closures of the two memories. *)
Lemma MATCH_initial: forall v
(vals1 : list val) c1 (m1 : mem) (j : meminj)
(vals2 : list val) (m2 : mem) (DomS DomT : Values.block -> bool)
(Ini : initial_core (rtl_eff_sem hf) ge v vals1 = Some c1)
(Inj: Mem.inject j m1 m2)
(VInj: Forall2 (val_inject j) vals1 vals2)
(PG: meminj_preserves_globals ge j)
(J: forall b1 b2 d, j b1 = Some (b2, d) ->
DomS b1 = true /\ DomT b2 = true)
(RCH: forall b, REACH m2
(fun b' : Values.block => isGlobalBlock tge b' || getBlocks vals2 b') b =
true -> DomT b = true)
(GFI: globalfunction_ptr_inject ge j)
(GDE: genvs_domain_eq ge tge)
(HDomS: forall b : Values.block, DomS b = true -> Mem.valid_block m1 b)
(HDomT: forall b : Values.block, DomT b = true -> Mem.valid_block m2 b),
exists c2,
initial_core (rtl_eff_sem hf) tge v vals2 = Some c2 /\
MATCH
(initial_SM DomS DomT
(REACH m1
(fun b : Values.block => isGlobalBlock ge b || getBlocks vals1 b))
(REACH m2
(fun b : Values.block => isGlobalBlock tge b || getBlocks vals2 b))
j) c1 m1 c2 m2.
Proof.
intros.
(* Deconstruct the initial_core hypothesis: v must be a pointer with zero
   offset to an internal function, and the argument list must be well-typed,
   defined, and small enough. *)
inversion Ini.
unfold RTL_initial_core in H0. unfold ge in *. unfold tge in *.
destruct v; inv H0.
remember (Int.eq_dec i Int.zero) as z; destruct z; inv H1. clear Heqz.
remember (Genv.find_funct_ptr (Genv.globalenv prog) b) as zz; destruct zz; inv H0.
apply eq_sym in Heqzz.
destruct f; try discriminate.
case_eq (val_casted.val_has_type_list_func vals1
(sig_args (funsig (Internal f)))
&& val_casted.vals_defined vals1).
2: solve[intros H2; rewrite H2 in H1; inv H1].
intros H2; rewrite H2 in H1.
simpl; revert H1; case_eq
(zlt (match match Zlength vals1 with 0%Z => 0%Z
| Z.pos y' => Z.pos y'~0 | Z.neg y' => Z.neg y'~0
end
with 0%Z => 0%Z
| Z.pos y' => Z.pos y'~0~0 | Z.neg y' => Z.neg y'~0~0
end) Int.max_unsigned).
intros l _.
2: solve[inversion 2].
simpl. inversion 1. inv H1. clear H3.
(* Obtain the translated function tf in the target program. *)
exploit funct_ptr_translated; eauto. intros [tf [FP TF]].
unfold tge in FP. rewrite FP. (*monadInv TF. *)
unfold rtl_eff_sem, rtl_coop_sem. simpl.
case_eq (Int.eq_dec Int.zero Int.zero). intros ? e.
(* Injected value lists have equal length. *)
assert (Zlength vals2 = Zlength vals1) as ->.
{ apply forall_inject_val_list_inject in VInj. clear - VInj.
induction VInj; auto. rewrite !Zlength_cons, IHVInj; auto. }
unfold transf_fundef in TF. simpl in TF.
(* change (fn_sig (transf_function f)) with (funsig (Internal x)).*)
(* Transfer well-typedness and definedness of vals1 to vals2 through the
   injection, using signature preservation of the transformation. *)
assert (val_casted.val_has_type_list_func vals2
(sig_args (funsig tf)) =true) as ->.
{ eapply val_casted.val_list_inject_hastype; eauto.
eapply forall_inject_val_list_inject; eauto.
destruct (val_casted.vals_defined vals1); auto.
rewrite andb_comm in H2; simpl in H2. solve[inv H2].
assert (sig_args (funsig tf)
= sig_args (funsig (Internal f))) as ->.
{ specialize (sig_preserved (Internal f)). simpl. intros.
rewrite (H0 _ TF). reflexivity. }
destruct (val_casted.val_has_type_list_func vals1
(sig_args (funsig (Internal f)))); auto. }
assert (val_casted.vals_defined vals2=true) as ->.
{ eapply val_casted.val_list_inject_defined.
eapply forall_inject_val_list_inject; eauto.
destruct (val_casted.vals_defined vals1); auto.
rewrite andb_comm in H2; inv H2. }
simpl. monadInv TF. eexists; split.
simpl; revert H1; case_eq
(zlt (match match Zlength vals1 with 0%Z => 0%Z
| Z.pos y' => Z.pos y'~0 | Z.neg y' => Z.neg y'~0
end
with 0%Z => 0%Z
| Z.pos y' => Z.pos y'~0~0 | Z.neg y' => Z.neg y'~0~0
end) Int.max_unsigned).
solve[simpl; auto].
intros CONTRA. solve[elimtype False; auto].
2: intros CONTRA; solve[elimtype False; auto].
clear e e0.
(* Well-definedness facts about the initial structured injection. *)
destruct (core_initial_wd ge tge _ _ _ _ _ _ _ Inj
VInj J RCH PG GDE HDomS HDomT _ (eq_refl _))
as [AA [BB [CC [DD [EE [FF GG]]]]]].
remember (val_casted.val_has_type_list_func vals1 (sig_args (funsig (Internal f))) &&
val_casted.vals_defined vals1) as vc.
destruct vc; inv H2.
split. simpl.
(* Establish the match relation for the initial call state. *)
eapply match_states_call; try eassumption.
econstructor.
apply eq_sym in Heqvc. apply andb_true_iff in Heqvc.
apply val_casted.val_has_type_list_func_charact.
apply Heqvc.
unfold transf_fundef, transf_partial_fundef, bind.
rewrite EQ. trivial.
unfold vis, initial_SM; simpl.
(* The argument values are injected by the restriction of as_inj to vis. *)
eapply val_list_inject_incr.
Focus 2. apply forall_inject_val_list_inject.
eapply restrict_forall_vals_inject; try eassumption.
intros bb Hbb. apply Hbb.
red; intros. destruct (restrictD_Some _ _ _ _ _ H2).
apply restrictI_Some.
unfold as_inj; simpl. apply joinI. left.
apply restrictI_Some. assumption.
apply REACH_nil. rewrite H4; intuition.
apply REACH_nil. rewrite H4; intuition.
rewrite initial_SM_as_inj.
intuition.
Qed.
(* Top-level correctness theorem: the transformation induces a structured
   (SM) injection simulation between the source and target RTL effect
   semantics.  Each proof obligation of inj_simulation_plus_typed is
   discharged using the MATCH_* lemmas proved above. *)
Theorem transl_program_correct:
SM_simulation.SM_simulation_inject (rtl_eff_sem hf)
(rtl_eff_sem hf) ge tge.
Proof.
intros.
assert (GDE:= GDE_lemma).
eapply inj_simulation_plus_typed with
(match_states:=fun x mu st m st' m' => MATCH mu st m st' m')
(measure:=fun x => O).
(*genvs_dom_eq*)
assumption.
(*match_wd*)
intros; apply H.
(*match_visible*)
intros. apply H.
(*match_restrict
intros x. apply MATCH_restrict.*)
(*match_valid*)
intros. apply H.
(*match_genv*)
intros x. eapply MATCH_PG.
(*initial_core*)
{ intros.
eapply (MATCH_initial _ _ _); eauto. }
(*halted*)
{ intros. destruct H as [MC [RC [PG [GFP [Glob [SMV [WD INJ]]]]]]].
destruct c1; inv H0. destruct stack; inv H1.
inv MC. exists v'.
split. assumption.
rewrite vis_restrict_sm, restrict_sm_all, restrict_nest in *; trivial.
split. eassumption.
simpl. inv H1. trivial. }
(* at_external*)
{ apply MATCH_atExternal. }
(* after_external*)
{ apply MATCH_afterExternal. }
(*effcore_diagram*)
{ intros. exploit MATCH_effcore_diagram; try eassumption.
intros [st2' [m2' [U2 [CS' [mu' [INC [GSEP
[LOCALLOC [MTCH [WD' [SVM' UH]]]]]]]]]]].
exists st2', m2', mu'.
repeat (split; trivial).
exists U2. split; trivial.
left. apply effstep_plus_one; assumption. }
Qed.
End PRESERVATION.
|
{"author": "PrincetonUniversity", "repo": "compcomp", "sha": "eebb7d5a95fed97775cef7f014399be78abbe7bf", "save_path": "github-repos/coq/PrincetonUniversity-compcomp", "path": "github-repos/coq/PrincetonUniversity-compcomp/compcomp-eebb7d5a95fed97775cef7f014399be78abbe7bf/backend/CSEproof_comp.v"}
|
# -*- coding: utf-8 -*-
from abc import ABC, abstractmethod
from typing import Callable, NamedTuple, Set
import numpy as np
from sktime.distances.base._types import DistanceCallable
class NumbaDistance(ABC):
    """Abstract base class for a numba-compatible distance metric.

    Concrete subclasses implement ``_distance_factory`` to return a
    no_python-compiled callable; ``distance`` and ``distance_factory``
    are the validated public entry points.
    """

    def distance(self, x: np.ndarray, y: np.ndarray, **kwargs: dict) -> float:
        """Compute the distance between two timeseries.

        Parameters
        ----------
        x: np.ndarray (2d array)
            First timeseries.
        y: np.ndarray (2d array)
            Second timeseries.
        kwargs: dict
            kwargs for the distance computation.

        Returns
        -------
        float
            Distance between x and y.
        """
        # Build (and validate) the compiled distance, then evaluate it once.
        dist_callable = self.distance_factory(x, y, **kwargs)
        return dist_callable(x, y)

    # The return annotations below are string forward references so the
    # annotation does not need to be evaluated at class-creation time.
    def distance_factory(
        self, x: np.ndarray, y: np.ndarray, **kwargs: dict
    ) -> "DistanceCallable":
        """Create a no_python distance.

        This method will validate the kwargs and ensure x and y are in the correct
        format and then return a no_python compiled distance that uses the kwargs.

        The no_python compiled distance will be in the form:
        Callable[[np.ndarray, np.ndarray], float]. This can then be used to
        calculate distances efficiently or can be used inside other no_python
        functions.

        Parameters
        ----------
        x: np.ndarray (2d array)
            First timeseries
        y: np.ndarray (2d array)
            Second timeseries
        kwargs: kwargs
            kwargs for the given distance metric

        Returns
        -------
        Callable[[np.ndarray, np.ndarray], float]
            Callable where two, numpy 2d arrays are taken as parameters (x and y),
            a float is then returned that represents the distance between x and y.
            This callable will be no_python compiled.

        Raises
        ------
        ValueError
            If x or y is not a numpy array.
            If x or y has less than or greater than 2 dimensions.
        RuntimeError
            If the distance metric could not be compiled to no_python.
        """
        NumbaDistance._validate_factory_timeseries(x)
        NumbaDistance._validate_factory_timeseries(y)
        no_python_callable = self._distance_factory(x, y, **kwargs)
        return no_python_callable

    @staticmethod
    def _validate_factory_timeseries(x: np.ndarray) -> None:
        """Ensure the timeseries are correct format.

        Parameters
        ----------
        x: np.ndarray (2d array)
            A timeseries to check.

        Raises
        ------
        ValueError
            If x is not a numpy array.
            If x has less than or greater than 2 dimensions.
        """
        if not isinstance(x, np.ndarray):
            # BUG FIX: trailing space added so the concatenated f-strings
            # do not produce "ensure itis a 2d numpy".
            raise ValueError(
                f"The array {x} is not a numpy array. Please ensure it "
                f"is a 2d numpy and try again."
            )
        if x.ndim != 2:
            # BUG FIX: trailing space added so the concatenated f-strings
            # do not produce "dimensions.Ensure".
            raise ValueError(
                f"The array {x} has the incorrect number of dimensions. "
                f"Ensure the array has exactly 2 dimensions and try "
                f"again."
            )

    @abstractmethod
    def _distance_factory(
        self, x: np.ndarray, y: np.ndarray, **kwargs: dict
    ) -> "DistanceCallable":
        """Abstract method to create a no_python compiled distance.

        _distance_factory should validate kwargs and then compile a no_python callable
        that takes (x, y) as parameters and returns a float that represents the distance
        between the two timeseries.

        Parameters
        ----------
        x: np.ndarray (2d array)
            First timeseries
        y: np.ndarray (2d array)
            Second timeseries
        kwargs: kwargs
            kwargs for the given distance metric

        Returns
        -------
        Callable[[np.ndarray, np.ndarray], float]
            Callable where two, numpy 2d arrays are taken as parameters (x and y),
            a float is then returned that represents the distance between x and y.
            This callable will be no_python compiled.
        """
        ...
# Metric registry entry: bundles every way a distance metric can be referred
# to and invoked.
class MetricInfo(NamedTuple):
    """Define a registry entry for a metric."""
    # Canonical (primary) name of the distance metric.
    canonical_name: str
    # All accepted aliases, including canonical_name itself.
    aka: Set[str]
    # Python distance function (can use numba inside but callable must be in python)
    dist_func: Callable[[np.ndarray, np.ndarray], float]
    # NumbaDistance instance implementing this metric.
    dist_instance: NumbaDistance
|
{"hexsha": "e7e836639eab0646d004fa5ca3040d4c4005af05", "size": 4653, "ext": "py", "lang": "Python", "max_stars_repo_path": "sktime/distances/base/_base.py", "max_stars_repo_name": "Tomiiwa/sktime", "max_stars_repo_head_hexsha": "9c7600287e7d52556784a3da3a3c83f1a7499610", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "sktime/distances/base/_base.py", "max_issues_repo_name": "Tomiiwa/sktime", "max_issues_repo_head_hexsha": "9c7600287e7d52556784a3da3a3c83f1a7499610", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "sktime/distances/base/_base.py", "max_forks_repo_name": "Tomiiwa/sktime", "max_forks_repo_head_hexsha": "9c7600287e7d52556784a3da3a3c83f1a7499610", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.6530612245, "max_line_length": 88, "alphanum_fraction": 0.5993982377, "include": true, "reason": "import numpy", "num_tokens": 971}
|
# Removing t = 0, such that Σ is invertible
t = Vector(0.1:0.1:100); p = 2;
# Creating generators U,V that result in a positive-definite matrix Σ
Ut, Vt = spline_kernel(t', p)
K = SymEGRQSMatrix(Ut,Vt,ones(size(Ut,2)))
x = randn(size(K,1))
Kfull = Matrix(K)
# Testing multiplication
@test K*x ≈ Kfull*x
@test K'*x ≈ Kfull'*x
# Testing linear solve
@test K\x ≈ Kfull\x
# Testing (log)determinant
@test logdet(K) ≈ logdet(Kfull)
@test det(K) ≈ det(Kfull)
# Testing dense reconstruction (Matrix) and scalar indexing (getindex)
@test Matrix(K) ≈ tril(K.Ut'*K.Vt) + triu(K.Vt'*K.Ut,1) + Diagonal(K.d)
@test Kfull[3,1] ≈ K[3,1]
@test Kfull[2,2] ≈ K[2,2]
@test Kfull[1,3] ≈ K[1,3]
|
{"hexsha": "49273cf24d083419a905d24b55e3e9bb3361ba79", "size": 624, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "test/test_SymEGRQSMatrix.jl", "max_stars_repo_name": "mipals/SymEGRSSMatrices", "max_stars_repo_head_hexsha": "67e59dfc74c692ca13ce603385cf1af2e6742e56", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-05-12T13:20:06.000Z", "max_stars_repo_stars_event_max_datetime": "2020-05-12T13:20:06.000Z", "max_issues_repo_path": "test/test_SymEGRQSMatrix.jl", "max_issues_repo_name": "mipals/SymEGRSSMatrices.jl", "max_issues_repo_head_hexsha": "67e59dfc74c692ca13ce603385cf1af2e6742e56", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "test/test_SymEGRQSMatrix.jl", "max_forks_repo_name": "mipals/SymEGRSSMatrices.jl", "max_forks_repo_head_hexsha": "67e59dfc74c692ca13ce603385cf1af2e6742e56", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 23.1111111111, "max_line_length": 71, "alphanum_fraction": 0.6490384615, "num_tokens": 250}
|
import .brown
universes v u
open category_theory
local notation f ` ∘ `:80 g:80 := g ≫ f
namespace homotopy_theory.cofibrations
open precofibration_category cofibration_category
open homotopy_theory.weak_equivalences
variables {C : Type u} [category.{v} C] [cofibration_category.{v} C]
[has_initial_object.{v} C]
-- Following Rădulescu-Banu, Cofibrations in Homotopy Theory, Lemma 1.4.1
variables {a₁ a₂ a₃ a₄ b₁ b₂ b₃ b₄ : C}
{f₁₂ : a₁ ⟶ a₂} {f₁₃ : a₁ ⟶ a₃} {f₂₄ : a₂ ⟶ a₄} {f₃₄ : a₃ ⟶ a₄}
(po_f : Is_pushout f₁₂ f₁₃ f₂₄ f₃₄)
{g₁₂ : b₁ ⟶ b₂} {g₁₃ : b₁ ⟶ b₃} {g₂₄ : b₂ ⟶ b₄} {g₃₄ : b₃ ⟶ b₄}
(po_g : Is_pushout g₁₂ g₁₃ g₂₄ g₃₄)
{u₁ : a₁ ⟶ b₁} {u₂ : a₂ ⟶ b₂} {u₃ : a₃ ⟶ b₃} -- u₄ will be the induced map of pushouts
(ha₁ : cofibrant a₁) (ha₃ : cofibrant a₃) (hb₁ : cofibrant b₁) (hb₃ : cofibrant b₃)
(hf₁₂ : is_cof f₁₂) (hg₁₂ : is_cof g₁₂)
(hwu₁ : is_weq u₁) (hwu₂ : is_weq u₂) (hwu₃ : is_weq u₃)
(s₁₂ : f₁₂ ≫ u₂ = u₁ ≫ g₁₂) (s₁₃ : f₁₃ ≫ u₃ = u₁ ≫ g₁₃)
-- Auxiliary gluing lemma: when u₁ and u₃ are acyclic cofibrations and the
-- comparison map u₂'' (induced on the pushout of f₁₂ along u₁) is a
-- cofibration, the induced map of pushouts is a weak equivalence.
-- Follows Rădulescu-Banu, Lemma 1.4.1 (see comment above the variables).
lemma gluing_weq_aux (hcu₁ : is_cof u₁) (hcu₃ : is_cof u₃)
  (hcu₂'' : is_cof ((pushout_by_cof f₁₂ u₁ hf₁₂).is_pushout.induced u₂ g₁₂ s₁₂)) :
  is_weq (pushout_of_maps po_f po_g u₁ u₂ u₃ s₁₂ s₁₃) :=
have acof_u₁ : is_acof u₁ := ⟨hcu₁, hwu₁⟩,
have acof_u₃ : is_acof u₃ := ⟨hcu₃, hwu₃⟩,
-- Factor u₄ through the pushouts of f₁₂ along u₁ and of f₃₄ along u₃.
let po₁₂ := pushout_by_cof f₁₂ u₁ hf₁₂,
    u₂' := po₁₂.map₀,
    u₂'' := po₁₂.is_pushout.induced u₂ g₁₂ s₁₂,
    u₄ := pushout_of_maps po_f po_g u₁ u₂ u₃ s₁₂ s₁₃,
    po₃₄ := pushout_by_cof f₃₄ u₃ (pushout_is_cof po_f hf₁₂),
    u₄' := po₃₄.map₀,
    u₄'' := po₃₄.is_pushout.induced u₄ g₃₄ (by simp) in
-- Cobase changes of acyclic cofibrations are acyclic cofibrations.
have acof_u₂' : is_acof u₂' := pushout_is_acof po₁₂.is_pushout.transpose acof_u₁,
have acof_u₄' : is_acof u₄' := pushout_is_acof po₃₄.is_pushout.transpose acof_u₃,
have acof_u₂'' : is_acof u₂'' := have _ := hwu₂, begin
  refine ⟨hcu₂'', category_with_weak_equivalences.weq_of_comp_weq_left acof_u₂'.2 _⟩,
  simpa using this
end,
let k := pushout_of_maps po₁₂.is_pushout po₃₄.is_pushout f₁₃ f₂₄ g₁₃ po_f.commutes s₁₃.symm in
-- It suffices to exhibit u₄'' as a cobase change of the acyclic
-- cofibration u₂''; the remainder verifies the required pushout square.
suffices Is_pushout u₂'' k g₂₄ u₄'',
  by convert weq_comp acof_u₄'.2 (pushout_is_acof this acof_u₂'').2; simp,
have _ := Is_pushout_of_Is_pushout_of_Is_pushout po_f po₃₄.is_pushout,
have Is_pushout f₁₂ (u₁ ≫ g₁₃) (u₂' ≫ k) po₃₄.map₁ := begin
  convert this using 1,
  { exact s₁₃.symm },
  { simp }
end,
have Is_pushout po₁₂.map₁ g₁₃ k po₃₄.map₁ :=
  Is_pushout_of_Is_pushout_of_Is_pushout' po₁₂.is_pushout this (by simp),
have po_g' : Is_pushout (po₁₂.map₁ ≫ u₂'') g₁₃ g₂₄ (po₃₄.map₁ ≫ u₄'') := by convert po_g using 1; simp,
Is_pushout_of_Is_pushout_of_Is_pushout_vert' this po_g' $
  by apply po₁₂.is_pushout.uniqueness; rw [←category.assoc, ←category.assoc]; simp [po_g.commutes]
-- Gluing lemma: the map induced on pushouts by (u₁, u₂, u₃) is a weak
-- equivalence, with no cofibrancy hypotheses on the uᵢ themselves.  The
-- proof factors each uᵢ through (relative) Brown factorizations and then
-- applies gluing_weq_aux to each half of the factorization.
lemma gluing_weq : is_weq (pushout_of_maps po_f po_g u₁ u₂ u₃ s₁₂ s₁₃) :=
-- Brown factorizations of u₁, u₂, u₃, chosen compatibly with f₁₂/g₁₂ and
-- f₁₃/g₁₃.
let ⟨c₁⟩ := exists_brown_factorization ha₁ hb₁ u₁,
    ⟨c₂, h₁₂, hv₂, hr₂, hw₂, x, y⟩ :=
      exists_relative_brown_factorization
        ha₁ hb₁ (cofibrant_of_cof ha₁ hf₁₂) (cofibrant_of_cof hb₁ hg₁₂) u₁ u₂ f₁₂ g₁₂ s₁₂.symm c₁,
    ⟨c₃, h₁₃, hv₃, hr₃, hw₃, _, _⟩ :=
      exists_relative_brown_factorization ha₁ hb₁ ha₃ hb₃ u₁ u₃ f₁₃ g₁₃ s₁₃.symm c₁,
    po := pushout_by_cof c₁.f' f₁₂ c₁.hf' in
have cof_h₁₂ : is_cof h₁₂ := begin
  convert cof_comp (pushout_is_cof po.is_pushout.transpose hf₁₂) (x hg₁₂) using 1,
  simp
end,
-- Apply the auxiliary lemma to the cofibration halves (f' maps) ...
have wv : _ := gluing_weq_aux po_f (pushout_by_cof h₁₂ h₁₃ cof_h₁₂).is_pushout hf₁₂
  (c₁.weq_f' hwu₁) (c₂.weq_f' hwu₂) (c₃.weq_f' hwu₃) hv₂.symm hv₃.symm c₁.hf' c₃.hf'
  (by rw ←Is_pushout.transpose_induced; exact cof_comp (cof_iso _) (x hg₁₂)),
-- ... and to the section halves (s maps).
have ww : _ := gluing_weq_aux po_g (pushout_by_cof h₁₂ h₁₃ cof_h₁₂).is_pushout hg₁₂
  c₁.hs.2 c₂.hs.2 c₃.hs.2 hw₂.symm hw₃.symm c₁.hs.1 c₃.hs.1
  (by rw ←Is_pushout.transpose_induced; exact cof_comp (cof_iso _) (y hf₁₂).1),
let po_h := pushout_by_cof h₁₂ h₁₃ cof_h₁₂ in
-- The induced map of the retractions r is a weak equivalence because it is
-- inverse (up to the identities hsr) to the weak equivalence ww.
have wr : is_weq (pushout_of_maps po_h.is_pushout po_g c₁.r c₂.r c₃.r hr₂.symm hr₃.symm), begin
  refine (weq_iff_weq_inv _).mp ww,
  rw ←pushout_of_maps_comp,
  convert pushout_of_maps_id po_g,
  { exact c₁.hsr }, { exact c₂.hsr }, { exact c₃.hsr }
end,
begin
  convert weq_comp wv wr,
  rw ←pushout_of_maps_comp,
  congr,
  { exact c₁.hf'r.symm }, { exact c₂.hf'r.symm}, { exact c₃.hf'r.symm }
end
end homotopy_theory.cofibrations
|
{"author": "rwbarton", "repo": "lean-homotopy-theory", "sha": "39e1b4ea1ed1b0eca2f68bc64162dde6a6396dee", "save_path": "github-repos/lean/rwbarton-lean-homotopy-theory", "path": "github-repos/lean/rwbarton-lean-homotopy-theory/lean-homotopy-theory-39e1b4ea1ed1b0eca2f68bc64162dde6a6396dee/src/homotopy_theory/formal/cofibrations/gluing.lean"}
|
import cv2
import os
import sys
import pickle
import numpy as np
from PIL import Image
sys.path.insert(0, '/Workspace-Github/face_recognition/code')
import opencv_tools
import keras
from keras.callbacks import ModelCheckpoint
from keras.models import Sequential
from keras.layers import Dense, Conv2D, MaxPooling2D, Dropout, Flatten
# Label-index -> subject-name lookup used by predict(); slot 0 is a
# placeholder because predicted labels start at 1 (argmax + 1).
subjects = ["", "YANG MI", "BABY"]
def prepare_training_data(data_folder_path):
    """Load pre-serialized training faces/labels and resize faces to 128x128.

    Parameters
    ----------
    data_folder_path : str
        Unused in the current implementation; kept for interface
        compatibility (the data is loaded from a pre-pickled file instead
        of being rebuilt from the folder, see the commented call below).

    Returns
    -------
    tuple of (np.ndarray, np.ndarray)
        ``x_train``: stacked 128x128 face images; ``y_train``: integer labels.
    """
    # faces,labels = opencv_tools.prepare_training_data(data_folder_path)
    # BUG FIX: use a context manager so the pickle file handle is always
    # closed (the original opened the file and never closed it).
    with open('D:/Workspace-Github/face_recognition/serialized/data_train.file', 'rb') as f:
        data = pickle.load(f)
    faces, labels = data[0], data[1]
    x_train = []
    for face in faces:
        im = Image.fromarray(face)
        # NOTE(review): Image.ANTIALIAS is deprecated in newer Pillow
        # (use Image.LANCZOS); kept as-is for the pinned environment.
        im_resized = im.resize((128, 128), Image.ANTIALIAS)
        x_train.append(np.array(im_resized))
    return np.array(x_train), np.array(labels)
def train_CNN(x_train, y_train):
    """Build and train a small CNN face classifier on 128x128 grayscale faces.

    Parameters
    ----------
    x_train : np.ndarray
        Grayscale face images, shape (n, 128, 128) (or any layout with
        128*128 pixels per sample).
    y_train : np.ndarray
        Integer labels starting at 1 (converted to 0-based one-hot below).

    Returns
    -------
    keras.models.Sequential
        The trained model; best (lowest-loss) weights are also checkpointed
        to disk after each epoch.
    """
    batch_size = 50
    num_classes = 2
    epochs = 20
    print(np.shape(x_train))
    # Normalise pixel values to [0, 1].  (The original intermediate reshape
    # to (-1, 16384, 1) was redundant: astype/division are elementwise and
    # the array is reshaped to its final (n, 128, 128, 1) layout below.)
    x_train = x_train.astype('float32')
    x_train /= 255
    print(x_train.shape[0], 'train samples')
    # Labels are 1-based; to_categorical expects 0-based class indices.
    y_train = keras.utils.to_categorical(y_train - 1, num_classes)
    img_rows, img_cols = 128, 128
    x_train = x_train.reshape(x_train.shape[0], img_cols, img_rows, 1)  # 1 channel: grayscale
    model = Sequential()
    model.add(Conv2D(64, (5, 5), activation='relu', input_shape=(img_cols, img_rows, 1)))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Conv2D(64, (5, 5), activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Flatten(input_shape=model.output_shape[1:]))  # flatten conv features
    model.add(Dense(64, activation='relu'))
    model.add(Dropout(0.2))
    model.add(Dense(num_classes, activation='softmax'))
    model.summary()
    model.compile(loss=keras.losses.categorical_crossentropy,
                  optimizer=keras.optimizers.SGD(),
                  metrics=['accuracy'])
    # Checkpoint: keep only the weights with the lowest training loss.
    # NOTE(review): this path lacks the "D:" drive prefix used elsewhere in
    # this file -- confirm it resolves on the training machine.
    filepath = "/Workspace-Github/face_recognition/serialized/weights-improvement-{epoch:02d}-{loss:.4f}.hdf5"
    checkpoint = ModelCheckpoint(filepath, monitor='loss', verbose=1, save_best_only=True, mode='min')
    callbacks_list = [checkpoint]
    model.fit(x_train, y_train,
              batch_size=batch_size,
              epochs=epochs,
              verbose=1,
              validation_data=None,
              callbacks=callbacks_list)
    return model
def predict(test_img, model):
    """Detect the face in ``test_img``, classify it, and annotate a copy.

    Returns a copy of ``test_img`` with a rectangle drawn around the
    detected face and the predicted subject name drawn above it.
    """
    # Work on a copy so the caller's image is left untouched.
    annotated = test_img.copy()
    face, rect = opencv_tools.detect_face_CV2(annotated)
    # Prepare the face crop as a (1, 128, 128, 1) float sample in [0, 1].
    resized = Image.fromarray(face).resize((128, 128), Image.ANTIALIAS)
    sample = (np.array(resized) / 255).reshape(1, 128, 128, 1).astype('float32')
    # Model outputs 0-based class scores; labels in `subjects` are 1-based.
    label = np.argmax(model.predict(sample, verbose=0)) + 1
    print(label)
    name = subjects[label]
    opencv_tools.draw_rectangle(annotated, rect)
    opencv_tools.draw_text(annotated, name, rect[0], rect[1] - 5)
    return annotated
|
{"hexsha": "682d443793dbb88e8d42967168fcba44bce7a690", "size": 3419, "ext": "py", "lang": "Python", "max_stars_repo_path": "code/CNN_tools.py", "max_stars_repo_name": "yc930401/face_recognition", "max_stars_repo_head_hexsha": "475b9d8766bd76657d83f899e77d2688694fd010", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2017-11-01T14:50:57.000Z", "max_stars_repo_stars_event_max_datetime": "2020-06-13T17:06:47.000Z", "max_issues_repo_path": "code/CNN_tools.py", "max_issues_repo_name": "yc930401/face_recognition", "max_issues_repo_head_hexsha": "475b9d8766bd76657d83f899e77d2688694fd010", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "code/CNN_tools.py", "max_forks_repo_name": "yc930401/face_recognition", "max_forks_repo_head_hexsha": "475b9d8766bd76657d83f899e77d2688694fd010", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2018-04-10T03:15:19.000Z", "max_forks_repo_forks_event_max_datetime": "2021-12-28T06:05:51.000Z", "avg_line_length": 34.5353535354, "max_line_length": 108, "alphanum_fraction": 0.6656917227, "include": true, "reason": "import numpy", "num_tokens": 888}
|
[STATEMENT]
lemma classes_above_ifields:
"\<lbrakk> classes_above P C \<inter> classes_changed P P' = {} \<rbrakk>
\<Longrightarrow>
ifields P C = ifields P' C"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. classes_above P C \<inter> classes_changed P P' = {} \<Longrightarrow> ifields P C = ifields P' C
[PROOF STEP]
by (simp add: ifields_def classes_above_fields)
|
{"llama_tokens": 140, "file": "Regression_Test_Selection_JinjaSuppl_ClassesAbove", "length": 1}
|
import numpy as np
import cv2
import glob
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
%matplotlib qt
def camera_calibration():
    """Calibrate the camera from the chessboard images in ``camera_cal/``.

    Detects 9x6 interior chessboard corners in every calibration image and
    feeds the correspondences to ``cv2.calibrateCamera``.

    Returns
    -------
    tuple
        (objpoints, imgpoints, ret, mtx, dist) as produced by OpenCV.
    """
    nx, ny = 9, 6
    # Template of 3-D corner coordinates on the z = 0 plane:
    # (0,0,0), (1,0,0), ..., (nx-1, ny-1, 0).
    template = np.zeros((ny * nx, 3), np.float32)
    template[:, :2] = np.mgrid[0:nx, 0:ny].T.reshape(-1, 2)
    objpoints = []  # 3-D points in real-world space
    imgpoints = []  # 2-D points in the image plane
    for fname in glob.glob('camera_cal/calibration*.jpg'):
        img = cv2.imread(fname)
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        found, corners = cv2.findChessboardCorners(gray, (nx, ny), None)
        if found:
            objpoints.append(template)
            imgpoints.append(corners)
    # NOTE: uses the size of the last image read; assumes all images match.
    img_size = (img.shape[1], img.shape[0])
    ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(
        objpoints, imgpoints, img_size, None, None)
    return objpoints, imgpoints, ret, mtx, dist
def thresh_binaryImage(image, ret, mtx, dist):
    """Undistort ``image`` and build a binary lane mask.

    Combines an x-gradient (Sobel) threshold on the grayscale image with an
    S-channel (HLS) colour threshold, and plots both the stacked and the
    combined masks.  ``ret`` is unused; it is kept to preserve the original
    call signature.  Returns the combined 0/1 binary mask.
    """
    undistorted = cv2.undistort(image, mtx, dist, None, mtx)

    # S channel of the HLS representation.
    # NOTE(review): assumes `image` is RGB-ordered -- confirm against the
    # caller, since cv2.imread yields BGR.
    s_channel = cv2.cvtColor(undistorted, cv2.COLOR_RGB2HLS)[:, :, 2]

    # Horizontal gradient on grayscale accentuates near-vertical lines.
    gray = cv2.cvtColor(undistorted, cv2.COLOR_RGB2GRAY)
    grad_x = cv2.Sobel(gray, cv2.CV_64F, 1, 0)
    abs_grad = np.absolute(grad_x)
    scaled_grad = np.uint8(255 * abs_grad / np.max(abs_grad))

    # Threshold the scaled gradient.
    grad_lo, grad_hi = 20, 100
    sxbinary = np.zeros_like(scaled_grad)
    sxbinary[(scaled_grad >= grad_lo) & (scaled_grad <= grad_hi)] = 1

    # Threshold the S channel.
    s_lo, s_hi = 170, 255
    s_binary = np.zeros_like(s_channel)
    s_binary[(s_channel >= s_lo) & (s_channel <= s_hi)] = 1

    # Stack for visualisation: gradient hits in green, colour hits in blue.
    color_binary = np.dstack((np.zeros_like(sxbinary), sxbinary, s_binary)) * 255

    # The final mask is the union of the two thresholds.
    combined_binary = np.zeros_like(sxbinary)
    combined_binary[(s_binary == 1) | (sxbinary == 1)] = 1

    fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(20, 10))
    ax1.set_title('Stacked thresholds')
    ax1.imshow(color_binary)
    ax2.set_title('Distorted and Combined S channel and gradient thresholds')
    ax2.imshow(combined_binary, cmap='gray')
    return combined_binary
def Perspective_Transform(img_size, combined_binary):
    """Warp ``combined_binary`` to a top-down ("bird's-eye") view.

    ``img_size`` is (width, height).  The source quadrilateral is a
    hand-picked road trapezoid; the destination is the image rectangle
    inset by a fixed offset.  Plots before/after and returns the warped
    image.
    """
    road_trapezoid = np.float32([[510, 465], [787, 465], [1055, 625], [215, 625]])
    inset = 50
    target_rect = np.float32([
        [inset, inset],
        [img_size[0] - inset, inset],
        [img_size[0] - inset, img_size[1] - inset],
        [inset, img_size[1] - inset],
    ])
    M = cv2.getPerspectiveTransform(road_trapezoid, target_rect)
    Minv = cv2.getPerspectiveTransform(target_rect, road_trapezoid)  # inverse; not returned
    warped = cv2.warpPerspective(combined_binary, M, img_size, flags=cv2.INTER_LINEAR)

    fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(24, 9))
    fig.tight_layout()
    ax1.imshow(combined_binary)
    ax1.set_title('ColorCorrected Image', fontsize=50)
    ax2.imshow(warped)
    ax2.set_title('Transformed Image', fontsize=50)
    plt.subplots_adjust(left=0., right=1, top=0.9, bottom=0.)
    return warped
def find_lane_pixels(binary_warped):
    """Locate left/right lane-line pixels in a warped binary image.

    A histogram of the bottom half seeds the two lane-base x positions;
    ``nwindows`` search windows then slide upward, each recentering on the
    mean x of the pixels it captures.

    Returns
    -------
    tuple
        (leftx, lefty, rightx, righty, out_img): pixel coordinate arrays
        for each lane, plus an RGB visualisation with the windows drawn.
    """
    # Histogram of the bottom half: lane lines show up as the two peaks.
    histogram = np.sum(binary_warped[binary_warped.shape[0]//2:, :], axis=0)
    # Visualisation image (3-channel copy of the binary input).
    out_img = np.dstack((binary_warped, binary_warped, binary_warped))
    # Peaks of the left and right halves seed the window search.
    # BUG FIX: np.int was removed in NumPy 1.24 -- use the builtin int.
    midpoint = int(histogram.shape[0]//2)
    leftx_base = np.argmax(histogram[:midpoint])
    rightx_base = np.argmax(histogram[midpoint:]) + midpoint

    # HYPERPARAMETERS
    nwindows = 9   # number of sliding windows
    margin = 100   # window half-width in pixels
    minpix = 50    # minimum pixels found to recenter the next window

    window_height = int(binary_warped.shape[0]//nwindows)
    # Coordinates of all nonzero (lane-candidate) pixels.
    nonzero = binary_warped.nonzero()
    nonzeroy = np.array(nonzero[0])
    nonzerox = np.array(nonzero[1])
    leftx_current = leftx_base
    rightx_current = rightx_base
    left_lane_inds = []
    right_lane_inds = []

    for window in range(nwindows):
        # Window boundaries in x and y (right and left lanes).
        win_y_low = binary_warped.shape[0] - (window+1)*window_height
        win_y_high = binary_warped.shape[0] - window*window_height
        win_xleft_low = leftx_current - margin
        win_xleft_high = leftx_current + margin
        win_xright_low = rightx_current - margin
        win_xright_high = rightx_current + margin
        # Draw the windows on the visualisation image.
        cv2.rectangle(out_img, (win_xleft_low, win_y_low),
                      (win_xleft_high, win_y_high), (0, 255, 0), 2)
        cv2.rectangle(out_img, (win_xright_low, win_y_low),
                      (win_xright_high, win_y_high), (0, 255, 0), 2)
        # Nonzero pixels falling inside each window.
        good_left_inds = ((nonzeroy >= win_y_low) & (nonzeroy < win_y_high) &
                          (nonzerox >= win_xleft_low) & (nonzerox < win_xleft_high)).nonzero()[0]
        good_right_inds = ((nonzeroy >= win_y_low) & (nonzeroy < win_y_high) &
                           (nonzerox >= win_xright_low) & (nonzerox < win_xright_high)).nonzero()[0]
        left_lane_inds.append(good_left_inds)
        right_lane_inds.append(good_right_inds)
        # If enough pixels were found, recenter the next window on their mean x.
        if len(good_left_inds) > minpix:
            leftx_current = int(np.mean(nonzerox[good_left_inds]))
        if len(good_right_inds) > minpix:
            rightx_current = int(np.mean(nonzerox[good_right_inds]))

    # Flatten the per-window index lists.
    try:
        left_lane_inds = np.concatenate(left_lane_inds)
        right_lane_inds = np.concatenate(right_lane_inds)
    except ValueError:
        # Avoids an error if the above is not implemented fully.
        pass

    # Extract left and right line pixel positions.
    leftx = nonzerox[left_lane_inds]
    lefty = nonzeroy[left_lane_inds]
    rightx = nonzerox[right_lane_inds]
    righty = nonzeroy[right_lane_inds]
    return leftx, lefty, rightx, righty, out_img
def fit_polynomial(binary_warped):
    """Fit second-order polynomials x = f(y) to the detected lane pixels.

    Prints the fitted coefficients, colours the detected pixels (left red,
    right blue), plots the fitted curves in yellow over the window-search
    visualisation, and returns (left_fitx, right_fitx).
    """
    leftx, lefty, rightx, righty, out_img = find_lane_pixels(binary_warped)

    left_fit = np.polyfit(lefty, leftx, 2)
    print(left_fit)
    right_fit = np.polyfit(righty, rightx, 2)
    print(right_fit)

    # One x value per image row.
    ploty = np.linspace(0, binary_warped.shape[0]-1, binary_warped.shape[0])

    def _evaluate(fit):
        # Evaluate a*y^2 + b*y + c over every row.
        return fit[0]*ploty**2 + fit[1]*ploty + fit[2]

    try:
        left_fitx = _evaluate(left_fit)
        right_fitx = _evaluate(right_fit)
    except TypeError:
        # Fallback when the fits are missing or ill-typed.
        print('The function failed to fit a line!')
        left_fitx = 1*ploty**2 + 1*ploty
        right_fitx = 1*ploty**2 + 1*ploty

    ## Visualization ##
    out_img[lefty, leftx] = [255, 0, 0]
    out_img[righty, rightx] = [0, 0, 255]
    plt.plot(left_fitx, ploty, color='yellow')
    plt.plot(right_fitx, ploty, color='yellow')
    plt.imshow(out_img)
    return left_fitx, right_fitx
def fit_poly(img_shape, leftx, lefty, rightx, righty):
    """Fit second-order polynomials x = f(y) to each lane and evaluate them.

    Parameters
    ----------
    img_shape : tuple
        (height, width) of the image; one sample is produced per row.
    leftx, lefty, rightx, righty : array-like
        Pixel coordinates of the left and right lane lines.

    Returns
    -------
    tuple
        (left_fitx, right_fitx, ploty): fitted x values for each lane and
        the y positions they were evaluated at.
    """
    coeffs_left = np.polyfit(lefty, leftx, 2)
    coeffs_right = np.polyfit(righty, rightx, 2)
    # One y value per image row.
    ploty = np.linspace(0, img_shape[0] - 1, img_shape[0])
    left_fitx = coeffs_left[0]*ploty**2 + coeffs_left[1]*ploty + coeffs_left[2]
    right_fitx = coeffs_right[0]*ploty**2 + coeffs_right[1]*ploty + coeffs_right[2]
    return left_fitx, right_fitx, ploty
def search_around_poly(binary_warped):
    """Search for lane pixels inside a margin around previously fitted polynomials.

    NOTE(review): ``left_fit`` and ``right_fit`` are read below but are
    neither parameters nor locals -- they must already exist as globals
    (e.g. from a prior fitting step), otherwise this raises NameError.

    Returns an RGB visualisation of the search corridor (green) with the
    detected pixels coloured (left red, right blue) and the new fits in
    yellow.
    """
    # HYPERPARAMETER
    # Choose the width of the margin around the previous polynomial to search
    # The quiz grader expects 100 here, but feel free to tune on your own!
    margin = 100
    # Grab activated pixels
    nonzero = binary_warped.nonzero()
    nonzeroy = np.array(nonzero[0])
    nonzerox = np.array(nonzero[1])
    # Select pixels whose x lies within +/- margin of each previous polynomial,
    # evaluated at that pixel's y.
    left_lane_inds = ((nonzerox > (left_fit[0]*(nonzeroy**2) + left_fit[1]*nonzeroy +
                    left_fit[2] - margin)) & (nonzerox < (left_fit[0]*(nonzeroy**2) +
                    left_fit[1]*nonzeroy + left_fit[2] + margin)))
    right_lane_inds = ((nonzerox > (right_fit[0]*(nonzeroy**2) + right_fit[1]*nonzeroy +
                    right_fit[2] - margin)) & (nonzerox < (right_fit[0]*(nonzeroy**2) +
                    right_fit[1]*nonzeroy + right_fit[2] + margin)))
    # Again, extract left and right line pixel positions
    leftx = nonzerox[left_lane_inds]
    lefty = nonzeroy[left_lane_inds]
    rightx = nonzerox[right_lane_inds]
    righty = nonzeroy[right_lane_inds]
    # Fit new polynomials
    left_fitx, right_fitx, ploty = fit_poly(binary_warped.shape, leftx, lefty, rightx, righty)
    ## Visualization ##
    # Create an image to draw on and an image to show the selection window
    out_img = np.dstack((binary_warped, binary_warped, binary_warped))*255
    window_img = np.zeros_like(out_img)
    # Color in left and right line pixels
    out_img[nonzeroy[left_lane_inds], nonzerox[left_lane_inds]] = [255, 0, 0]
    out_img[nonzeroy[right_lane_inds], nonzerox[right_lane_inds]] = [0, 0, 255]
    # Generate a polygon to illustrate the search window area
    # And recast the x and y points into usable format for cv2.fillPoly()
    left_line_window1 = np.array([np.transpose(np.vstack([left_fitx-margin, ploty]))])
    left_line_window2 = np.array([np.flipud(np.transpose(np.vstack([left_fitx+margin,
                              ploty])))])
    left_line_pts = np.hstack((left_line_window1, left_line_window2))
    right_line_window1 = np.array([np.transpose(np.vstack([right_fitx-margin, ploty]))])
    right_line_window2 = np.array([np.flipud(np.transpose(np.vstack([right_fitx+margin,
                              ploty])))])
    right_line_pts = np.hstack((right_line_window1, right_line_window2))
    # Draw the lane onto the warped blank image
    cv2.fillPoly(window_img, np.int_([left_line_pts]), (0,255, 0))
    cv2.fillPoly(window_img, np.int_([right_line_pts]), (0,255, 0))
    result = cv2.addWeighted(out_img, 1, window_img, 0.3, 0)
    # Plot the polynomial lines onto the image
    plt.plot(left_fitx, ploty, color='yellow')
    plt.plot(right_fitx, ploty, color='yellow')
    ## End visualization steps ##
    plt.imshow(result)
    return result
def generate_data(ym_per_pix, xm_per_pix):
    '''
    Generates fake data to use for calculating lane curvature.
    In your own project, you'll ignore this function and instead
    feed in the output of your lane detection algorithm to
    the lane curvature calculation.

    Args:
        ym_per_pix: meters per pixel in the y dimension.
        xm_per_pix: meters per pixel in the x dimension.

    Returns:
        ploty: y positions (pixels) covering the image height.
        left_fit_cr, right_fit_cr: second-order polynomial coefficients
            for the left/right lane lines, fitted in meter space.
    '''
    # Set random seed number so results are consistent for grader
    # Comment this out if you'd like to see results on different random data!
    np.random.seed(0)
    # Generate some fake data to represent lane-line pixels
    ploty = np.linspace(0, 719, num=720)  # to cover same y-range as image
    quadratic_coeff = 3e-4  # arbitrary quadratic coefficient
    # BUG FIX: leftx/rightx were referenced without being defined (NameError).
    # Recreate them as noisy quadratics around x=200 (left) and x=900 (right).
    leftx = np.array([200 + (y**2) * quadratic_coeff + np.random.randint(-50, high=51)
                      for y in ploty])
    rightx = np.array([900 + (y**2) * quadratic_coeff + np.random.randint(-50, high=51)
                       for y in ploty])
    # Reverse to match top-to-bottom in y
    leftx = leftx[::-1]
    rightx = rightx[::-1]
    # Fit polynomials in real-world (meter) space
    left_fit_cr = np.polyfit(ploty*ym_per_pix, leftx*xm_per_pix, 2)
    right_fit_cr = np.polyfit(ploty*ym_per_pix, rightx*xm_per_pix, 2)
    return ploty, left_fit_cr, right_fit_cr
def measure_curvature_real():
    '''
    Calculates the radius of curvature of the fitted lane-line
    polynomials, expressed in meters.

    Returns:
        (left_curverad, right_curverad): curvature radii in meters for the
        left and right lane lines, evaluated at the bottom of the image.
    '''
    # Pixel-to-meter conversion factors
    ym_per_pix = 30/720   # meters per pixel in y dimension
    xm_per_pix = 3.7/700  # meters per pixel in x dimension

    # Fake example data stands in for real lane detections here.
    ploty, left_fit_cr, right_fit_cr = generate_data(ym_per_pix, xm_per_pix)

    # Evaluate at the maximum y-value: the bottom of the image.
    y_eval = np.max(ploty)

    def _radius(fit):
        # R = (1 + (2*A*y + B)^2)^(3/2) / |2*A|, with y in meters
        slope_term = (2*fit[0]*y_eval*ym_per_pix) + fit[1]
        return np.sqrt((1 + slope_term**2)**3) / np.absolute(2*fit[0])

    left_curverad = _radius(left_fit_cr)
    right_curverad = _radius(right_fit_cr)
    print(left_curverad, 'm', right_curverad, 'm')
    return left_curverad, right_curverad
|
{"hexsha": "6697d640a63867bd713984669d124db7e22dfae0", "size": 14871, "ext": "py", "lang": "Python", "max_stars_repo_path": "examples/pipeline.py", "max_stars_repo_name": "PrabhaSNR/ND_Project2_AdvancedLaneFinding", "max_stars_repo_head_hexsha": "e303d71a0c071b0042b72cefe0cedf525d42b407", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "examples/pipeline.py", "max_issues_repo_name": "PrabhaSNR/ND_Project2_AdvancedLaneFinding", "max_issues_repo_head_hexsha": "e303d71a0c071b0042b72cefe0cedf525d42b407", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "examples/pipeline.py", "max_forks_repo_name": "PrabhaSNR/ND_Project2_AdvancedLaneFinding", "max_forks_repo_head_hexsha": "e303d71a0c071b0042b72cefe0cedf525d42b407", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 43.1043478261, "max_line_length": 180, "alphanum_fraction": 0.6921525116, "include": true, "reason": "import numpy", "num_tokens": 4152}
|
// Copyright (C) 2014, Pawel Tomulik <ptomulik@meil.pw.edu.pl>
//
// Distributed under the Boost Software License, Version 1.0.
// (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
#define BOOST_TEST_MODULE test_tml_xxx
#include <boost/test/unit_test.hpp>
#include <yaul/tml/xxx.hpp>
// Smoke-test placeholder: only verifies that the test module compiles,
// links and runs. Replace BOOST_CHECK(true) with real assertions for
// the component included from <yaul/tml/xxx.hpp>.
BOOST_AUTO_TEST_CASE(foo)
{
  BOOST_CHECK(true);
}
|
{"hexsha": "cba9ec16486e263c471614f7160ba7ef754c0f36", "size": 378, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "template/unit_test.cpp", "max_stars_repo_name": "ptomulik/yaul-tml", "max_stars_repo_head_hexsha": "2b8bf3f88742996bd8199375678cdebd6e3206d9", "max_stars_repo_licenses": ["BSL-1.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "template/unit_test.cpp", "max_issues_repo_name": "ptomulik/yaul-tml", "max_issues_repo_head_hexsha": "2b8bf3f88742996bd8199375678cdebd6e3206d9", "max_issues_repo_licenses": ["BSL-1.0"], "max_issues_count": 2.0, "max_issues_repo_issues_event_min_datetime": "2015-03-07T13:52:46.000Z", "max_issues_repo_issues_event_max_datetime": "2015-03-07T13:53:14.000Z", "max_forks_repo_path": "template/unit_test.cpp", "max_forks_repo_name": "ptomulik/yaul-tml", "max_forks_repo_head_hexsha": "2b8bf3f88742996bd8199375678cdebd6e3206d9", "max_forks_repo_licenses": ["BSL-1.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 25.2, "max_line_length": 62, "alphanum_fraction": 0.7513227513, "num_tokens": 110}
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2021 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import tensorflow as tf
from tensorflow.python.platform import tf_logging
from neural_compressor.utils.utility import dump_elapsed_time
from ..graph_base import GraphRewriterBase
from ..graph_util import GraphAnalyzer, GraphRewriterHelper
class GraphFoldConstantOptimizer(GraphRewriterBase):
    """Fold sub-graphs that consist entirely of constant nodes.

    For every node whose op is in ``supported_op_type`` and whose inputs are
    all ``Const`` nodes, the value is computed ahead of time and the
    sub-graph is replaced with a single ``Const`` node.
    """

    # Ops this optimizer knows how to evaluate at fold time.
    supported_op_type = ["Add", "AddV2", "Const", "Mul", "Rsqrt", "Sub"]

    def __init__(self, model=None):
        """Parse *model* (a graph) into the analyzer's node table."""
        super().__init__(model)
        self.graph_analyzer = GraphAnalyzer()
        self.graph_analyzer.graph = self.model
        self.graph_info = self.graph_analyzer.parse_graph()

    def _fold_value(self, end_node_name):
        """Recursively calculate the value of a constant-only node sequence.

        There may be layers whose inputs are all constant in the graph, like:
            const
                  > add
            const
        The value of add can be calculated in advance.

        Args:
            end_node_name: name of the end node of the sequence,
                e.g. ``add`` in the above example.

        Returns:
            Value of the end node, or None when the op is not supported
            (an informational message is logged in that case).

        Raises:
            ValueError: If the graph contains tensors which can't be
                broadcast, or (for Sub) inputs with mismatched dtypes.
        """
        end_node = self.graph_info[end_node_name].node

        def can_broadcast(s1, s2):
            # NumPy-style broadcast compatibility check between two shapes.
            if s1.shape and s2.shape:
                s1a = np.asarray(s1.shape)
                s2a = np.asarray(s2.shape)
                return ((s1a == 1) | (s2a == 1) | (s2a == s1a)).all()
            return True

        if self.graph_info[end_node_name].node.input:
            if end_node.op == "Mul":
                first_value = self._fold_value(list(end_node.input)[0])
                first_type = first_value.dtype
                fold_value = np.array(1.).astype(first_type)
                for input_name in end_node.input:
                    # broadcast if needed
                    input_value = self._fold_value(input_name)
                    if can_broadcast(fold_value, input_value):
                        fold_value = fold_value * input_value
                    else:
                        # BUG FIX: node inputs are name strings, so the original
                        # ``input.name`` raised AttributeError in this error path.
                        raise ValueError("input {} of node {} can't be broadcast".format(
                            input_name, end_node.name))
                return fold_value.astype(first_type)
            elif end_node.op == "Add" or end_node.op == "AddV2":
                first_value = self._fold_value(list(end_node.input)[0])
                first_type = first_value.dtype
                fold_value = np.array(0.).astype(first_type).reshape(())
                for input_name in end_node.input:
                    # broadcast if needed
                    input_value = self._fold_value(input_name)
                    if can_broadcast(fold_value, input_value):
                        fold_value = fold_value + input_value
                    else:
                        raise ValueError("input {} of node {} can't be broadcast".format(
                            input_name, end_node.name))
                return fold_value.astype(first_type)
            elif end_node.op == "Rsqrt":
                return 1 / np.sqrt(self._fold_value(end_node.input[0]))
            elif end_node.op == "Sub":
                first_value = self._fold_value(list(end_node.input)[0])
                first_type = first_value.dtype
                fold_value = np.array(0., dtype=first_type)
                for index, input_name in enumerate(end_node.input):
                    # broadcast if needed
                    input_value = self._fold_value(input_name)
                    if first_type != input_value.dtype:
                        raise ValueError(
                            "input of node {} must be in same dtype but get {} and {}".format(
                                input_name, first_type, input_value.dtype))
                    if can_broadcast(fold_value, input_value):
                        # Alternating sign implements a - b for the two inputs.
                        fold_value = fold_value + (-1)**index * input_value
                    else:
                        raise ValueError("input {} of node {} can't be broadcast".format(
                            input_name, end_node.name))
                return fold_value.astype(first_type)
            else:
                tf_logging.info(
                    "Currently fold-constant only support limited ops {} but face {}".format(
                        self.supported_op_type, end_node.op))
        else:
            # A node without inputs is a Const: read its value directly.
            return GraphRewriterHelper.values_from_const(end_node)

    def check_all_folded(self):
        """Check whether every foldable node has been folded.

        Returns:
            bool: True if no node with all-const inputs remains, else False.
        """
        for node_name, _ in self.graph_info.items():
            if self.check_const_inputs(node_name):
                return False
        return True

    def check_const_inputs(self, node_name):
        """Check whether *node_name* is a supported op with only const inputs.

        Args:
            node_name (string): node name

        Returns:
            bool: True if every input of the node is a Const node, else False.
        """
        if node_name not in self.graph_info:
            return False
        node_op = self.graph_info[node_name].node.op
        if node_op == "Placeholder" or node_op == "Const":
            return False
        if node_op not in self.supported_op_type:
            return False
        constant_flag = True
        for input_name in self.graph_info[node_name].node.input:
            input_name = GraphRewriterHelper.node_name_from_input(input_name)
            input_node = self.graph_info[input_name].node
            constant_flag &= input_node.op == "Const" and not input_node.input
        return constant_flag

    @dump_elapsed_time("Pass GraphFoldConstantOptimizer")
    def do_transformation(self):
        """Fold all sequences consisting only of Const and supported ops.

        Returns:
            graphdef: the optimized graph with constant sub-graphs folded.
        """
        # Repeat until fixpoint: folding one node may expose another.
        while not self.check_all_folded():
            for node_name, _ in self.graph_info.copy().items():
                if self.check_const_inputs(node_name):
                    fold_value = self._fold_value(node_name)
                    fold_type = tf.as_dtype(fold_value.dtype)
                    new_constant_node = GraphRewriterHelper.create_constant_node(
                        node_name + "_const", fold_value, fold_type)
                    self.graph_analyzer.replace_constant_graph_with_constant_node(
                        new_constant_node, node_name)
        output_graph_def = self.graph_analyzer.dump_graph()
        return output_graph_def
|
{"hexsha": "b92eaac994e1c1dd55b3a92f47b0702a6a3cf561", "size": 7315, "ext": "py", "lang": "Python", "max_stars_repo_path": "neural_compressor/adaptor/tf_utils/graph_rewriter/generic/fold_constant.py", "max_stars_repo_name": "kevinintel/neural-compressor", "max_stars_repo_head_hexsha": "b57645566aeff8d3c18dc49d2739a583c072f940", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 172, "max_stars_repo_stars_event_min_datetime": "2021-09-14T18:34:17.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-30T06:49:53.000Z", "max_issues_repo_path": "neural_compressor/adaptor/tf_utils/graph_rewriter/generic/fold_constant.py", "max_issues_repo_name": "kevinintel/neural-compressor", "max_issues_repo_head_hexsha": "b57645566aeff8d3c18dc49d2739a583c072f940", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 40, "max_issues_repo_issues_event_min_datetime": "2021-09-14T02:26:12.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-29T08:34:04.000Z", "max_forks_repo_path": "neural_compressor/adaptor/tf_utils/graph_rewriter/generic/fold_constant.py", "max_forks_repo_name": "kevinintel/neural-compressor", "max_forks_repo_head_hexsha": "b57645566aeff8d3c18dc49d2739a583c072f940", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 33, "max_forks_repo_forks_event_min_datetime": "2021-09-15T07:27:25.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-25T08:30:57.000Z", "avg_line_length": 40.8659217877, "max_line_length": 94, "alphanum_fraction": 0.5900205058, "include": true, "reason": "import numpy", "num_tokens": 1457}
|
## @ingroup Methods-Flight_Dynamics-Static_Stability-Approximations-Supporting_Functions
# extend_to_ref_area.py
#
# Created: Mar 2014, T. Momose
# Modified: Jan 2016, E. Botero
# ----------------------------------------------------------------------
# Imports
# ----------------------------------------------------------------------
#SUAVE Imports
import numpy as np
from SUAVE.Core import Data
# ----------------------------------------------------------------------
# Method
# ----------------------------------------------------------------------
## @ingroup Methods-Flight_Dynamics-Static_Stability-Approximations-Supporting_Functions
def extend_to_ref_area(surface):
    """ Extends an exposed trapezoidal aerodynamic surface to the fuselage
        centerline and stores the resulting reference dimensions on the
        surface under ``surface.extended``. Typically used to obtain the
        vertical-tail reference area for lateral stability calculations
        when only the exposed tail dimensions are known.

        Assumptions:
        Simple trapezoidal half-wing shape.

        Source:
        Unknown

        Inputs:
        surface - a SUAVE Wing object with the fields:
            spans.exposed or spans.projected - span (height for a vertical
                tail) of the exposed surface [meters]
            sweeps.quarter_chord - sweep of the aerodynamic surface [radians]
            chords.root - chord at the tail/fuselage junction [meters]
            chords.tip - chord at the tip of the surface [meters]
            symmetric - is the wing symmetric across the centerline?
            exposed_root_chord_offset - displacement from the fuselage
                centerline to the exposed root chordline [meters]

        Outputs:
        the same surface object (modified in place), with surface.extended:
            spans.projected - span/height measured from the centerline [meters]
            areas.reference - area of the extended trapezoid [meters**2]
            aspect_ratio - aspect ratio of the extended surface [meters]
            chords.root - chord of the extended trapezoid at the
                fuselage centerline [meters]
            root_LE_change - shift of the leading edge relative to the
                exposed surface; negative for sweptback surfaces [meters]

        Properties Used:
        N/A
    """
    # Unpack inputs
    sym_flag = surface.symmetric
    # Prefer the exposed span; fall back to projected if not present.
    try:
        half_span_exposed = surface.spans.exposed * 0.5 * (2 - sym_flag)
    except AttributeError:
        half_span_exposed = surface.spans.projected * 0.5 * (2 - sym_flag)
    tip_chord          = surface.chords.tip
    exposed_root_chord = surface.chords.root
    sweep_qc           = surface.sweeps.quarter_chord
    centerline_offset  = surface.exposed_root_chord_offset

    # Dimensions of the trapezoid extended to the centerline
    half_span        = half_span_exposed + centerline_offset
    centerline_chord = tip_chord + (half_span/half_span_exposed)*(exposed_root_chord - tip_chord)
    half_area        = 0.5*half_span*(centerline_chord + tip_chord)
    le_shift         = -centerline_offset*np.tan(sweep_qc)
    aspect_ratio     = half_span**2/half_area

    # NOTE: the input surface is extended in place (aliased, not copied).
    ref_surface = surface
    surface.extended        = Data()
    surface.extended.spans  = Data()
    surface.extended.areas  = Data()
    surface.extended.chords = Data()

    ref_surface.extended.origin          = np.array(surface.origin) * 1.
    ref_surface.extended.spans.projected = half_span * (1 + sym_flag)
    ref_surface.extended.areas.reference = half_area * (1 + sym_flag)
    ref_surface.extended.aspect_ratio    = aspect_ratio * (1 + sym_flag)
    ref_surface.extended.chords.root     = centerline_chord
    ref_surface.extended.root_LE_change  = le_shift
    ref_surface.extended.origin[0]       = ref_surface.origin[0] + le_shift

    return ref_surface
|
{"hexsha": "1c4d108d3bdbcac2b7a8970a73d48ccc520c47ae", "size": 4404, "ext": "py", "lang": "Python", "max_stars_repo_path": "SUAVE/SUAVE-2.5.0/trunk/SUAVE/Methods/Flight_Dynamics/Static_Stability/Approximations/Supporting_Functions/extend_to_ref_area.py", "max_stars_repo_name": "Vinicius-Tanigawa/Undergraduate-Research-Project", "max_stars_repo_head_hexsha": "e92372f07882484b127d7affe305eeec2238b8a9", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "SUAVE/SUAVE-2.5.0/trunk/SUAVE/Methods/Flight_Dynamics/Static_Stability/Approximations/Supporting_Functions/extend_to_ref_area.py", "max_issues_repo_name": "Vinicius-Tanigawa/Undergraduate-Research-Project", "max_issues_repo_head_hexsha": "e92372f07882484b127d7affe305eeec2238b8a9", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "SUAVE/SUAVE-2.5.0/trunk/SUAVE/Methods/Flight_Dynamics/Static_Stability/Approximations/Supporting_Functions/extend_to_ref_area.py", "max_forks_repo_name": "Vinicius-Tanigawa/Undergraduate-Research-Project", "max_forks_repo_head_hexsha": "e92372f07882484b127d7affe305eeec2238b8a9", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 47.8695652174, "max_line_length": 192, "alphanum_fraction": 0.5374659401, "include": true, "reason": "import numpy", "num_tokens": 869}
|
from __future__ import print_function
import numpy as np
import theano
import theano.tensor as T
import lasagne
import time
import random
import argparse
import re
import glob
import sys
import os
import copy
# import matplotlib.pyplot as plt
from helpers.data_handling import DataHandler
from helpers import evaluation
import helpers.command_parser as parse
def get_file_name(predictor, args):
    """Build the model file name, rebasing the '_ml<N>' max-length tag from
    the evaluation value onto the training value, prefixed with args.dir."""
    model_filename = predictor._get_model_filename(args.number_of_batches)
    eval_tag = '_ml' + str(args.max_length)
    train_tag = '_ml' + str(args.training_max_length)
    return args.dir + re.sub(eval_tag, train_tag, model_filename)
def find_models(predictor, dataset, args):
    """Locate the saved model file(s) for *predictor* under the dataset's
    'models/' directory. Returns None for model-free methods; returns an
    array of matching paths when args.number_of_batches is the wildcard '*'.
    """
    if args.method in ("UKNN", "MM", "POP"):
        # These methods have no trained model on disk.
        return None
    model_path = dataset.dirname + "models/" + get_file_name(predictor, args)
    print(model_path)
    if args.number_of_batches == "*":
        model_path = np.array(glob.glob(model_path))
    return model_path
def save_file_name(predictor, dataset, args):
    """Path of the results file under the dataset's 'results/' directory,
    with any '_ne*_' wildcard tag stripped; None when saving is disabled."""
    if not args.save:
        return None
    raw_path = dataset.dirname + 'results/' + get_file_name(predictor, args)
    return re.sub('_ne\*_', '_', raw_path)
def run_tests(predictor, model_file, dataset, args, get_full_recommendation_list=False, k=10):
    """Load *model_file* into *predictor* and evaluate it on the dataset's test set.

    For each test sequence, the first item is used as the viewed history and
    the remaining items are the prediction goal.

    Args:
        predictor: predictor object exposing load() and top_k_recommendations().
        model_file: path of the saved model to load.
        dataset: dataset handler providing test_set() and n_items.
        args: parsed command-line arguments (args.clusters is read here).
        get_full_recommendation_list: if True, request a ranking over all items.
        k: number of recommendations to request per query.

    Returns:
        evaluation.Evaluator with one instance added per test sequence and
        nb_of_dp set to the mean number of considered data points (or n_items
        when the predictor does not report it).

    Raises:
        ValueError: if a test sequence yields an empty goal.
    """
    # Load model
    predictor.load(model_file)

    # Prepare evaluator
    evaluator = evaluation.Evaluator(dataset, k=k)
    if get_full_recommendation_list:
        k = dataset.n_items

    # BUG FIX: time.clock() was removed in Python 3.8; prefer perf_counter
    # when available, falling back to time.clock on legacy interpreters.
    timer = time.perf_counter if hasattr(time, 'perf_counter') else time.clock

    nb_of_dp = []
    start = timer()
    for sequence, user_id in dataset.test_set(epochs=1):
        # Split: first item is the "viewed" history, the rest is the goal.
        num_viewed = 1
        viewed = sequence[:num_viewed]
        goal = [i[0] for i in sequence[num_viewed:]]

        if args.clusters > 0:
            # Clustered predictors also report how many data points they scanned.
            recommendations, n = predictor.top_k_recommendations(viewed, user_id=user_id, k=k)
            nb_of_dp.append(n)
        else:
            recommendations = predictor.top_k_recommendations(viewed, user_id=user_id, k=k)

        evaluator.add_instance(goal, recommendations)

        if len(goal) == 0:
            raise ValueError('Test sequence for user {} has an empty goal'.format(user_id))
    end = timer()
    print('Timer: ', end - start)

    # Mean number of data points per query; fall back to the full catalogue size.
    evaluator.nb_of_dp = np.mean(nb_of_dp) if nb_of_dp else dataset.n_items
    return evaluator
def print_results(ev, metrics, plot=True, file=None, n_batches=None, print_full_rank_comparison=False):
    """Print evaluation metrics and optionally append them to a results file.

    Args:
        ev: evaluator exposing a ``metrics`` dict of callables and ``k``.
        metrics: list of metric names to report.
        plot: unused; kept for backward compatibility with callers.
        file: results file path; when None, a summary line goes to stderr.
        n_batches: number of training batches, written as the first column.
        print_full_rank_comparison: also dump goal/prediction ranks to
            ``file + "_full_rank"`` (requires ``file`` to be set).

    Raises:
        ValueError: if a requested metric is unknown to the evaluator.
    """
    for m in metrics:
        if m not in ev.metrics:
            raise ValueError('Unkown metric: ' + m)
        print(m + '@' + str(ev.k) + ': ', ev.metrics[m]())

    values = [ev.metrics[m]() for m in metrics]
    if file is not None:
        if not os.path.exists(os.path.dirname(file)):
            os.makedirs(os.path.dirname(file))
        with open(file, "a") as f:
            # BUG FIX: a "\t" was missing between n_batches and the first
            # metric, which fused them into one token and broke the parsing
            # done by get_last_tested_batch().
            f.write(str(n_batches) + "\t" + "\t".join(map(str, values)) + "\n")
        if print_full_rank_comparison:
            with open(file + "_full_rank", "a") as f:
                for data in ev.get_rank_comparison():
                    f.write("\t".join(map(str, data)) + "\n")
    else:
        print("-\t" + "\t".join(map(str, values)), file=sys.stderr)
        if print_full_rank_comparison:
            # BUG FIX: the original opened ``None + "_full_rank"`` here
            # (TypeError); without a file name the ranks cannot be saved.
            print("Cannot save full rank comparison: no output file specified.",
                  file=sys.stderr)
def extract_number_of_epochs(filename):
    """Parse the epoch count embedded in *filename* as '_ne<number>_'."""
    match = re.search(r'_ne([0-9]+(\.[0-9]+)?)_', filename)
    return float(match.group(1))
def get_last_tested_batch(filename):
    '''If the output file exists already, look at its content and return the
    last batch that was tested (first column of the last line).
    This is used to avoid testing the same model twice.

    Args:
        filename: results file path, or None.

    Returns:
        float batch number of the last tested model, or 0 when there is no
        file, no filename, or the file is empty.
    '''
    if filename is not None and os.path.isfile(filename):
        last_line = None
        with open(filename) as f:
            for line in f:
                last_line = line
        # BUG FIX: an empty file left ``line`` unbound and raised an error.
        if last_line is None:
            return 0
        return float(last_line.split()[0])
    else:
        return 0
def test_command_parser(parser):
parser.add_argument('-d', dest='dataset', help='Directory name of the dataset.', default='', type=str)
parser.add_argument('-i', dest='number_of_batches', help='Number of epochs, if not set it will compare all the available models', default=-1, type=int)
parser.add_argument('-k', dest='nb_of_predictions', help='Number of predictions to make. It is the "k" in "prec@k", "rec@k", etc.', default=10, type=int)
parser.add_argument('--metrics', help='List of metrics to compute, comma separated', default='sps,recall,item_coverage,user_coverage,blockbuster_share', type=str)
parser.add_argument('--save', help='Save results to a file', action='store_true')
parser.add_argument('--dir', help='Model directory.', default="", type=str)
parser.add_argument('--save_rank', help='Save the full comparison of goal and prediction ranking.', action='store_true')
def main():
    """Entry point: evaluate one saved model, or every available model when
    the number of batches is left unset (wildcard)."""
    args = parse.command_parser(parse.predictor_command_parser, test_command_parser)

    args.training_max_length = args.max_length

    if args.number_of_batches == -1:
        # Unset: compare all available models.
        args.number_of_batches = "*"

    dataset = DataHandler(dirname=args.dataset)
    predictor = parse.get_predictor(args)
    predictor.prepare_model(dataset)
    file = find_models(predictor, dataset, args)
    if args.number_of_batches == "*" and args.method != "UKNN" and args.method != "MM" and args.method != "POP":
        output_file = save_file_name(predictor, dataset, args)
        last_tested_batch = get_last_tested_batch(output_file)
        # BUG FIX: under Python 3, map() returns an iterator and
        # np.array(map(...)) produces a useless 0-d object array; wrap in list().
        batches = np.array(list(map(extract_number_of_epochs, file)))
        sorted_ids = np.argsort(batches)
        batches = batches[sorted_ids]
        file = file[sorted_ids]
        for i, f in enumerate(file):
            # Skip models already present in the results file.
            if batches[i] > last_tested_batch:
                evaluator = run_tests(predictor, f, dataset, args,
                                      get_full_recommendation_list=args.save_rank,
                                      k=args.nb_of_predictions)
                print('-------------------')
                print('(', i+1, '/', len(file), ') results on ' + f)
                print_results(evaluator, args.metrics.split(','), plot=False,
                              file=output_file, n_batches=batches[i],
                              print_full_rank_comparison=args.save_rank)
    else:
        evaluator = run_tests(predictor, file, dataset, args,
                              get_full_recommendation_list=args.save_rank,
                              k=args.nb_of_predictions)
        print_results(evaluator, args.metrics.split(','),
                      file=save_file_name(predictor, dataset, args),
                      print_full_rank_comparison=args.save_rank)


if __name__ == '__main__':
    main()
|
{"hexsha": "a17c50ac45f32c0d93d272ac943c5121b5859b56", "size": 6018, "ext": "py", "lang": "Python", "max_stars_repo_path": "test.py", "max_stars_repo_name": "Kanika91/sequence-based-recommendations_clone", "max_stars_repo_head_hexsha": "73adddf747f1e3d986d7321c3567ee069f7b248b", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "test.py", "max_issues_repo_name": "Kanika91/sequence-based-recommendations_clone", "max_issues_repo_head_hexsha": "73adddf747f1e3d986d7321c3567ee069f7b248b", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "test.py", "max_forks_repo_name": "Kanika91/sequence-based-recommendations_clone", "max_forks_repo_head_hexsha": "73adddf747f1e3d986d7321c3567ee069f7b248b", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-10-29T12:34:15.000Z", "max_forks_repo_forks_event_max_datetime": "2020-10-29T12:34:15.000Z", "avg_line_length": 35.8214285714, "max_line_length": 163, "alphanum_fraction": 0.7238285145, "include": true, "reason": "import numpy,import theano", "num_tokens": 1537}
|
# HardTestProblems: aggregates challenging benchmark problem suites.
module HardTestProblems
# NOTE(review): BSON and Statistics.mean are not used in this file directly;
# presumably consumed by the included problem definitions -- confirm.
import BSON
import Statistics: mean
# Multiobjective problems
include("Multiobjective/RW_MOP_2021/RW_MOP_2021.jl")
# Single-objective CEC 2020 benchmark suite
include("Singleobjective/CEC2020/CEC2020.jl")
# Bilevel optimization suites (SMD and PMM)
include("Bilevel/SMD/SMD.jl")
include("Bilevel/PMM/PMM.jl")
end
|
{"hexsha": "019e8b2cee4815602dca454136fd71f1fd86384f", "size": 253, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/HardTestProblems.jl", "max_stars_repo_name": "jmejia8/HardTestProblems.jl", "max_stars_repo_head_hexsha": "cde9e6c654f046fc8b9f01a434f7b213a0fab182", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/HardTestProblems.jl", "max_issues_repo_name": "jmejia8/HardTestProblems.jl", "max_issues_repo_head_hexsha": "cde9e6c654f046fc8b9f01a434f7b213a0fab182", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2022-02-08T04:09:36.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-08T04:09:36.000Z", "max_forks_repo_path": "src/HardTestProblems.jl", "max_forks_repo_name": "jmejia8/HardTestProblems.jl", "max_forks_repo_head_hexsha": "cde9e6c654f046fc8b9f01a434f7b213a0fab182", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 18.0714285714, "max_line_length": 52, "alphanum_fraction": 0.8063241107, "num_tokens": 84}
|
#ifndef MSRP_MESSAGE_HXX
#define MSRP_MESSAGE_HXX
#include <map>
#include <ostream>
#include <string>
#include <boost/shared_ptr.hpp>
#include <asio/buffer.hpp>
#include <rutil/Data.hxx>
#include "msrp/Header.hxx"
#include "msrp/ParseException.hxx"
namespace msrp
{
namespace parser
{
struct Message;
}
// A single MSRP message: method or response status line, headers (parsed
// lazily on first access), and the message body contents.
class Message
{
public:
   // MSRP request methods; Response marks a response message.
   enum Method
   {
      AUTH,
      SEND,
      REPORT,
      Response
   };
   // Continuation state of the message body.
   // NOTE(review): presumably these map to the MSRP end-line continuation
   // flags ('$', '+', '#') -- confirm against the parser.
   enum MsgStatus
   {
      Continued,
      Complete,
      Interrupted,
      Streaming
   };
   // !cb! You may choose to instantiate Message in any way you like--the
   // constructor is public--but if you use the factory method, you get the
   // advantage of allocating from an object pool specialized for Message
   // objects. In a relay situation especially, this results in reduced
   // CPU consumption and decreased heap fragmentation. The object is
   // automatically freed back to the pool on destruction. You can also
   // avoid the exception-throwing lazy parser code by calling preparse().
   // parse a message from network buffer contents
   static boost::shared_ptr<Message> factory(const asio::const_buffer&);
   static boost::shared_ptr<Message> factory();
   Message();
   // Create a response template from this message.
   boost::shared_ptr<Message> response(unsigned int code, const std::string& phrase) const;
   // Accessors for the status/method line fields; each has a const getter
   // and a non-const reference overload for in-place modification.
   const std::string& transaction() const { return mTransaction; }
   std::string& transaction() { return mTransaction; }
   const unsigned int statusCode() const { return mStatusCode; }
   unsigned int& statusCode() { return mStatusCode; }
   const std::string& statusPhrase() const { return mStatusPhrase; }
   std::string& statusPhrase() { return mStatusPhrase; }
   const Method& method() const { return mMethod; }
   Method& method() { return mMethod; }
   const MsgStatus status() const { return mStatus; }
   MsgStatus& status() { return mStatus; }
   const resip::Data& contents() const { return mContents; }
   resip::Data& contents() { return mContents; }
   // Typed header access: parses the header on demand from mHeaders.
   // May throw ParseException from the lazy parser (see preparse()).
   template<typename HeaderT>
   const typename HeaderT::Value& header() const
   {
      return lazyStorage<HeaderT>().getConst(mHeaders);
   }
   template<typename HeaderT>
   typename HeaderT::Value& header()
   {
      return lazyStorage<HeaderT>().get(mHeaders);
   }
   // Direct access to the already-parsed value; does not consult mHeaders.
   template<typename HeaderT>
   typename HeaderT::Value& headerRef()
   {
      return lazyStorage<HeaderT>().value();
   }
   // True when the header has been parsed or is present in raw form.
   template<typename HeaderT>
   bool exists() const
   {
      return lazyStorage<HeaderT>().parsed() || exists(HeaderT::Key);
   }
   // extension headers
   // Raw string access by header name; throws ParseException when absent.
   const std::string& header(const std::string& key) const
   {
      using std::map;
      using std::string;
      map<string, string>::const_iterator i = mHeaders.find(key);
      if (i == mHeaders.end())
      {
         throw ParseException(key, codeContext());
      }
      return i->second;
   }
   // Mutable raw access; inserts an empty value when the key is absent.
   std::string& header(const std::string& key)
   {
      return mHeaders[key];
   }
   bool exists(const std::string& key) const
   {
      return mHeaders.find(key) != mHeaders.end();
   }
   // Parse all header contents up front. This may be desirable in
   // applications where performance is not critical and you don't
   // want to worry about surrounding all header<> calls in try-catch
   // blocks.
   void preparse();
   // Prepare message for transmission -- create random transaction and
   // message IDs if they are not already set.
   bool prepare();
   std::ostream& encodeHeader(std::ostream&) const;
   std::ostream& encodeContents(std::ostream&) const;
public:
   // NOTE(review): the members below are public (with a parser friend);
   // looks like they were intended to be private -- confirm before changing.
   friend struct parser::Message;
   // unparsed
   mutable std::map<std::string, std::string> mHeaders;
   // parsed
   // DefineHeader declares one lazily-parsed storage member per known header.
#define DefineHeader(h) h lazy##h
   DefineHeader(FromPath);            // From-Path
   DefineHeader(ToPath);              // To-Path
   DefineHeader(UsePath);             // Use-Path
   DefineHeader(MessageId);           // Message-ID
   DefineHeader(ContentLength);       // Content-Length
   DefineHeader(ContentType);         // Content-Type
   DefineHeader(ByteRange);           // Byte-Range
   DefineHeader(Expires);             // Expires
   DefineHeader(MinExpires);          // Min-Expires
   DefineHeader(Status);              // Status
   DefineHeader(SuccessReport);       // Success-Report
   DefineHeader(FailureReport);       // Failure-Report
#ifdef ENABLE_AUTHTUPLE
   DefineHeader(WWWAuthenticate);     // WWW-Authenticate
   DefineHeader(AuthenticationInfo);  // Authentication-Info
   DefineHeader(Authorization);       // Authorization
#endif
#undef DefineHeader
   template<typename HeaderT>
   const HeaderT& lazyStorage() const
   {
      return const_cast<Message*>(this)->lazyStorage<HeaderT>();
   }
   // Primary template aborts: only the explicit specializations generated
   // by HeaderLinkage (below, outside the class) are valid to call.
   template<typename HeaderT>
   HeaderT& lazyStorage()
   {
      std::abort();
   }
   unsigned int mStatusCode;
   std::string mTransaction;
   std::string mStatusPhrase;
   resip::Data mContents;
   Method mMethod;
   MsgStatus mStatus;
};
// Stream insertion for a fully encoded Message (header plus contents).
std::ostream&
operator<<(std::ostream&, const Message&);
// Generate the explicit lazyStorage<T>() specialization returning the
// corresponding lazy member declared with DefineHeader inside Message.
#define HeaderLinkage(h) \
template<> inline h& \
Message::lazyStorage<h>() \
{ \
   return lazy##h; \
}
HeaderLinkage(FromPath);
HeaderLinkage(ToPath);
HeaderLinkage(UsePath);
HeaderLinkage(MessageId);
HeaderLinkage(ContentLength);
HeaderLinkage(ContentType);
HeaderLinkage(ByteRange);
HeaderLinkage(Expires);
HeaderLinkage(MinExpires);
HeaderLinkage(Status);
HeaderLinkage(SuccessReport);
HeaderLinkage(FailureReport);
#ifdef ENABLE_AUTHTUPLE
HeaderLinkage(WWWAuthenticate);
HeaderLinkage(AuthenticationInfo);
HeaderLinkage(Authorization);
#endif
#undef HeaderLinkage
}
#endif
// Copyright 2007 Chris Bond
//
// Permission is hereby granted, free of charge, to any person or organization
// obtaining a copy of the software and accompanying documentation covered by
// this license (the "Software") to use, reproduce, display, distribute,
// execute, and transmit the Software, and to prepare derivative works of the
// Software, and to permit third-parties to whom the Software is furnished to
// do so, all subject to the following:
//
// The copyright notices in the Software and this entire statement, including
// the above license grant, this restriction and the following disclaimer,
// must be included in all copies of the Software, in whole or in part, and
// all derivative works of the Software, unless such copies or derivative
// works are solely in the form of machine-executable object code generated by
// a source language processor.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE, TITLE AND NON-INFRINGEMENT. IN NO EVENT
// SHALL THE COPYRIGHT HOLDERS OR ANYONE DISTRIBUTING THE SOFTWARE BE LIABLE
// FOR ANY DAMAGES OR OTHER LIABILITY, WHETHER IN CONTRACT, TORT OR OTHERWISE,
// ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
|
{"hexsha": "73f489d72933b5026f3d82b0b586fab8b94f673a", "size": 7394, "ext": "hxx", "lang": "C++", "max_stars_repo_path": "Message.hxx", "max_stars_repo_name": "cbond/msrp", "max_stars_repo_head_hexsha": "d498f1ac8848319f4ecb617ad251e76de827a9a2", "max_stars_repo_licenses": ["BSL-1.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Message.hxx", "max_issues_repo_name": "cbond/msrp", "max_issues_repo_head_hexsha": "d498f1ac8848319f4ecb617ad251e76de827a9a2", "max_issues_repo_licenses": ["BSL-1.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Message.hxx", "max_forks_repo_name": "cbond/msrp", "max_forks_repo_head_hexsha": "d498f1ac8848319f4ecb617ad251e76de827a9a2", "max_forks_repo_licenses": ["BSL-1.0"], "max_forks_count": 1.0, "max_forks_repo_forks_event_min_datetime": "2021-07-20T12:14:59.000Z", "max_forks_repo_forks_event_max_datetime": "2021-07-20T12:14:59.000Z", "avg_line_length": 29.9352226721, "max_line_length": 94, "alphanum_fraction": 0.6609413038, "num_tokens": 1622}
|
#!/usr/bin/env python
"""Show an example of how to re-sample high-pass DT-CWT coefficients.
"""
import os
import dtcwt
import dtcwt.compat
import dtcwt.sampling
# Use an off-screen backend for matplotlib
import matplotlib
matplotlib.use('agg')
# Import numpy and matplotlib's pyplot interface
import numpy as np
from matplotlib.pyplot import *
# Get a copy of the famous 'mandrill' image. In the default dtcwt tree, we ship
# one with the tests. The mandrill image is 512x512, floating point and has pixel
# values on the interval (0, 1].
mandrill = np.load(
    os.path.join(os.path.dirname(__file__), '..', 'tests', 'mandrill.npz')
)['mandrill']
# Chop a 64x64 window out of the middle of the image.
mandrill = mandrill[224:288,224:288]
# We will try to re-scale mandrill by this amount and method
scale = 1.2
scale_method = 'lanczos'
def scale_direct(im):
    """Rescale *im* in the pixel domain by the module-level ``scale``
    factor, using the ``scale_method`` sampling kernel."""
    target_shape = (im.shape[0] * scale, im.shape[1] * scale)
    return dtcwt.sampling.rescale(im, target_shape, scale_method)
def scale_highpass(im):
    """Rescale *im* treating it as DT-CWT highpass coefficients,
    by the module-level ``scale`` factor with ``scale_method``."""
    target_shape = (im.shape[0] * scale, im.shape[1] * scale)
    return dtcwt.sampling.rescale_highpass(im, target_shape, scale_method)
# Rescale mandrill directly using default (Lanczos) sampling
mandrill_direct = scale_direct(mandrill)
# Transform mandrill with a 4-level DT-CWT.
mandrill_l, mandrill_h = dtcwt.compat.dtwavexfm2(mandrill, nlevels=4)
# Re-scale each component and transform back. Do this both with and without
# shifting back to DC.
mandrill_l = scale_direct(mandrill_l)
mandrill_h_a, mandrill_h_b = [], []
for h in mandrill_h:
    # 'a': naive rescale of the highpass coefficients.
    mandrill_h_a.append(scale_direct(h))
    # 'b': rescale via rescale_highpass, which treats the band as
    # highpass coefficients (presumably compensating for the band's
    # centre frequency -- see the dtcwt.sampling docs).
    mandrill_h_b.append(scale_highpass(h))
# Transform back
mandrill_a = dtcwt.compat.dtwaveifm2(mandrill_l, mandrill_h_a)
mandrill_b = dtcwt.compat.dtwaveifm2(mandrill_l, mandrill_h_b)
# 2x2 comparison figure: original, direct rescale, and both
# wavelet-domain reconstructions.
figure(figsize=(10,10))
subplot(2,2,1)
imshow(mandrill, cmap=cm.gray, clim=(0,1), interpolation='none')
axis('off')
title('Original')
subplot(2,2,2)
imshow(mandrill_direct, cmap=cm.gray, clim=(0,1), interpolation='none')
axis('off')
title('Directly up-sampled')
subplot(2,2,3)
imshow(mandrill_a, cmap=cm.gray, clim=(0,1), interpolation='none')
axis('off')
title('Up-sampled in the wavelet domain')
subplot(2,2,4)
imshow(mandrill_b, cmap=cm.gray, clim=(0,1), interpolation='none')
axis('off')
title('Up-sampled in the wavelet domain with shifting')
tight_layout()
savefig('resampling-example.png')
|
{"hexsha": "36eb2a369bf334a667e8a4805659beebe9c62845", "size": 2376, "ext": "py", "lang": "Python", "max_stars_repo_path": "examples/resampling_highpass_coefficients.py", "max_stars_repo_name": "santosh653/dtcwt", "max_stars_repo_head_hexsha": "01d9e87dc9abfa244a89c1f05aebf3dec6999f3a", "max_stars_repo_licenses": ["BSD-2-Clause"], "max_stars_count": 61, "max_stars_repo_stars_event_min_datetime": "2015-01-04T09:21:29.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-07T16:25:02.000Z", "max_issues_repo_path": "examples/resampling_highpass_coefficients.py", "max_issues_repo_name": "santosh653/dtcwt", "max_issues_repo_head_hexsha": "01d9e87dc9abfa244a89c1f05aebf3dec6999f3a", "max_issues_repo_licenses": ["BSD-2-Clause"], "max_issues_count": 17, "max_issues_repo_issues_event_min_datetime": "2015-04-02T13:37:07.000Z", "max_issues_repo_issues_event_max_datetime": "2018-03-07T09:57:57.000Z", "max_forks_repo_path": "examples/resampling_highpass_coefficients.py", "max_forks_repo_name": "santosh653/dtcwt", "max_forks_repo_head_hexsha": "01d9e87dc9abfa244a89c1f05aebf3dec6999f3a", "max_forks_repo_licenses": ["BSD-2-Clause"], "max_forks_count": 26, "max_forks_repo_forks_event_min_datetime": "2015-04-16T06:22:16.000Z", "max_forks_repo_forks_event_max_datetime": "2021-12-07T09:17:44.000Z", "avg_line_length": 28.2857142857, "max_line_length": 100, "alphanum_fraction": 0.7441077441, "include": true, "reason": "import numpy", "num_tokens": 698}
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Functions for plotting.
:copyright: 2015 Agile Geoscience
:license: Apache 2.0
"""
import csv
import os
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.transforms as transforms
from striplog import Legend
import utils
from notice import Notice
def get_curve_params(abbrev, fname):
    """
    Builds and returns a dictionary of petrophysical parameters for
    plotting purposes.

    Args:
        abbrev (str): A curve mnemonic or other abbreviation.
        fname (str): The path to a CSV file with the curve configuration.

    Returns:
        dict: A mapping of parameter:value for the curve in question.
            If no CSV row matches, only the 'acronym' key is present,
            so callers indexing e.g. params['track'] will get KeyError.
    """
    params = {'acronym': abbrev}
    # NOTE(review): 'rU' is a Python-2-era universal-newlines mode (this
    # module is Python 2: see the print statement / iteritems elsewhere);
    # it was removed in Python 3.11 -- revisit before any upgrade.
    with open(fname, 'rU') as csvfile:
        reader = csv.DictReader(csvfile)
        for row in reader:
            # 'acronymn' (sic) must match the header spelling in the CSV.
            if row['acronymn'] == abbrev:
                params['track'] = int(row['track'])
                params['units'] = row['units']
                params['xleft'] = float(row['xleft'])
                params['xright'] = float(row['xright'])
                params['logarithmic'] = row['logarithmic']
                params['hexcolor'] = row['hexcolor']
                # NOTE(review): bool() on any non-empty string is True, so a
                # literal "False" cell still enables the fill. This is only
                # correct if falsy cells are empty strings -- verify the CSV.
                params['fill_left_cond'] = bool(row['fill_left_cond'])
                params['fill_left'] = row['fill_left']
                params['fill_right_cond'] = bool(row['fill_right_cond'])
                params['fill_right'] = row['fill_right']
                params['xticks'] = row['xticks'].split(',')
                # No break: if an acronym repeats, the last matching row wins.
    return params
def plot_feature_well(tc, gs):
    """
    Plotting function for the feature well: draws the striplog plus up
    to five tracks of smoothed curves, with fills, labels and tops.

    Args:
        tc (TransectContainer): The container for the main plot.
        gs (GridSpec): A matplotlib gridspec; tracks occupy its last
            five columns, rows 2 and below.

    Returns:
        GridSpec: The gridspec passed in (returned unchanged, also on
            the no-data early exit, so the caller can keep composing).
    """
    fname = tc.settings['curve_display']
    logs = tc.log.get(tc.feature_well)
    if not logs:
        # There was no data for this well, so there won't be a feature plot.
        Notice.fail("There's no well data for feature well " + tc.feature_well)
        return gs
    # Depth index shared by all curves in this well.
    Z = logs.data['DEPT']
    curves = ['GR', 'DT',
              'DPHI_SAN',
              'NPHI_SAN',
              'DTS',
              'RT_HRLT',
              'RHOB',
              'DRHO']
    window = tc.settings.get('curve_smooth_window') or 51
    ntracks = 5
    lw = 1.0
    smooth = True
    naxes = 0
    # Per-track curve count; track 0 also holds the striplog if present.
    ncurv_per_track = np.zeros(ntracks)
    if getattr(tc.log, 'striplog', None):
        ncurv_per_track[0] = 1
    for curve in curves:
        naxes += 1
        params = get_curve_params(curve, fname)
        ncurv_per_track[params['track']] += 1
    # One axis list per track; track 0 gets a twinned x-axis for GR.
    axss = plt.subplot(gs[2:, -5])
    axs0 = [axss, axss.twiny()]
    axs1 = [plt.subplot(gs[2:, -4])]
    axs2 = [plt.subplot(gs[2:, -3])]
    axs3 = [plt.subplot(gs[2:, -2])]
    axs4 = [plt.subplot(gs[2:, -1])]
    axs = [axs0, axs1, axs2, axs3, axs4]
    if getattr(tc.log, 'striplog', None):
        legend = Legend.default()
        try:
            logs.striplog[tc.log.striplog].plot_axis(axs0[0], legend=legend)
        except KeyError:
            # In fact, this striplog doesn't exist.
            Notice.fail("There is no such striplog" + tc.log.striplog)
            # And move on...
    axs0[0].set_ylim([Z[-1], 0])
    # Tracks how many labels are already stacked above each track.
    label_shift = np.zeros(len(axs))
    for curve in curves:
        try:
            values = logs.data[curve]
        except ValueError:
            # Missing curve: plot NaNs so the track layout stays intact.
            Notice.warning("Curve not present: "+curve)
            values = np.empty_like(Z)
            values[:] = np.nan
        params = get_curve_params(curve, fname)
        i = params['track']
        j = 0
        label_shift[i] += 1
        linOrlog = params['logarithmic']
        sxticks = np.array(params['xticks'])
        xticks = np.array(sxticks, dtype=float)
        whichticks = 'major'
        if linOrlog == 'log':
            midline = np.log(np.mean(xticks))
            xpos = midline
            whichticks = 'minor'
        else:
            midline = np.mean(xticks)
            xpos = midline
        if smooth:
            values = utils.rolling_median(values, window)
        if curve == 'GR':
            j = 1 # second axis in first track
            label_shift[i] = 1
            if params['fill_left_cond']:
                # do the fill for the lithology track
                axs[i][j].fill_betweenx(Z, params['xleft'], values,
                                        facecolor=params['fill_left'],
                                        alpha=1.0, zorder=11)
        if (curve == 'DPHI_SAN') and params['fill_left_cond']:
            # do the fill for the neutron porosity track
            try:
                nphi = utils.rolling_median(logs.data['NPHI_SAN'], window)
            except ValueError:
                Notice.warning("No NPHI in this well")
                nphi = np.empty_like(Z)
                nphi[:] = np.nan
            axs[i][j].fill_betweenx(Z,
                                    nphi,
                                    values,
                                    where=nphi >= values,
                                    facecolor=params['fill_left'],
                                    alpha=1.0,
                                    zorder=11)
            axs[i][j].fill_betweenx(Z,
                                    nphi,
                                    values,
                                    where=nphi <= values,
                                    facecolor='#8C1717',
                                    alpha=0.5,
                                    zorder=12)
        if curve == 'DRHO':
            blk_drho = 3.2
            values += blk_drho # this is a hack to get DRHO on RHOB scale
            # NOTE(review): 'nphi' here is left over from the DPHI_SAN
            # iteration above (DPHI_SAN precedes DRHO in `curves`). If
            # DPHI_SAN were removed, or its fill condition were False,
            # this would raise NameError -- confirm this coupling is
            # intended before reordering the curve list.
            axs[i][j].fill_betweenx(Z,
                                    blk_drho,
                                    values,
                                    where=nphi <= values,
                                    facecolor='#CCCCCC',
                                    alpha=0.5,
                                    zorder=12)
        # fill right
        if params['fill_right_cond']:
            axs[i][j].fill_betweenx(Z, values, params['xright'],
                                    facecolor=params['fill_right'],
                                    alpha=1.0, zorder=12)
        # plot curve
        axs[i][j].plot(values, Z, color=params['hexcolor'],
                       lw=lw, zorder=13)
        # set scale of curve
        axs[i][j].set_xlim([params['xleft'], params['xright']])
        # ------------------------------------------------- #
        # curve labels
        # ------------------------------------------------- #
        trans = transforms.blended_transform_factory(axs[i][j].transData,
                                                     axs[i][j].transData)
        # 'magic' spaces stacked labels above the track, scaled to depth.
        magic = -Z[-1] / 12.
        axs[i][j].text(xpos, magic - (magic/4)*(label_shift[i]-1),
                       curve,
                       horizontalalignment='center',
                       verticalalignment='bottom',
                       fontsize=12, color=params['hexcolor'],
                       transform=trans)
        # curve units
        units = '${}$'.format(params['units'])
        if label_shift[i] <= 1:
            axs[i][j].text(xpos, magic*0.5,
                           units,
                           horizontalalignment='center',
                           verticalalignment='top',
                           fontsize=12, color='k',
                           transform=trans)
        # ------------------------------------------------- #
        # scales and tickmarks
        # ------------------------------------------------- #
        axs[i][j].set_xscale(linOrlog)
        axs[i][j].set_ylim([Z[-1], 0])
        axs[i][j].axes.xaxis.set_ticks(xticks)
        axs[i][j].axes.xaxis.set_ticklabels(sxticks, fontsize=8)
        for label in axs[i][j].axes.xaxis.get_ticklabels():
            label.set_rotation(90)
        axs[i][j].tick_params(axis='x', direction='out')
        axs[i][j].xaxis.tick_top()
        axs[i][j].xaxis.set_label_position('top')
        axs[i][j].xaxis.grid(True, which=whichticks,
                             linewidth=0.25, linestyle='-',
                             color='0.75', zorder=100)
        axs[i][j].yaxis.grid(True, which=whichticks,
                             linewidth=0.25, linestyle='-',
                             color='0.75', zorder=100)
        axs[i][j].yaxis.set_ticks(np.arange(0, max(Z), 100))
        if i != 0:
            axs[i][j].set_yticklabels("")
    # ------------------------------------------------- #
    # End of curve loop
    # ------------------------------------------------- #
    # Add Depth label
    axs[0][0].text(0, 1.05, 'MD\n$m$', fontsize='10',
                   horizontalalignment='center',
                   verticalalignment='center',
                   transform=axs[0][0].transAxes)
    axs[0][0].axes.yaxis.get_ticklabels()
    axs[0][0].axes.xaxis.set_ticklabels('')
    for label in axs[0][0].axes.yaxis.get_ticklabels():
        label.set_rotation(90)
        label.set_fontsize(10)
    for label in axs[1][0].axes.xaxis.get_ticklabels():
        label.set_rotation(90)
        label.set_fontsize(10)
    # Add Tops
    try:
        if os.path.exists(tc.tops_file):
            tops = utils.get_tops(tc.tops_file)
            topx = get_curve_params('DT', fname)
            topmidpt = np.amax((topx)['xright'])
            # plot tops
            for i in range(ntracks):
                # NOTE: iteritems/print below are Python 2 idioms, consistent
                # with the rest of this module.
                for mkr, depth in tops.iteritems():
                    # draw horizontal bars at the top position
                    axs[i][-1].axhline(y=depth,
                                       xmin=0.01, xmax=.99,
                                       color='b', lw=2,
                                       alpha=0.5,
                                       zorder=100)
                    # draw text box at the right edge of the last track
                    axs[-1][-1].text(x=topmidpt, y=depth, s=mkr,
                                     alpha=0.5, color='k',
                                     fontsize='8',
                                     horizontalalignment='center',
                                     verticalalignment='center',
                                     zorder=10000,
                                     bbox=dict(facecolor='white',
                                               edgecolor='k',
                                               alpha=0.25,
                                               lw=0.25),
                                     weight='light')
    except AttributeError:
        Notice.warning("No tops for this well")
    except TypeError:
        # We didn't get a tops file so move along.
        print "No tops for this well"
    return gs
|
{"hexsha": "5f16da6e5d48c447fcd54440348594a3b5af85a4", "size": 10843, "ext": "py", "lang": "Python", "max_stars_repo_path": "feature_plot.py", "max_stars_repo_name": "hyperiongeo/geotransect", "max_stars_repo_head_hexsha": "559c4c2fec4b628d8f156e0b0b6d7cdb36323d64", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 10, "max_stars_repo_stars_event_min_datetime": "2015-04-20T08:55:08.000Z", "max_stars_repo_stars_event_max_datetime": "2018-12-10T05:39:33.000Z", "max_issues_repo_path": "feature_plot.py", "max_issues_repo_name": "hyperiongeo/geotransect", "max_issues_repo_head_hexsha": "559c4c2fec4b628d8f156e0b0b6d7cdb36323d64", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 11, "max_issues_repo_issues_event_min_datetime": "2015-03-24T00:44:48.000Z", "max_issues_repo_issues_event_max_datetime": "2015-12-28T16:53:02.000Z", "max_forks_repo_path": "feature_plot.py", "max_forks_repo_name": "hyperiongeo/geotransect", "max_forks_repo_head_hexsha": "559c4c2fec4b628d8f156e0b0b6d7cdb36323d64", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 4, "max_forks_repo_forks_event_min_datetime": "2015-08-17T22:54:16.000Z", "max_forks_repo_forks_event_max_datetime": "2019-10-30T13:27:51.000Z", "avg_line_length": 34.4222222222, "max_line_length": 79, "alphanum_fraction": 0.4562390482, "include": true, "reason": "import numpy", "num_tokens": 2389}
|
Davis Housing: The Print Edition is a free publication listing most/all of the commercial apartments available for lease. It is nicely laid out, lists the amenities, floor plan, map location, and rates for each apartment complex. You should also check out their website.
It can be found at the west entrance to the Coffee House and the Memorial Union information desk, among other places.
It is 8.5 x 11, approx. 120 pages, printed in color on uncoated (nonglossy) paper. This makes it inexpensive as well as highly recyclable.
|
{"hexsha": "ddd39a6a0ac97389c451efd429f0094d1adf0f90", "size": 541, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "lab/davisWiki/Davis_Housing.f", "max_stars_repo_name": "voflo/Search", "max_stars_repo_head_hexsha": "55088b2fe6a9d6c90590f090542e0c0e3c188c7d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "lab/davisWiki/Davis_Housing.f", "max_issues_repo_name": "voflo/Search", "max_issues_repo_head_hexsha": "55088b2fe6a9d6c90590f090542e0c0e3c188c7d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "lab/davisWiki/Davis_Housing.f", "max_forks_repo_name": "voflo/Search", "max_forks_repo_head_hexsha": "55088b2fe6a9d6c90590f090542e0c0e3c188c7d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 77.2857142857, "max_line_length": 270, "alphanum_fraction": 0.7948243993, "num_tokens": 119}
|
/*! \file
\brief A JSON parser.
Copyright (C) 2019-2021 kaoru https://www.tetengo.org/
*/
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <filesystem>
#include <iterator>
#include <memory>
#include <optional>
#include <stdexcept>
#include <string>
#include <string_view>
#include <type_traits>
#include <unordered_map>
#include <utility>
#include <boost/core/noncopyable.hpp>
#include <tetengo/json/element.hpp>
#include <tetengo/json/json_parser.hpp>
#include <tetengo/text/encoder.hpp>
#include <tetengo/text/encoding/utf16.hpp>
#include "tetengo.property.json_parser.hpp"
namespace tetengo::property
{
class json_parser::impl : private boost::noncopyable
{
public:
// types
using value_map_type = json_parser::value_map_type;
// static functions
static const json_parser& instance()
{
static const json_parser singleton{};
return singleton;
}
// functions
value_map_type parse(tetengo::json::json_parser& parser) const
{
value_map_type value_map{};
parse_object(parser, value_map, std::filesystem::path{});
return value_map;
}
private:
// types
using element_type = tetengo::json::element;
// static functions
static bool parse_object(
tetengo::json::json_parser& parser,
value_map_type& value_map,
const std::filesystem::path& key_prefix)
{
if (!next_is_object_open(parser))
{
return false;
}
parser.next();
while (next_is_member_open(parser))
{
if (!parse_member(parser, value_map, key_prefix))
{
return false;
}
}
if (!next_is_object_close(parser))
{
return false;
}
parser.next();
return true;
}
static bool parse_member(
tetengo::json::json_parser& parser,
value_map_type& value_map,
const std::filesystem::path& key_prefix)
{
assert(next_is_member_open(parser));
const auto& member_element = parser.peek();
const auto& attributes = member_element.attributes();
const auto name_found = attributes.find("name");
if (name_found == std::end(attributes))
{
return false;
}
const auto key = key_prefix / unescape(name_found->second);
parser.next();
if (next_is_boolean(parser))
{
if (!parse_boolean(parser, value_map, key))
{
return false;
}
}
else if (next_is_number(parser))
{
if (!parse_number(parser, value_map, key))
{
return false;
}
}
else if (next_is_string(parser))
{
if (!parse_string(parser, value_map, key))
{
return false;
}
}
else if (next_is_object_open(parser))
{
if (!parse_object(parser, value_map, key))
{
return false;
}
}
else if (next_is_primitive(parser))
{
parser.next();
}
else if (next_is_structure_open(parser))
{
if (!skip_structure(parser))
{
return false;
}
}
if (!next_is_member_close(parser))
{
return false;
}
parser.next();
return true;
}
static bool
parse_boolean(tetengo::json::json_parser& parser, value_map_type& value_map, const std::filesystem::path& key)
{
assert(next_is_boolean(parser));
const auto& boolean_element = parser.peek();
const auto o_value = to_bool(boolean_element.value());
if (!o_value)
{
return false;
}
value_map.insert(std::make_pair(key.string(), *o_value));
parser.next();
return true;
}
static bool
parse_number(tetengo::json::json_parser& parser, value_map_type& value_map, const std::filesystem::path& key)
{
assert(next_is_number(parser));
const auto& number_element = parser.peek();
const auto o_value = to_integer(number_element.value());
if (!o_value)
{
return false;
}
value_map.insert(std::make_pair(key.string(), *o_value));
parser.next();
return true;
}
static bool
parse_string(tetengo::json::json_parser& parser, value_map_type& value_map, const std::filesystem::path& key)
{
assert(next_is_string(parser));
const auto& string_element = parser.peek();
value_map.insert(std::make_pair(key.string(), unescape(string_element.value())));
parser.next();
return true;
}
static bool skip_structure(tetengo::json::json_parser& parser)
{
assert(next_is_structure_open(parser));
parser.next();
std::size_t level = 1;
while (level > 0 && parser.has_next())
{
if (next_is_structure_open(parser))
{
++level;
}
else if (next_is_structure_close(parser))
{
--level;
}
parser.next();
}
return level == 0;
}
static bool next_is_object_open(const tetengo::json::json_parser& parser)
{
return next_is(
parser, element_type::type_name_type::object, element_type::type_category_type::structure_open);
}
static bool next_is_object_close(const tetengo::json::json_parser& parser)
{
return next_is(
parser, element_type::type_name_type::object, element_type::type_category_type::structure_close);
}
static bool next_is_member_open(const tetengo::json::json_parser& parser)
{
return next_is(
parser, element_type::type_name_type::member, element_type::type_category_type::structure_open);
}
static bool next_is_member_close(const tetengo::json::json_parser& parser)
{
return next_is(
parser, element_type::type_name_type::member, element_type::type_category_type::structure_close);
}
static bool next_is_boolean(const tetengo::json::json_parser& parser)
{
return next_is(parser, element_type::type_name_type::boolean, element_type::type_category_type::primitive);
}
static bool next_is_number(const tetengo::json::json_parser& parser)
{
return next_is(parser, element_type::type_name_type::number, element_type::type_category_type::primitive);
}
static bool next_is_string(const tetengo::json::json_parser& parser)
{
return next_is(parser, element_type::type_name_type::string, element_type::type_category_type::primitive);
}
static bool next_is_primitive(const tetengo::json::json_parser& parser)
{
return next_is(parser, element_type::type_category_type::primitive);
}
static bool next_is_structure_open(const tetengo::json::json_parser& parser)
{
return next_is(parser, element_type::type_category_type::structure_open);
}
static bool next_is_structure_close(const tetengo::json::json_parser& parser)
{
return next_is(parser, element_type::type_category_type::structure_close);
}
static bool next_is(
const tetengo::json::json_parser& parser,
const element_type::type_name_type name,
const element_type::type_category_type category)
{
return next_is(parser, category) && parser.peek().type().name == name;
}
static bool next_is(const tetengo::json::json_parser& parser, const element_type::type_category_type category)
{
if (!parser.has_next())
{
return false;
}
const auto& next = parser.peek();
return next.type().category == category;
}
static std::optional<bool> to_bool(const std::string& string)
{
if (string == "true")
{
return std::make_optional(true);
}
else if (string == "false")
{
return std::make_optional(false);
}
else
{
return std::nullopt;
}
}
static std::optional<std::uint32_t> to_integer(const std::string& string)
{
try
{
return std::make_optional(std::stoul(string));
}
catch (const std::invalid_argument&)
{
return std::nullopt;
}
}
static std::string unescape(const std::string_view& escaped)
{
std::string unescaped{};
for (auto i = static_cast<std::size_t>(0); i < escaped.length();)
{
if (escaped[i] == '\\')
{
++i;
if (i < escaped.length())
{
if (escaped[i] == 'u' && i + 4 < escaped.length())
{
const auto utf16_unescaped = unescape_utf16(escaped, i);
unescaped += utf16_unescaped.first;
i += utf16_unescaped.second;
}
else
{
unescaped += unescape_control_code(escaped[i]);
++i;
}
}
}
else
{
unescaped += escaped[i];
++i;
}
}
return unescaped;
}
static std::pair<std::string, std::size_t>
unescape_utf16(const std::string_view& escaped, const std::size_t offset)
{
auto index = offset;
++index;
const auto upper_code_string = escaped.substr(index, 4);
const auto o_upper_code = to_utf16_code(upper_code_string);
index += 4;
if (!o_upper_code)
{
return std::make_pair(std::string{ upper_code_string }, index - offset);
}
else if (
is_high_surrogate(*o_upper_code) && index + 5 < escaped.length() && escaped[index] == '\\' &&
escaped[index + 1] == 'u')
{
index += 2;
const auto lower_code_string = escaped.substr(index, 4);
const auto o_lower_code = to_utf16_code(lower_code_string);
index += 4;
if (!o_lower_code)
{
return std::make_pair(
unescape_utf16_codes(*o_upper_code) + std::string{ lower_code_string }, index - offset);
}
else
{
return std::make_pair(unescape_utf16_codes(*o_upper_code, *o_lower_code), index - offset);
}
}
else
{
return std::make_pair(unescape_utf16_codes(*o_upper_code), index - offset);
}
}
static std::optional<char16_t> to_utf16_code(const std::string_view& string)
{
try
{
return std::make_optional(static_cast<char16_t>(std::stoul(std::string{ string }, nullptr, 16)));
}
catch (const std::invalid_argument&)
{
return std::nullopt;
}
}
static bool is_high_surrogate(const char16_t code)
{
return 0xD800 <= code && code <= 0xDBFF;
}
static std::string unescape_utf16_codes(const char16_t upper_code, const char16_t lower_code = 0)
{
static const auto& encoder = tetengo::text::encoder<tetengo::text::encoding::utf16>::instance();
std::u16string utf16{ upper_code };
if (lower_code > 0)
{
utf16 += lower_code;
}
return encoder.decode(utf16);
}
static char unescape_control_code(const char escaped)
{
switch (escaped)
{
case '"':
return '"';
case '\\':
return '\\';
case '/':
return '/';
case 'b':
return '\b';
case 'f':
return '\f';
case 'n':
return '\n';
case 'r':
return '\r';
case 't':
return '\t';
default:
return escaped;
}
}
};
    // Public facade: forwards to the pimpl (m_p_impl is declared in the
    // class definition in the corresponding header).

    //! Returns the process-wide singleton instance.
    const json_parser& json_parser::instance()
    {
        return impl::instance();
    }

    json_parser::~json_parser() = default;

    //! Parses the JSON document held by \a parser into a flat value map.
    json_parser::value_map_type json_parser::parse(tetengo::json::json_parser& parser) const
    {
        return m_p_impl->parse(parser);
    }

    // Private: instances are only created via instance().
    json_parser::json_parser() : m_p_impl{ std::make_unique<impl>() } {}
|
{"hexsha": "8222045da230412a82821c016131165a14685d0d", "size": 14545, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "library/property/cpp/src/tetengo.property.json_parser.cpp", "max_stars_repo_name": "kaorut/tetengo", "max_stars_repo_head_hexsha": "3360cce3e3f4c92b18154927685986c1fa7b4e8e", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "library/property/cpp/src/tetengo.property.json_parser.cpp", "max_issues_repo_name": "kaorut/tetengo", "max_issues_repo_head_hexsha": "3360cce3e3f4c92b18154927685986c1fa7b4e8e", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 153.0, "max_issues_repo_issues_event_min_datetime": "2019-08-11T05:26:36.000Z", "max_issues_repo_issues_event_max_datetime": "2021-06-23T17:24:04.000Z", "max_forks_repo_path": "library/property/cpp/src/tetengo.property.json_parser.cpp", "max_forks_repo_name": "kaorut/tetengo", "max_forks_repo_head_hexsha": "3360cce3e3f4c92b18154927685986c1fa7b4e8e", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.2795698925, "max_line_length": 120, "alphanum_fraction": 0.4829150911, "num_tokens": 2740}
|
import numpy
from keras.utils import np_utils
from tensorflow.keras.callbacks import ModelCheckpoint
from tensorflow.keras.layers import Activation
from tensorflow.keras.layers import BatchNormalization as BatchNorm
from tensorflow.keras.layers import Dense
from tensorflow.keras.layers import Dropout
from tensorflow.keras.layers import LSTM
from tensorflow.keras.models import Sequential
from tensorflow.keras.utils import plot_model
from tensorflow.python.keras.callbacks import TensorBoard
from ds_utils import load_dataset
from midi_utils import generate_notes, convert_to_midi
def prepare_sequences(notes, pitch_names, latent_dim):
    """Prepare the sequences used by the neural network.

    Slides a 100-note window over *notes*; each window of pitch indices
    becomes one input sample and the following note its target.

    Returns a tuple of (raw integer sequences, normalized float array
    shaped (n, 100, 1), one-hot encoded targets).
    """
    sequence_length = 100
    # Map every pitch name onto its integer index.
    note_to_int = {note: idx for idx, note in enumerate(pitch_names)}
    network_input = []
    network_output = []
    for start in range(len(notes) - sequence_length):
        window = notes[start:start + sequence_length]
        target = notes[start + sequence_length]
        network_input.append([note_to_int[item] for item in window])
        network_output.append(note_to_int[target])
    # Reshape into (samples, timesteps, features) for the LSTM layers,
    # then scale the indices into [0, 1).
    normalized_input = numpy.reshape(network_input,
                                     (len(network_input), sequence_length, 1))
    normalized_input = normalized_input / float(latent_dim)
    network_output = np_utils.to_categorical(network_output)
    return network_input, normalized_input, network_output
def build_net(input, latent_dim):
    """Create the structure of the neural network.

    Three stacked 512-unit LSTMs followed by a dense head that emits a
    softmax over the *latent_dim* possible notes. Compiled with
    categorical cross-entropy and RMSprop.
    """
    layers = [
        LSTM(512,
             input_shape=(input.shape[1], input.shape[2]),
             recurrent_dropout=0.3,
             return_sequences=True),
        LSTM(512, return_sequences=True, recurrent_dropout=0.3),
        LSTM(512),
        BatchNorm(),
        Dropout(0.3),
        Dense(256),
        Activation('relu'),
        BatchNorm(),
        Dropout(0.3),
        Dense(latent_dim),
        Activation('softmax'),
    ]
    model = Sequential()
    for layer in layers:
        model.add(layer)
    model.compile(loss='categorical_crossentropy', optimizer='rmsprop')
    return model
def train(model, x, y, epochs, batch_size, save_period):
    """Train the neural network.

    Checkpoints the best-loss weights every *save_period* epochs under
    ./weights/ and streams metrics to ./logs for TensorBoard.
    """
    checkpoint_path = './weights/model-{epoch:02d}-{loss:.3f}.h5'
    callbacks = [
        ModelCheckpoint(
            checkpoint_path,
            monitor='loss',
            verbose=0,
            save_best_only=True,
            mode='min',
            period=save_period,
        ),
        TensorBoard(
            log_dir='./logs',
            histogram_freq=0,
            write_graph=True,
            write_images=False,
        ),
    ]
    model.fit(x, y, epochs=epochs, batch_size=batch_size, callbacks=callbacks)
def plot_model_architecture(model):
    """Write a diagram of the model's layers to model_architecture.png."""
    output_file = 'model_architecture.png'
    plot_model(model, output_file, show_shapes=True)
if __name__ == '__main__':
    # First of all, we need to prepare the dataset and dump it on disk -- one time only!
    # notes = dump_dataset('kaggle_ds_dump.notes')
    # Or, if the dataset was already created:
    notes = load_dataset('kaggle_ds_dump.notes')
    # Vocabulary size: number of distinct notes/chords in the corpus.
    latent_dim = len(set(notes))
    pitch_names = sorted(set(item for item in notes))
    x, x_normalized, y = prepare_sequences(notes, pitch_names, latent_dim)
    # Build model
    model = build_net(x_normalized, latent_dim)
    # If you want to continue training from current weights.
    # NOTE(review): this load is unconditional, so './best/best.h5' must
    # exist on disk -- confirm that is intended for a fresh checkout.
    model.load_weights('./best/best.h5')
    # Train model
    # train(model, x_normalized, y, epochs=4500, batch_size=128, save_period=250)
    # And finally generate a sample
    raw_notes = generate_notes(model, x, pitch_names, latent_dim, generated_notes_number=500)
    convert_to_midi('lstm_samples', raw_notes)
|
{"hexsha": "03a2afde1c86566aad4d8f2c0b7aa01759904edd", "size": 4059, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/lstm.py", "max_stars_repo_name": "ilyamirin/OrganGrinder", "max_stars_repo_head_hexsha": "7cff1a399d1439a06ee0ba90428a6542ebddb966", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/lstm.py", "max_issues_repo_name": "ilyamirin/OrganGrinder", "max_issues_repo_head_hexsha": "7cff1a399d1439a06ee0ba90428a6542ebddb966", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/lstm.py", "max_forks_repo_name": "ilyamirin/OrganGrinder", "max_forks_repo_head_hexsha": "7cff1a399d1439a06ee0ba90428a6542ebddb966", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.825, "max_line_length": 93, "alphanum_fraction": 0.7176644494, "include": true, "reason": "import numpy", "num_tokens": 917}
|
from qtpy import QtCore
from qtpy.QtWidgets import QApplication
import numpy as np
from ..table_dictionary.table_dictionary_handler import TableDictionaryHandler
from ..fitting.initialization_sigma_alpha import InitializationSigmaAlpha
# from iBeatles.py.utilities.math_tools import calculate_inflection_point
class FittingInitializationHandler(object):
    """Initialize the Bragg-edge fit parameters (d_spacing, sigma, alpha,
    a1, a2, a5, a6) from the current lambda-range selection and push them
    into the fitting table.

    Two modes:
      * advanced mode (advanced_table_checkBox checked): a2/a5 are slopes
        of linear fits on each side of the inflection point, a6 and a1 are
        derived from them;
      * basic mode: a1 is the mean of the left-side counts and a2 the
        left/right mean difference.
    """

    # Set to False as soon as any retrieved parameter comes back as NaN.
    all_variables_initialized = True
    advanced_mode = False

    a1 = np.nan  # only used in basic mode, cached so get_a2() can reuse it
    percentage_of_data_to_remove_on_side = 10.  # % of points dropped at each edge

    def __init__(self, parent=None, grand_parent=None):
        self.parent = parent
        self.grand_parent = grand_parent
        # Fresh per-instance buffers.  The original kept this dict as a
        # class attribute, so every instance shared (and mutated) the same
        # object -- a classic mutable-class-attribute bug.
        self.selection_range = {'left_range': {'x_axis': [],
                                               'y_axis': []},
                                'right_range': {'x_axis': [],
                                                'y_axis': []},
                                'inflection': {'x': np.nan,
                                               'y': np.nan}}

    def make_all_active(self):
        """Select every row of the fitting table and refresh the plots."""
        o_table = TableDictionaryHandler(parent=self.parent,
                                         grand_parent=self.grand_parent)
        o_table.full_table_selection_tool(status=True)
        self.grand_parent.fitting_ui.update_table()
        self.grand_parent.fitting_ui.update_bragg_edge_plot()

    def run(self):
        """Launch the sigma/alpha initialization step."""
        InitializationSigmaAlpha(parent=self.parent,
                                 grand_parent=self.grand_parent)

    def finished_up_initialization(self):
        """Callback once sigma/alpha are known: compute the remaining
        parameters and refresh the table (with a wait cursor)."""
        self.advanced_mode = self.parent.ui.advanced_table_checkBox.isChecked()
        if self.parent.sigma_alpha_initialized:
            QApplication.setOverrideCursor(QtCore.Qt.WaitCursor)
            self.retrieve_parameters_and_update_table()
            self.parent.update_table()
            QApplication.restoreOverrideCursor()

    def retrieve_parameters_and_update_table(self):
        """Compute every fit parameter and write it into the fitting table.

        BUG FIX: the original wrote ``self.all_variables_initialized is
        False`` (a no-op identity comparison) everywhere an assignment
        ``= False`` was intended, so the flag never changed.
        """
        table_handler = TableDictionaryHandler(parent=self.parent,
                                               grand_parent=self.grand_parent)
        initialization_table = self.parent.initialization_table

        d_spacing = self.get_d_spacing()
        if np.isnan(d_spacing):
            self.all_variables_initialized = False
        initialization_table['d_spacing'] = d_spacing
        table_handler.fill_table_with_variable(variable_name='d_spacing',
                                               value=d_spacing,
                                               all_keys=True)

        # sigma and alpha are read back from the table and re-broadcast to
        # all keys; they are deliberately NOT stored in initialization_table.
        sigma = self.get_sigma()
        if np.isnan(sigma):
            self.all_variables_initialized = False
        table_handler.fill_table_with_variable(variable_name='sigma',
                                               value=sigma,
                                               all_keys=True)

        alpha = self.get_alpha()
        if np.isnan(alpha):
            self.all_variables_initialized = False
        table_handler.fill_table_with_variable(variable_name='alpha',
                                               value=alpha,
                                               all_keys=True)

        # Prepare the left/right ranges used by a1, a2, a5 and a6.
        self.isolate_left_and_right_part_of_inflection_point()

        if self.advanced_mode:
            # Order matters: a6 needs a2 and a5; a1 needs a2 and a6.
            ordered_names = ['a2', 'a5', 'a6', 'a1']
        else:
            # Basic mode: a2 needs the a1 cached by get_a1().
            ordered_names = ['a1', 'a2']
        getters = {'a1': self.get_a1, 'a2': self.get_a2,
                   'a5': self.get_a5, 'a6': self.get_a6}
        for name in ordered_names:
            value = getters[name]()
            if np.isnan(value):
                self.all_variables_initialized = False
            initialization_table[name] = value
            table_handler.fill_table_with_variable(variable_name=name,
                                                   value=value,
                                                   all_keys=True)

        self.parent.initialization_table = initialization_table

    def isolate_left_and_right_part_of_inflection_point(self):
        """Split the selected Bragg-edge range at the inflection point.

        For now the inflection point is simply the center of the selection
        (an inflection-detection method may replace this later).
        """
        [left_index, right_index] = self.grand_parent.fitting_bragg_edge_linear_selection
        full_x_axis = self.parent.bragg_edge_data['x_axis']
        full_y_axis = self.parent.bragg_edge_data['y_axis']

        # np.int is removed in modern NumPy -- use the builtin int.
        inflection_point_index = int(np.mean([left_index, right_index]))

        self.selection_range['left_range']['y_axis'] = full_y_axis[left_index: inflection_point_index]
        self.selection_range['left_range']['x_axis'] = full_x_axis[left_index: inflection_point_index]
        self.selection_range['right_range']['y_axis'] = full_y_axis[inflection_point_index:]
        self.selection_range['right_range']['x_axis'] = full_x_axis[inflection_point_index:]
        self.selection_range['inflection']['y'] = full_y_axis[inflection_point_index]
        self.selection_range['inflection']['x'] = full_x_axis[inflection_point_index]

    def get_a1(self):
        """a1: derived from the left-side fit (advanced) or the mean of
        the left-side counts after trimming the edge (basic)."""
        if self.advanced_mode:
            # get_a2() and get_a6() must have run first (they cache these).
            return self.a2_intercept + self.a2 * self.a6
        left_range = self.selection_range['left_range']['y_axis']
        nbr_data = len(left_range)
        nbr_data_to_remove = int((self.percentage_of_data_to_remove_on_side / 100.) * nbr_data)
        # BUG FIX: the original sliced [0: -nbr_data_to_remove], which is
        # empty (hence NaN mean) whenever nbr_data_to_remove == 0.
        a1 = np.mean(left_range[: nbr_data - nbr_data_to_remove])
        self.a1 = a1  # cached for get_a2() in basic mode
        return a1

    def get_a2(self):
        """a2: slope of the left-side linear fit (advanced) or the
        absolute left/right mean difference (basic)."""
        if self.advanced_mode:
            x_axis = self.selection_range['left_range']['x_axis']
            y_axis = self.selection_range['left_range']['y_axis']
            nbr_data = len(x_axis)
            nbr_data_to_remove = int((self.percentage_of_data_to_remove_on_side / 100.) * nbr_data)
            # Fit only the first few points of the left range.
            [slope, interception] = np.polyfit(x_axis[0: nbr_data_to_remove],
                                               y_axis[0: nbr_data_to_remove], 1)
            self.a2 = slope  # cached for get_a6()
            self.a2_intercept = interception  # cached for get_a1()
            return slope
        right_range = self.selection_range['right_range']['y_axis']
        nbr_data = len(right_range)
        nbr_data_to_remove = int((self.percentage_of_data_to_remove_on_side / 100.) * nbr_data)
        _mean_right_side = np.mean(right_range[nbr_data_to_remove:])
        # self.a1 was cached by get_a1(), which runs first in basic mode.
        return np.abs(_mean_right_side - self.a1)

    def get_a5(self):
        """a5: slope of the right-side linear fit (advanced mode only)."""
        x_axis = self.selection_range['right_range']['x_axis']
        y_axis = self.selection_range['right_range']['y_axis']
        nbr_data = len(x_axis)
        nbr_data_to_remove = int((self.percentage_of_data_to_remove_on_side / 100.) * nbr_data)
        [slope, interception] = np.polyfit(x_axis[nbr_data_to_remove:],
                                           y_axis[nbr_data_to_remove:], 1)
        self.a5 = slope  # cached for get_a6()
        return slope

    def get_a6(self):
        """See docs folder for full description of formula used to get a6."""
        intensity = self.selection_range['inflection']['y']
        x_edge = self.selection_range['inflection']['x']
        a6 = x_edge - (2. * intensity) / (self.a5 - self.a2)
        self.a6 = a6  # cached to calculate a1
        return a6

    def get_sigma(self):
        """Return sigma as previously stored in the initialization table."""
        return self.parent.initialization_table['sigma']

    def get_alpha(self):
        """Return alpha as previously stored in the initialization table."""
        return self.parent.initialization_table['alpha']

    def get_d_spacing(self):
        """Return the d-spacing from the lambda range selection, using the
        central lambda (2 * d_spacing = lambda).

        Leftover debug print removed; np.float (removed in NumPy >= 1.24)
        replaced by the builtin float.
        """
        lambda_min = float(str(self.parent.ui.lambda_min_lineEdit.text()))
        lambda_max = float(str(self.parent.ui.lambda_max_lineEdit.text()))
        average_lambda = np.mean([lambda_min, lambda_max])
        return average_lambda / 2.
|
{"hexsha": "c83eb3030e4058f37b80dc87a71f14bbc4c1d9ac", "size": 10304, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/iBeatles/fitting/fitting_initialization_handler.py", "max_stars_repo_name": "ornlneutronimaging/iBeatles", "max_stars_repo_head_hexsha": "0a6ca1e18780cf08ad97b6cedede5a23f52bc953", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2017-04-27T06:58:05.000Z", "max_stars_repo_stars_event_max_datetime": "2020-01-21T07:12:30.000Z", "max_issues_repo_path": "src/iBeatles/fitting/fitting_initialization_handler.py", "max_issues_repo_name": "ornlneutronimaging/iBeatles", "max_issues_repo_head_hexsha": "0a6ca1e18780cf08ad97b6cedede5a23f52bc953", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 99, "max_issues_repo_issues_event_min_datetime": "2019-05-09T14:05:56.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-30T19:13:31.000Z", "max_forks_repo_path": "src/iBeatles/fitting/fitting_initialization_handler.py", "max_forks_repo_name": "ornlneutronimaging/iBeatles", "max_forks_repo_head_hexsha": "0a6ca1e18780cf08ad97b6cedede5a23f52bc953", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 42.0571428571, "max_line_length": 104, "alphanum_fraction": 0.5845302795, "include": true, "reason": "import numpy", "num_tokens": 2050}
|
[STATEMENT]
lemma length_filtermap: "length (filtermap pred func tr) \<le> length tr"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. length (Filtermap.filtermap pred func tr) \<le> length tr
[PROOF STEP]
proof(induction tr)
[PROOF STATE]
proof (state)
goal (2 subgoals):
1. length (Filtermap.filtermap pred func []) \<le> length []
2. \<And>a tr. length (Filtermap.filtermap pred func tr) \<le> length tr \<Longrightarrow> length (Filtermap.filtermap pred func (a # tr)) \<le> length (a # tr)
[PROOF STEP]
case (Cons trn tr)
[PROOF STATE]
proof (state)
this:
length (Filtermap.filtermap pred func tr) \<le> length tr
goal (2 subgoals):
1. length (Filtermap.filtermap pred func []) \<le> length []
2. \<And>a tr. length (Filtermap.filtermap pred func tr) \<le> length tr \<Longrightarrow> length (Filtermap.filtermap pred func (a # tr)) \<le> length (a # tr)
[PROOF STEP]
thus ?case
[PROOF STATE]
proof (prove)
using this:
length (Filtermap.filtermap pred func tr) \<le> length tr
goal (1 subgoal):
1. length (Filtermap.filtermap pred func (trn # tr)) \<le> length (trn # tr)
[PROOF STEP]
by (cases "pred trn") auto
[PROOF STATE]
proof (state)
this:
length (Filtermap.filtermap pred func (trn # tr)) \<le> length (trn # tr)
goal (1 subgoal):
1. length (Filtermap.filtermap pred func []) \<le> length []
[PROOF STEP]
qed auto
|
{"llama_tokens": 485, "file": "Bounded_Deducibility_Security_Filtermap", "length": 5}
|
import numpy as np
import pandas as pd
import os
import sys
sys.path.append('/home/akagi/github/RIPS_kircheis/RIPS')
import rect_grid
import cable
# Catalogs of standard conductor code names, keyed by construction type.
# These are the identifiers accepted by cable.cable(name, model).

# ACSR: aluminum conductor, steel reinforced (bird names).
acsr = [
    'Bittern', 'Bluebird', 'Bluejay', 'Bobolink', 'Bunting', 'Canary',
    'Cardinal', 'Chickadee', 'Chukar', 'Cochin', 'Condor', 'Coot',
    'Curlew', 'Dipper', 'Dorking', 'Dotterel', 'Dove', 'Drake', 'Eagle',
    'Egret', 'Falcon', 'Finch', 'Flamingo', 'Flicker', 'Grackle',
    'Grosbeak', 'Grouse', 'Guinea', 'Hawk', 'Hen', 'Ibis', 'Kingbird',
    'Kiwi', 'Lapwing', 'Lark', 'Leghorn', 'Linnet', 'Mallard', 'Martin',
    'Merlin', 'Minorca', 'Oriole', 'Ortolan', 'Osprey', 'Parakeet',
    'Partridge', 'Peacock', 'Pelican', 'Penguin', 'Petrel', 'Pheasant',
    'Pigeon', 'Quail', 'Rail', 'Raven', 'Redwing', 'Robin', 'Rook',
    'Ruddy', 'Sparate', 'Sparrow', 'Starling', 'Swan', 'Swanate',
    'Swift', 'Tern', 'Turkey', 'Waxwing',
]

# ACSS: aluminum conductor, steel supported.
acss = [
    'Avocet', 'Bittern', 'Bluebird', 'Bluejay', 'Bobolink', 'Brant',
    'Bullfinch', 'Bunting', 'Canary', 'Canvasback', 'Cardinal', 'Chukar',
    'Condor', 'Cormorant', 'Corncrake', 'Cuckoo', 'Curlew', 'Dipper',
    'Diver', 'Dove', 'Drake', 'Eagle', 'Egret', 'Falcon', 'Finch',
    'Flamingo', 'Flicker', 'Gannet', 'Goldfinch', 'Grackle', 'Grosbeak',
    'Hawk', 'Hen', 'Heron', 'Hornbill', 'Ibis', 'Joree', 'Junco', 'Kiwi',
    'Lapwing', 'Lark', 'Linnet', 'Macaw', 'Mallard', 'Martin',
    'Mockingbird', 'Nuthatch', 'Oriole', 'Ortolan', 'Ostrich', 'Oxbird',
    'Parakeet', 'Parrot', 'Partridge', 'Peacock', 'Pheasant', 'Phoenix',
    'Plover', 'Popinjay', 'Ptarmigan', 'Puffin', 'Rail', 'Ratite',
    'Redbird', 'Redwing', 'Ringdove', 'Roadrunner', 'Rook', 'Ruddy',
    'Sapsucker', 'Scaup', 'Scissortail', 'Scoter', 'Seahawk', 'Snowbird',
    'Spoonbill', 'Squab', 'Starling', 'Stilt', 'Stork', 'Tailorbird',
    'Teal', 'Tern', 'Thrasher', 'Tody', 'Toucan', 'Towhee', 'Trogon',
    'Turacos', 'Turbit', 'Wagtail', 'Whooper', 'Widgeon', 'Woodcock',
]

# AAC: all-aluminum conductor (flower names).
aac = [
    'Arbutus', 'Aster', 'Bluebell', 'Bluebonnet', 'Canna', 'Carnation',
    'Cockscomb', 'Columbine', 'Coreopsis', 'Cosmos', 'Cowslip',
    'Daffodil', 'Dahlia', 'Daisy', 'Goldenrod', 'Goldentuft', 'Hawkweed',
    'Hawthorn', 'Heuchera', 'Iris', 'Jessamine', 'Larkspur', 'Laurel',
    'Lilac', 'Lupine', 'Magnolia', 'Marigold', 'Meadowsweet',
    'Mistletoe', 'Narcissus', 'Nasturtium', 'Orchid', 'Oxlip', 'Pansy',
    'Peachbell', 'Peony', 'Petunia', 'Phlox', 'Poppy', 'Rose',
    'Sneezewort', 'Syringa', 'Trillium', 'Tulip', 'Valerian', 'Verbena',
    'Violet', 'Zinnia',
]
# Had to remove wood duck because of title() function
# ACSR
# 230
#skylark = cable.cable('Skylark', 'acsr')
# For each catalog, build a DataFrame whose columns are cables and whose rows
# are ambient temperatures 273-333 K (0-60 C): each value is the ampacity at
# that ambient, normalized by the rating at 298 K ambient / 0.61 m/s wind.
# The conductor temperature is held at 348 K (75 C) throughout.
acsr_df = pd.DataFrame()
for k in acsr:
    cable_i = cable.cable(k, 'acsr')
    acsr_df[k] = np.asarray([cable_i.I(348, i, 0.61) for i in np.arange(273+0, 273+61)])/(cable_i.I(348, 298, 0.61))
acss_df = pd.DataFrame()
for k in acss:
    cable_i = cable.cable(k, 'acss')
    acss_df[k] = np.asarray([cable_i.I(348, i, 0.61) for i in np.arange(273+0, 273+61)])/(cable_i.I(348, 298, 0.61))
aac_df = pd.DataFrame()
for k in aac:
    cable_i = cable.cable(k, 'aac')
    aac_df[k] = np.asarray([cable_i.I(348, i, 0.61) for i in np.arange(273+0, 273+61)])/(cable_i.I(348, 298, 0.61))
# NOTE(review): fill_between/xlabel/ylabel/title/clf/ylim/scatter below are
# bare pylab names -- this script presumably ran in an interactive pylab
# session (from pylab import *); confirm before running it as a module.
# Envelope (min..max across all cables) of capacity fraction vs ambient T.
fill_between(acsr_df.index.values, acsr_df.min(axis=1), acsr_df.max(axis=1), color='blue', label='ACSR', alpha=1)
xlabel('Ambient temperature ($^\circ$C)')
ylabel('Fraction of rated capacity')
title('ACSR cable')
clf()
fill_between(acss_df.index.values, acss_df.min(axis=1), acss_df.max(axis=1), color='orange', label='ACSS', alpha=1)
xlabel('Ambient temperature ($^\circ$C)')
ylabel('Fraction of rated capacity')
title('ACSS cable')
clf()
fill_between(aac_df.index.values, aac_df.min(axis=1), aac_df.max(axis=1), color='red', label='AAC', alpha=1)
xlabel('Ambient temperature ($^\circ$C)')
ylabel('Fraction of rated capacity')
title('AAC cable')
ylim(0.4, 1.3)
clf()
#####################
# Join each catalog's capacity fraction at 50 C ambient with the cable
# model table to relate derating to cable geometry.
# NOTE(review): cable_i here is whatever instance the last loop above left
# behind; this assumes .models is identical across instances -- confirm.
acsr_cat = pd.concat([acsr_df.loc[50], cable_i.models['acsr'].T], axis=1)
acss_cat = pd.concat([acss_df.loc[50], cable_i.models['acss'].T], axis=1)
aac_cat = pd.concat([aac_df.loc[50], cable_i.models['aac'].T], axis=1)
# As cable diameter increases, effect of temperature on ampacity increases
scatter(acsr_cat['cable_d'], acsr_cat[50], color='blue', alpha=0.7, label='ACSR')
scatter(acss_cat['cable_d'], acss_cat[50], color='orange', alpha=0.7, label='ACSS')
scatter(aac_cat['cable_d'], aac_cat[50], color='red', alpha=0.7, label='AAC')
xlabel('Cable diameter (m)')
ylabel('Fraction of rated ampacity at 50 $^\circ$C') # at 50 C
title('Reduction in rated ampacity vs. cable diameter')
# Contour plot
# trange = np.asarray([bluebird.I(348, i, np.arange(0,4,0.01)) for i in np.arange(273+0, 273+60, 0.1)])/bluebird.I(348, 273+25, 0.61)
# trange = pd.DataFrame(trange).fillna(0).values
# cf = contourf(trange.T, cmap='jet_r')
# cb = colorbar()
# cf.ax.set_xticklabels([0, 10, 20, 30, 40, 50])
# cf.ax.set_yticklabels(np.linspace(0, 4, 8, endpoint=False))
# title('Weather effects on ampacity')
# ylabel('Wind speed (m/s)')
# xlabel('Ambient temperature (C)')
# cb.set_label('Fraction of Rated Ampacity')
def contour_plot(name, model, trange, vrange, maxtemp, a_s=0.9, e_s=0.7,
                 levels=(0.0, 0.25, 0.5, 0.75, 1.0, 1.25, 1.5, 1.75, 2.0, 2.25)):
    """Contour plot of the fraction of rated ampacity vs ambient
    temperature (x axis) and wind speed (y axis) for one cable.

    Parameters
    ----------
    name, model : str
        Cable name and construction type, passed to cable.cable().
    trange, vrange : tuple
        (start, stop, step) for ambient temperature (K) and wind speed
        (m/s), expanded via np.arange.
    maxtemp : float
        Conductor temperature (K) at which ampacity is evaluated.
    a_s, e_s : float, optional
        Solar absorptivity and emissivity of the conductor surface.
    levels : sequence of float, optional
        Contour levels.  BUG FIX: changed from a list literal to a tuple
        to avoid a mutable default argument.

    NOTE(review): contourf/colorbar/title/xlabel/ylabel are bare pylab
    names -- assumes an interactive pylab session.
    """
    cable_i = cable.cable(name, model)
    # Rating reference: 25 C ambient (298 K), 0.61 m/s wind.
    rated = cable_i.I(maxtemp, 273 + 25, 0.61, a_s=a_s, e_s=e_s)
    a = np.asarray([cable_i.I(maxtemp, t, np.arange(*vrange), a_s=a_s, e_s=e_s)
                    for t in np.arange(*trange)]) / rated
    a = pd.DataFrame(a).fillna(0).values
    cf = contourf(a.T, cmap='jet_r', levels=levels)
    cb = colorbar()
    # NOTE(review): tick labels assume trange spans 0-60 C -- confirm.
    cf.ax.set_xticklabels([0, 10, 20, 30, 40, 50])
    cf.ax.set_yticklabels(np.linspace(0, 4, 8, endpoint=False))
    title('Conductor Temperature: %s $^\circ$C' % (maxtemp - 273))
    ylabel('Wind speed (m/s)')
    xlabel('Ambient temperature ($^\circ$C)')
    cb.set_label('Fraction of Rated Ampacity')
# Temperature/wind-speed contour for 'Bluebird' ACSR at 75 C conductor temp.
contour_plot('Bluebird', 'acsr', (273+0, 273+60, 0.1), (0,4,0.01), 273+75)
#### contour of ampacity vs. temperature and diameter
# NOTE(review): the three assignments below are never read -- contour_diam
# is later called with the same literal values; kept as documented intent.
trange = (273+0, 273+60, 0.1)
drange = (0.005, 0.05, 0.001)
maxtemp = 273+75
def contour_diam(trange, drange, maxtemp, a_s=0.9, e_s=0.7,
                 levels=(0.0, 0.25, 0.5, 0.75, 1.0, 1.25, 1.5, 1.75, 2.0, 2.25)):
    """Contour plot of the fraction of rated ampacity vs conductor
    diameter (x axis) and ambient temperature (y axis).

    Uses the 'Bluebird' ACSR cable as a template and overrides its
    diameter attribute for each value of drange.

    Parameters
    ----------
    trange, drange : tuple
        (start, stop, step) for ambient temperature (K) and conductor
        diameter (m), expanded via np.arange.
    maxtemp : float
        Conductor temperature (K).
    a_s, e_s : float, optional
        Solar absorptivity and emissivity.
    levels : sequence of float, optional
        Contour levels.  BUG FIX: changed from a list literal to a tuple
        to avoid a mutable default argument.

    NOTE(review): contourf/colorbar/title/xlabel/ylabel are bare pylab
    names -- assumes an interactive pylab session.
    """
    cable_i = cable.cable('Bluebird', 'acsr')
    trange = np.arange(*trange)
    drange = np.arange(*drange)
    df = pd.DataFrame()
    for d in drange:
        cable_i.D = d
        # Rating is re-evaluated per diameter (it depends on cable_i.D).
        a = np.asarray([cable_i.I(maxtemp, t, 0.61, a_s=a_s, e_s=e_s)
                        for t in trange]) / cable_i.I(maxtemp, 273 + 25, 0.61, a_s=a_s, e_s=e_s)
        df[d] = a
    a = df.sort_index(axis=1).values
    cf = contourf(a, cmap='jet_r', levels=levels)
    cb = colorbar()
    # NOTE(review): tick labels assume trange spans 0-60 C and drange
    # spans 0-0.05 m -- confirm against the arguments actually passed.
    cf.ax.set_yticklabels([0, 10, 20, 30, 40, 50])
    cf.ax.set_xticklabels(100*np.linspace(0, 0.05, 10, endpoint=False))
    title('Conductor Temperature: %s $^\circ$C' % (maxtemp - 273))
    xlabel('Conductor diameter (cm)')
    ylabel('Ambient temperature ($^\circ$C)')
    cb.set_label('Fraction of Rated Ampacity')
# Diameter/temperature contour, again at 75 C conductor temperature.
contour_diam((273+0, 273+60, 0.1), (0.005, 0.05, 0.001), 273+75)
|
{"hexsha": "acbb5fa68751148bd155dd5cc6294affcca6fcd7", "size": 8433, "ext": "py", "lang": "Python", "max_stars_repo_path": "temporary/cable_range_test.py", "max_stars_repo_name": "mdbartos/RIPS", "max_stars_repo_head_hexsha": "ab654138ccdcd8cb7c4ab53092132e0156812e95", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-04-02T03:05:55.000Z", "max_stars_repo_stars_event_max_datetime": "2021-04-02T03:05:55.000Z", "max_issues_repo_path": "temporary/cable_range_test.py", "max_issues_repo_name": "mdbartos/RIPS", "max_issues_repo_head_hexsha": "ab654138ccdcd8cb7c4ab53092132e0156812e95", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2015-05-13T23:35:43.000Z", "max_issues_repo_issues_event_max_datetime": "2015-05-22T00:51:23.000Z", "max_forks_repo_path": "temporary/cable_range_test.py", "max_forks_repo_name": "mdbartos/RIPS", "max_forks_repo_head_hexsha": "ab654138ccdcd8cb7c4ab53092132e0156812e95", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2015-05-13T23:29:03.000Z", "max_forks_repo_forks_event_max_datetime": "2015-05-21T22:50:15.000Z", "avg_line_length": 45.8315217391, "max_line_length": 160, "alphanum_fraction": 0.5584015178, "include": true, "reason": "import numpy", "num_tokens": 3022}
|
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <cblas.h>
/*
 * Smoke test for cblas_saxpy: computes y := a*x + y over n elements and
 * verifies every result against the closed-form expectation.
 * Returns 0 on success, -1 on mismatch or allocation failure.
 */
int main()
{
    /* set up some data: x[i] = i+1, y[i] = (i+1)^2 */
    int n=300;
    float* x = malloc( n*sizeof(float) );
    float* y = malloc( n*sizeof(float) );
    if( x == NULL || y == NULL )
    {
        /* BUG FIX: the original dereferenced the buffers without checking
           for allocation failure.  free(NULL) is a safe no-op. */
        free(x);
        free(y);
        return -1;
    }
    for( int i=0; i<n; ++i )
    {
        x[i]=i+1;
        y[i]=(i+1)*(i+1);
    }

    /* calculate y = a*x + y */
    float a=7.0f;
    int incx=1;
    int incy=1;
    cblas_saxpy( n, a, x, incx, y, incy );

    /* check it: expected y[i] = 7*(i+1) + (i+1)^2, within 1e-4 relative error */
    int status = 0;
    for( int i=0; i<n; ++i )
    {
        float expected = 7.0f*(i+1) + (i+1)*(i+1);
        if( fabs( y[i]/expected - 1 )>1e-4 ){
            printf("error matching element i=%d expected=%g received=%g\n",i+1,expected,y[i]);
            status=-1;
            break;
        }
    }

    /* free data */
    free(x);
    free(y);
    return status;
}
|
{"hexsha": "332dd426b29eb4769788ff7b848bb5a5a6ef44d7", "size": 807, "ext": "c", "lang": "C", "max_stars_repo_path": "src/CBLAS/test/tstCBLAS.c", "max_stars_repo_name": "murraypurves/BootsOnTheGround", "max_stars_repo_head_hexsha": "15acc4ed064e368f6af5114408f1be8a62749f32", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 4.0, "max_stars_repo_stars_event_min_datetime": "2017-02-01T00:39:29.000Z", "max_stars_repo_stars_event_max_datetime": "2018-08-09T11:53:18.000Z", "max_issues_repo_path": "src/CBLAS/test/tstCBLAS.c", "max_issues_repo_name": "murraypurves/BootsOnTheGround", "max_issues_repo_head_hexsha": "15acc4ed064e368f6af5114408f1be8a62749f32", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 14.0, "max_issues_repo_issues_event_min_datetime": "2017-01-19T17:56:04.000Z", "max_issues_repo_issues_event_max_datetime": "2017-08-27T21:52:35.000Z", "max_forks_repo_path": "src/CBLAS/test/tstCBLAS.c", "max_forks_repo_name": "murraypurves/BootsOnTheGround", "max_forks_repo_head_hexsha": "15acc4ed064e368f6af5114408f1be8a62749f32", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1.0, "max_forks_repo_forks_event_min_datetime": "2019-10-03T12:13:36.000Z", "max_forks_repo_forks_event_max_datetime": "2019-10-03T12:13:36.000Z", "avg_line_length": 18.3409090909, "max_line_length": 94, "alphanum_fraction": 0.4708798017, "num_tokens": 280}
|
import glob
import os
import os.path as osp
import cv2
import random
import numpy as np
# Sample up to `maxNum` HDR renderings per test scene from every main*_xml1
# variant directory, normalize exposure jointly across the variants, and
# save tonemapped 8-bit PNG previews into `dst`.
dst = 'sampled_images_test_1'
# BUG FIX: replaced os.system('mkdir %s' % dst) -- shelling out is fragile
# (fails noisily if the directory exists, unsafe string interpolation).
os.makedirs(dst, exist_ok=True)
maxNum = 3  # max renderings sampled per scene

with open('test.txt', 'r') as fIn:
    testScenes = fIn.readlines()
testScenes = [x.strip() for x in testScenes]

dirs = glob.glob('main*_xml1')
for scene in testScenes:
    # Pick up to maxNum image names for this scene; the same names are
    # assumed to exist in every variant directory.
    hdrNames = glob.glob(osp.join(dirs[0], scene, 'im_*.hdr'))
    hdrNames = [x.split('/')[-1] for x in hdrNames]
    random.shuffle(hdrNames)
    hdrNames = hdrNames[0: min(maxNum, len(hdrNames))]

    # Load the sampled image from every variant directory.
    imgs = []
    for d in dirs:
        for hdrName in hdrNames:
            im = cv2.imread(osp.join(d, scene, hdrName), -1)
            imgs.append(im)
    if not imgs:
        # Scene without renderings: skip instead of dividing by zero below.
        continue

    # Joint exposure: average mean intensity over ALL loaded images.
    # BUG FIX: the original divided the sum by len(hdrNames) although it
    # runs over len(dirs) * len(hdrNames) images.
    imMean = sum(np.mean(im) for im in imgs) / len(imgs)

    for m in range(len(dirs)):
        for n in range(len(hdrNames)):
            im = imgs[m * len(hdrNames) + n]
            im = im / imMean * 0.5  # scale so the joint mean maps to 0.5
            # Tonemap: clip, gamma 2.2, quantize to 8 bits.
            im = (np.clip(im, 0, 1) ** (1.0 / 2.2) * 255).astype(np.uint8)
            imName = scene + '-' + hdrNames[n].split('.')[0] + '-' + dirs[m] + '.png'
            cv2.imwrite(osp.join(dst, imName), im)
|
{"hexsha": "20ed97ca2b00814a5d094a2cfed741433e3e7147", "size": 1338, "ext": "py", "lang": "Python", "max_stars_repo_path": "utils_OR/DatasetCreation/copyImages.py", "max_stars_repo_name": "Jerrypiglet/Total3DUnderstanding", "max_stars_repo_head_hexsha": "655d00a988c839af3b73f8ab890c3f70c1500147", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "utils_OR/DatasetCreation/copyImages.py", "max_issues_repo_name": "Jerrypiglet/Total3DUnderstanding", "max_issues_repo_head_hexsha": "655d00a988c839af3b73f8ab890c3f70c1500147", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "utils_OR/DatasetCreation/copyImages.py", "max_forks_repo_name": "Jerrypiglet/Total3DUnderstanding", "max_forks_repo_head_hexsha": "655d00a988c839af3b73f8ab890c3f70c1500147", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 25.7307692308, "max_line_length": 75, "alphanum_fraction": 0.5313901345, "include": true, "reason": "import numpy", "num_tokens": 409}
|
// This file is auto-generated, don't edit it. Thanks.
#include <alibabacloud/yundun_dbaudit_20180320.hpp>
#include <alibabacloud/endpoint_util.hpp>
#include <alibabacloud/open_api.hpp>
#include <boost/any.hpp>
#include <boost/throw_exception.hpp>
#include <darabonba/core.hpp>
#include <darabonba/util.hpp>
#include <iostream>
#include <map>
#include <vector>
using namespace std;
using namespace Alibabacloud_Yundun-dbaudit20180320;
// Construct the yundun-dbaudit (DB audit) API client on top of the generic
// OpenAPI client. Sets the "central" endpoint rule, validates the config,
// then resolves the concrete endpoint for product "yundun-dbaudit".
// NOTE(review): the namespace token contains a '-' and is not a valid C++
// identifier — presumably a generator/extraction artifact; confirm against
// the header <alibabacloud/yundun_dbaudit_20180320.hpp>.
Alibabacloud_Yundun-dbaudit20180320::Client::Client(const shared_ptr<Alibabacloud_OpenApi::Config>& config) : Alibabacloud_OpenApi::Client(config) {
  _endpointRule = make_shared<string>("central");
  checkConfig(config);
  // Endpoint precedence (explicit endpoint / region map / rule lookup) is
  // handled inside getEndpoint; the result is cached on the client.
  _endpoint = make_shared<string>(getEndpoint(make_shared<string>("yundun-dbaudit"), _regionId, _endpointRule, _network, _suffix, _endpointMap, _endpoint));
};
// Resolve the service endpoint for a request.
// Precedence:
//   1. an endpoint configured explicitly on the client;
//   2. a per-region override in endpointMap;
//   3. the generic rule-based lookup in EndpointUtil.
// Returns the chosen endpoint string.
string Alibabacloud_Yundun-dbaudit20180320::Client::getEndpoint(shared_ptr<string> productId,
                                                                shared_ptr<string> regionId,
                                                                shared_ptr<string> endpointRule,
                                                                shared_ptr<string> network,
                                                                shared_ptr<string> suffix,
                                                                shared_ptr<map<string, string>> endpointMap,
                                                                shared_ptr<string> endpoint) {
  if (!Darabonba_Util::Client::empty(endpoint)) {
    return *endpoint;
  }
  // BUG FIX: the map is keyed by the region *string*; indexing with the
  // shared_ptr itself does not compile (std::map<string, string>::operator[]
  // takes const string&), so regionId must be dereferenced. Note operator[]
  // inserts an empty entry for an unknown region, matching the original
  // generated code's behavior.
  if (!Darabonba_Util::Client::isUnset<map<string, string>>(endpointMap) && !Darabonba_Util::Client::empty(make_shared<string>((*endpointMap)[*regionId]))) {
    return (*endpointMap)[*regionId];
  }
  return Alibabacloud_EndpointUtil::Client::getEndpointRules(productId, regionId, endpointRule, network, suffix);
}
// "AddLogMaskConfig" RPC (API version 2018-03-20): HTTPS POST, AK auth,
// request object serialized into the JSON body.
AddLogMaskConfigResponse Alibabacloud_Yundun-dbaudit20180320::Client::addLogMaskConfigWithOptions(shared_ptr<AddLogMaskConfigRequest> request, shared_ptr<Darabonba_Util::RuntimeOptions> runtime) {
  // Client-side check of required request fields before sending.
  Darabonba_Util::Client::validateModel(request);
  shared_ptr<Alibabacloud_OpenApi::OpenApiRequest> req = make_shared<Alibabacloud_OpenApi::OpenApiRequest>(map<string, boost::any>({
    {"body", boost::any(Darabonba_Util::Client::toMap(request))}
  }));
  return AddLogMaskConfigResponse(doRPCRequest(make_shared<string>("AddLogMaskConfig"), make_shared<string>("2018-03-20"), make_shared<string>("HTTPS"), make_shared<string>("POST"), make_shared<string>("AK"), make_shared<string>("json"), req, runtime));
}
// Convenience overload: same call with default runtime options.
AddLogMaskConfigResponse Alibabacloud_Yundun-dbaudit20180320::Client::addLogMaskConfig(shared_ptr<AddLogMaskConfigRequest> request) {
  shared_ptr<Darabonba_Util::RuntimeOptions> runtime = make_shared<Darabonba_Util::RuntimeOptions>();
  return addLogMaskConfigWithOptions(request, runtime);
}
// "AssociateDbToRule" RPC (API version 2018-03-20): HTTPS POST, AK auth,
// request object serialized into the JSON body.
AssociateDbToRuleResponse Alibabacloud_Yundun-dbaudit20180320::Client::associateDbToRuleWithOptions(shared_ptr<AssociateDbToRuleRequest> request, shared_ptr<Darabonba_Util::RuntimeOptions> runtime) {
  Darabonba_Util::Client::validateModel(request);
  shared_ptr<Alibabacloud_OpenApi::OpenApiRequest> req = make_shared<Alibabacloud_OpenApi::OpenApiRequest>(map<string, boost::any>({
    {"body", boost::any(Darabonba_Util::Client::toMap(request))}
  }));
  return AssociateDbToRuleResponse(doRPCRequest(make_shared<string>("AssociateDbToRule"), make_shared<string>("2018-03-20"), make_shared<string>("HTTPS"), make_shared<string>("POST"), make_shared<string>("AK"), make_shared<string>("json"), req, runtime));
}
// Convenience overload: same call with default runtime options.
AssociateDbToRuleResponse Alibabacloud_Yundun-dbaudit20180320::Client::associateDbToRule(shared_ptr<AssociateDbToRuleRequest> request) {
  shared_ptr<Darabonba_Util::RuntimeOptions> runtime = make_shared<Darabonba_Util::RuntimeOptions>();
  return associateDbToRuleWithOptions(request, runtime);
}
// "AssociateRuleToDb" RPC (API version 2018-03-20): HTTPS POST, AK auth,
// request object serialized into the JSON body.
AssociateRuleToDbResponse Alibabacloud_Yundun-dbaudit20180320::Client::associateRuleToDbWithOptions(shared_ptr<AssociateRuleToDbRequest> request, shared_ptr<Darabonba_Util::RuntimeOptions> runtime) {
  Darabonba_Util::Client::validateModel(request);
  shared_ptr<Alibabacloud_OpenApi::OpenApiRequest> req = make_shared<Alibabacloud_OpenApi::OpenApiRequest>(map<string, boost::any>({
    {"body", boost::any(Darabonba_Util::Client::toMap(request))}
  }));
  return AssociateRuleToDbResponse(doRPCRequest(make_shared<string>("AssociateRuleToDb"), make_shared<string>("2018-03-20"), make_shared<string>("HTTPS"), make_shared<string>("POST"), make_shared<string>("AK"), make_shared<string>("json"), req, runtime));
}
// Convenience overload: same call with default runtime options.
AssociateRuleToDbResponse Alibabacloud_Yundun-dbaudit20180320::Client::associateRuleToDb(shared_ptr<AssociateRuleToDbRequest> request) {
  shared_ptr<Darabonba_Util::RuntimeOptions> runtime = make_shared<Darabonba_Util::RuntimeOptions>();
  return associateRuleToDbWithOptions(request, runtime);
}
// "ChangeAgentStatus" RPC (API version 2018-03-20): HTTPS POST, AK auth,
// request object serialized into the JSON body.
ChangeAgentStatusResponse Alibabacloud_Yundun-dbaudit20180320::Client::changeAgentStatusWithOptions(shared_ptr<ChangeAgentStatusRequest> request, shared_ptr<Darabonba_Util::RuntimeOptions> runtime) {
  Darabonba_Util::Client::validateModel(request);
  shared_ptr<Alibabacloud_OpenApi::OpenApiRequest> req = make_shared<Alibabacloud_OpenApi::OpenApiRequest>(map<string, boost::any>({
    {"body", boost::any(Darabonba_Util::Client::toMap(request))}
  }));
  return ChangeAgentStatusResponse(doRPCRequest(make_shared<string>("ChangeAgentStatus"), make_shared<string>("2018-03-20"), make_shared<string>("HTTPS"), make_shared<string>("POST"), make_shared<string>("AK"), make_shared<string>("json"), req, runtime));
}
// Convenience overload: same call with default runtime options.
ChangeAgentStatusResponse Alibabacloud_Yundun-dbaudit20180320::Client::changeAgentStatus(shared_ptr<ChangeAgentStatusRequest> request) {
  shared_ptr<Darabonba_Util::RuntimeOptions> runtime = make_shared<Darabonba_Util::RuntimeOptions>();
  return changeAgentStatusWithOptions(request, runtime);
}
// "ChangeLogMaskConfigState" RPC (API version 2018-03-20): HTTPS POST, AK auth,
// request object serialized into the JSON body.
ChangeLogMaskConfigStateResponse Alibabacloud_Yundun-dbaudit20180320::Client::changeLogMaskConfigStateWithOptions(shared_ptr<ChangeLogMaskConfigStateRequest> request, shared_ptr<Darabonba_Util::RuntimeOptions> runtime) {
  Darabonba_Util::Client::validateModel(request);
  shared_ptr<Alibabacloud_OpenApi::OpenApiRequest> req = make_shared<Alibabacloud_OpenApi::OpenApiRequest>(map<string, boost::any>({
    {"body", boost::any(Darabonba_Util::Client::toMap(request))}
  }));
  return ChangeLogMaskConfigStateResponse(doRPCRequest(make_shared<string>("ChangeLogMaskConfigState"), make_shared<string>("2018-03-20"), make_shared<string>("HTTPS"), make_shared<string>("POST"), make_shared<string>("AK"), make_shared<string>("json"), req, runtime));
}
// Convenience overload: same call with default runtime options.
ChangeLogMaskConfigStateResponse Alibabacloud_Yundun-dbaudit20180320::Client::changeLogMaskConfigState(shared_ptr<ChangeLogMaskConfigStateRequest> request) {
  shared_ptr<Darabonba_Util::RuntimeOptions> runtime = make_shared<Darabonba_Util::RuntimeOptions>();
  return changeLogMaskConfigStateWithOptions(request, runtime);
}
// "ChangeRulePriority" RPC (API version 2018-03-20): HTTPS POST, AK auth,
// request object serialized into the JSON body.
ChangeRulePriorityResponse Alibabacloud_Yundun-dbaudit20180320::Client::changeRulePriorityWithOptions(shared_ptr<ChangeRulePriorityRequest> request, shared_ptr<Darabonba_Util::RuntimeOptions> runtime) {
  Darabonba_Util::Client::validateModel(request);
  shared_ptr<Alibabacloud_OpenApi::OpenApiRequest> req = make_shared<Alibabacloud_OpenApi::OpenApiRequest>(map<string, boost::any>({
    {"body", boost::any(Darabonba_Util::Client::toMap(request))}
  }));
  return ChangeRulePriorityResponse(doRPCRequest(make_shared<string>("ChangeRulePriority"), make_shared<string>("2018-03-20"), make_shared<string>("HTTPS"), make_shared<string>("POST"), make_shared<string>("AK"), make_shared<string>("json"), req, runtime));
}
// Convenience overload: same call with default runtime options.
ChangeRulePriorityResponse Alibabacloud_Yundun-dbaudit20180320::Client::changeRulePriority(shared_ptr<ChangeRulePriorityRequest> request) {
  shared_ptr<Darabonba_Util::RuntimeOptions> runtime = make_shared<Darabonba_Util::RuntimeOptions>();
  return changeRulePriorityWithOptions(request, runtime);
}
// "ChangeRuleStatus" RPC (API version 2018-03-20): HTTPS POST, AK auth,
// request object serialized into the JSON body.
ChangeRuleStatusResponse Alibabacloud_Yundun-dbaudit20180320::Client::changeRuleStatusWithOptions(shared_ptr<ChangeRuleStatusRequest> request, shared_ptr<Darabonba_Util::RuntimeOptions> runtime) {
  Darabonba_Util::Client::validateModel(request);
  shared_ptr<Alibabacloud_OpenApi::OpenApiRequest> req = make_shared<Alibabacloud_OpenApi::OpenApiRequest>(map<string, boost::any>({
    {"body", boost::any(Darabonba_Util::Client::toMap(request))}
  }));
  return ChangeRuleStatusResponse(doRPCRequest(make_shared<string>("ChangeRuleStatus"), make_shared<string>("2018-03-20"), make_shared<string>("HTTPS"), make_shared<string>("POST"), make_shared<string>("AK"), make_shared<string>("json"), req, runtime));
}
// Convenience overload: same call with default runtime options.
ChangeRuleStatusResponse Alibabacloud_Yundun-dbaudit20180320::Client::changeRuleStatus(shared_ptr<ChangeRuleStatusRequest> request) {
  shared_ptr<Darabonba_Util::RuntimeOptions> runtime = make_shared<Darabonba_Util::RuntimeOptions>();
  return changeRuleStatusWithOptions(request, runtime);
}
// "CheckMailRegistered" RPC (API version 2018-03-20): HTTPS POST, AK auth,
// request object serialized into the JSON body.
CheckMailRegisteredResponse Alibabacloud_Yundun-dbaudit20180320::Client::checkMailRegisteredWithOptions(shared_ptr<CheckMailRegisteredRequest> request, shared_ptr<Darabonba_Util::RuntimeOptions> runtime) {
  Darabonba_Util::Client::validateModel(request);
  shared_ptr<Alibabacloud_OpenApi::OpenApiRequest> req = make_shared<Alibabacloud_OpenApi::OpenApiRequest>(map<string, boost::any>({
    {"body", boost::any(Darabonba_Util::Client::toMap(request))}
  }));
  return CheckMailRegisteredResponse(doRPCRequest(make_shared<string>("CheckMailRegistered"), make_shared<string>("2018-03-20"), make_shared<string>("HTTPS"), make_shared<string>("POST"), make_shared<string>("AK"), make_shared<string>("json"), req, runtime));
}
// Convenience overload: same call with default runtime options.
CheckMailRegisteredResponse Alibabacloud_Yundun-dbaudit20180320::Client::checkMailRegistered(shared_ptr<CheckMailRegisteredRequest> request) {
  shared_ptr<Darabonba_Util::RuntimeOptions> runtime = make_shared<Darabonba_Util::RuntimeOptions>();
  return checkMailRegisteredWithOptions(request, runtime);
}
// "ClearAgentRecords" RPC (API version 2018-03-20): HTTPS POST, AK auth,
// request object serialized into the JSON body.
ClearAgentRecordsResponse Alibabacloud_Yundun-dbaudit20180320::Client::clearAgentRecordsWithOptions(shared_ptr<ClearAgentRecordsRequest> request, shared_ptr<Darabonba_Util::RuntimeOptions> runtime) {
  Darabonba_Util::Client::validateModel(request);
  shared_ptr<Alibabacloud_OpenApi::OpenApiRequest> req = make_shared<Alibabacloud_OpenApi::OpenApiRequest>(map<string, boost::any>({
    {"body", boost::any(Darabonba_Util::Client::toMap(request))}
  }));
  return ClearAgentRecordsResponse(doRPCRequest(make_shared<string>("ClearAgentRecords"), make_shared<string>("2018-03-20"), make_shared<string>("HTTPS"), make_shared<string>("POST"), make_shared<string>("AK"), make_shared<string>("json"), req, runtime));
}
// Convenience overload: same call with default runtime options.
ClearAgentRecordsResponse Alibabacloud_Yundun-dbaudit20180320::Client::clearAgentRecords(shared_ptr<ClearAgentRecordsRequest> request) {
  shared_ptr<Darabonba_Util::RuntimeOptions> runtime = make_shared<Darabonba_Util::RuntimeOptions>();
  return clearAgentRecordsWithOptions(request, runtime);
}
// "ConfigInstanceNetwork" RPC (API version 2018-03-20): HTTPS POST, AK auth,
// request object serialized into the JSON body.
ConfigInstanceNetworkResponse Alibabacloud_Yundun-dbaudit20180320::Client::configInstanceNetworkWithOptions(shared_ptr<ConfigInstanceNetworkRequest> request, shared_ptr<Darabonba_Util::RuntimeOptions> runtime) {
  Darabonba_Util::Client::validateModel(request);
  shared_ptr<Alibabacloud_OpenApi::OpenApiRequest> req = make_shared<Alibabacloud_OpenApi::OpenApiRequest>(map<string, boost::any>({
    {"body", boost::any(Darabonba_Util::Client::toMap(request))}
  }));
  return ConfigInstanceNetworkResponse(doRPCRequest(make_shared<string>("ConfigInstanceNetwork"), make_shared<string>("2018-03-20"), make_shared<string>("HTTPS"), make_shared<string>("POST"), make_shared<string>("AK"), make_shared<string>("json"), req, runtime));
}
// Convenience overload: same call with default runtime options.
ConfigInstanceNetworkResponse Alibabacloud_Yundun-dbaudit20180320::Client::configInstanceNetwork(shared_ptr<ConfigInstanceNetworkRequest> request) {
  shared_ptr<Darabonba_Util::RuntimeOptions> runtime = make_shared<Darabonba_Util::RuntimeOptions>();
  return configInstanceNetworkWithOptions(request, runtime);
}
// "CreateDataSource" RPC (API version 2018-03-20): HTTPS POST, AK auth,
// request object serialized into the JSON body.
CreateDataSourceResponse Alibabacloud_Yundun-dbaudit20180320::Client::createDataSourceWithOptions(shared_ptr<CreateDataSourceRequest> request, shared_ptr<Darabonba_Util::RuntimeOptions> runtime) {
  Darabonba_Util::Client::validateModel(request);
  shared_ptr<Alibabacloud_OpenApi::OpenApiRequest> req = make_shared<Alibabacloud_OpenApi::OpenApiRequest>(map<string, boost::any>({
    {"body", boost::any(Darabonba_Util::Client::toMap(request))}
  }));
  return CreateDataSourceResponse(doRPCRequest(make_shared<string>("CreateDataSource"), make_shared<string>("2018-03-20"), make_shared<string>("HTTPS"), make_shared<string>("POST"), make_shared<string>("AK"), make_shared<string>("json"), req, runtime));
}
// Convenience overload: same call with default runtime options.
CreateDataSourceResponse Alibabacloud_Yundun-dbaudit20180320::Client::createDataSource(shared_ptr<CreateDataSourceRequest> request) {
  shared_ptr<Darabonba_Util::RuntimeOptions> runtime = make_shared<Darabonba_Util::RuntimeOptions>();
  return createDataSourceWithOptions(request, runtime);
}
// "CreateGradeProtectionReport" RPC (API version 2018-03-20): HTTPS POST,
// AK auth, request object serialized into the JSON body.
CreateGradeProtectionReportResponse Alibabacloud_Yundun-dbaudit20180320::Client::createGradeProtectionReportWithOptions(shared_ptr<CreateGradeProtectionReportRequest> request, shared_ptr<Darabonba_Util::RuntimeOptions> runtime) {
  Darabonba_Util::Client::validateModel(request);
  shared_ptr<Alibabacloud_OpenApi::OpenApiRequest> req = make_shared<Alibabacloud_OpenApi::OpenApiRequest>(map<string, boost::any>({
    {"body", boost::any(Darabonba_Util::Client::toMap(request))}
  }));
  return CreateGradeProtectionReportResponse(doRPCRequest(make_shared<string>("CreateGradeProtectionReport"), make_shared<string>("2018-03-20"), make_shared<string>("HTTPS"), make_shared<string>("POST"), make_shared<string>("AK"), make_shared<string>("json"), req, runtime));
}
// Convenience overload: same call with default runtime options.
CreateGradeProtectionReportResponse Alibabacloud_Yundun-dbaudit20180320::Client::createGradeProtectionReport(shared_ptr<CreateGradeProtectionReportRequest> request) {
  shared_ptr<Darabonba_Util::RuntimeOptions> runtime = make_shared<Darabonba_Util::RuntimeOptions>();
  return createGradeProtectionReportWithOptions(request, runtime);
}
// "CreateIntegratedReport" RPC (API version 2018-03-20): HTTPS POST, AK auth,
// request object serialized into the JSON body.
CreateIntegratedReportResponse Alibabacloud_Yundun-dbaudit20180320::Client::createIntegratedReportWithOptions(shared_ptr<CreateIntegratedReportRequest> request, shared_ptr<Darabonba_Util::RuntimeOptions> runtime) {
  Darabonba_Util::Client::validateModel(request);
  shared_ptr<Alibabacloud_OpenApi::OpenApiRequest> req = make_shared<Alibabacloud_OpenApi::OpenApiRequest>(map<string, boost::any>({
    {"body", boost::any(Darabonba_Util::Client::toMap(request))}
  }));
  return CreateIntegratedReportResponse(doRPCRequest(make_shared<string>("CreateIntegratedReport"), make_shared<string>("2018-03-20"), make_shared<string>("HTTPS"), make_shared<string>("POST"), make_shared<string>("AK"), make_shared<string>("json"), req, runtime));
}
// Convenience overload: same call with default runtime options.
CreateIntegratedReportResponse Alibabacloud_Yundun-dbaudit20180320::Client::createIntegratedReport(shared_ptr<CreateIntegratedReportRequest> request) {
  shared_ptr<Darabonba_Util::RuntimeOptions> runtime = make_shared<Darabonba_Util::RuntimeOptions>();
  return createIntegratedReportWithOptions(request, runtime);
}
// "CreateLogAlarmTask" RPC (API version 2018-03-20): HTTPS POST, AK auth,
// request object serialized into the JSON body.
CreateLogAlarmTaskResponse Alibabacloud_Yundun-dbaudit20180320::Client::createLogAlarmTaskWithOptions(shared_ptr<CreateLogAlarmTaskRequest> request, shared_ptr<Darabonba_Util::RuntimeOptions> runtime) {
  Darabonba_Util::Client::validateModel(request);
  shared_ptr<Alibabacloud_OpenApi::OpenApiRequest> req = make_shared<Alibabacloud_OpenApi::OpenApiRequest>(map<string, boost::any>({
    {"body", boost::any(Darabonba_Util::Client::toMap(request))}
  }));
  return CreateLogAlarmTaskResponse(doRPCRequest(make_shared<string>("CreateLogAlarmTask"), make_shared<string>("2018-03-20"), make_shared<string>("HTTPS"), make_shared<string>("POST"), make_shared<string>("AK"), make_shared<string>("json"), req, runtime));
}
// Convenience overload: same call with default runtime options.
CreateLogAlarmTaskResponse Alibabacloud_Yundun-dbaudit20180320::Client::createLogAlarmTask(shared_ptr<CreateLogAlarmTaskRequest> request) {
  shared_ptr<Darabonba_Util::RuntimeOptions> runtime = make_shared<Darabonba_Util::RuntimeOptions>();
  return createLogAlarmTaskWithOptions(request, runtime);
}
// "CreatePCIReport" RPC (API version 2018-03-20): HTTPS POST, AK auth,
// request object serialized into the JSON body.
CreatePCIReportResponse Alibabacloud_Yundun-dbaudit20180320::Client::createPCIReportWithOptions(shared_ptr<CreatePCIReportRequest> request, shared_ptr<Darabonba_Util::RuntimeOptions> runtime) {
  Darabonba_Util::Client::validateModel(request);
  shared_ptr<Alibabacloud_OpenApi::OpenApiRequest> req = make_shared<Alibabacloud_OpenApi::OpenApiRequest>(map<string, boost::any>({
    {"body", boost::any(Darabonba_Util::Client::toMap(request))}
  }));
  return CreatePCIReportResponse(doRPCRequest(make_shared<string>("CreatePCIReport"), make_shared<string>("2018-03-20"), make_shared<string>("HTTPS"), make_shared<string>("POST"), make_shared<string>("AK"), make_shared<string>("json"), req, runtime));
}
// Convenience overload: same call with default runtime options.
CreatePCIReportResponse Alibabacloud_Yundun-dbaudit20180320::Client::createPCIReport(shared_ptr<CreatePCIReportRequest> request) {
  shared_ptr<Darabonba_Util::RuntimeOptions> runtime = make_shared<Darabonba_Util::RuntimeOptions>();
  return createPCIReportWithOptions(request, runtime);
}
// "CreateReportPushTask" RPC (API version 2018-03-20): HTTPS POST, AK auth,
// request object serialized into the JSON body.
CreateReportPushTaskResponse Alibabacloud_Yundun-dbaudit20180320::Client::createReportPushTaskWithOptions(shared_ptr<CreateReportPushTaskRequest> request, shared_ptr<Darabonba_Util::RuntimeOptions> runtime) {
  Darabonba_Util::Client::validateModel(request);
  shared_ptr<Alibabacloud_OpenApi::OpenApiRequest> req = make_shared<Alibabacloud_OpenApi::OpenApiRequest>(map<string, boost::any>({
    {"body", boost::any(Darabonba_Util::Client::toMap(request))}
  }));
  return CreateReportPushTaskResponse(doRPCRequest(make_shared<string>("CreateReportPushTask"), make_shared<string>("2018-03-20"), make_shared<string>("HTTPS"), make_shared<string>("POST"), make_shared<string>("AK"), make_shared<string>("json"), req, runtime));
}
// Convenience overload: same call with default runtime options.
CreateReportPushTaskResponse Alibabacloud_Yundun-dbaudit20180320::Client::createReportPushTask(shared_ptr<CreateReportPushTaskRequest> request) {
  shared_ptr<Darabonba_Util::RuntimeOptions> runtime = make_shared<Darabonba_Util::RuntimeOptions>();
  return createReportPushTaskWithOptions(request, runtime);
}
// "CreateRule" RPC (API version 2018-03-20): HTTPS POST, AK auth,
// request object serialized into the JSON body.
CreateRuleResponse Alibabacloud_Yundun-dbaudit20180320::Client::createRuleWithOptions(shared_ptr<CreateRuleRequest> request, shared_ptr<Darabonba_Util::RuntimeOptions> runtime) {
  Darabonba_Util::Client::validateModel(request);
  shared_ptr<Alibabacloud_OpenApi::OpenApiRequest> req = make_shared<Alibabacloud_OpenApi::OpenApiRequest>(map<string, boost::any>({
    {"body", boost::any(Darabonba_Util::Client::toMap(request))}
  }));
  return CreateRuleResponse(doRPCRequest(make_shared<string>("CreateRule"), make_shared<string>("2018-03-20"), make_shared<string>("HTTPS"), make_shared<string>("POST"), make_shared<string>("AK"), make_shared<string>("json"), req, runtime));
}
// Convenience overload: same call with default runtime options.
CreateRuleResponse Alibabacloud_Yundun-dbaudit20180320::Client::createRule(shared_ptr<CreateRuleRequest> request) {
  shared_ptr<Darabonba_Util::RuntimeOptions> runtime = make_shared<Darabonba_Util::RuntimeOptions>();
  return createRuleWithOptions(request, runtime);
}
// "CreateRuleGroup" RPC (API version 2018-03-20): HTTPS POST, AK auth,
// request object serialized into the JSON body.
CreateRuleGroupResponse Alibabacloud_Yundun-dbaudit20180320::Client::createRuleGroupWithOptions(shared_ptr<CreateRuleGroupRequest> request, shared_ptr<Darabonba_Util::RuntimeOptions> runtime) {
  Darabonba_Util::Client::validateModel(request);
  shared_ptr<Alibabacloud_OpenApi::OpenApiRequest> req = make_shared<Alibabacloud_OpenApi::OpenApiRequest>(map<string, boost::any>({
    {"body", boost::any(Darabonba_Util::Client::toMap(request))}
  }));
  return CreateRuleGroupResponse(doRPCRequest(make_shared<string>("CreateRuleGroup"), make_shared<string>("2018-03-20"), make_shared<string>("HTTPS"), make_shared<string>("POST"), make_shared<string>("AK"), make_shared<string>("json"), req, runtime));
}
// Convenience overload: same call with default runtime options.
CreateRuleGroupResponse Alibabacloud_Yundun-dbaudit20180320::Client::createRuleGroup(shared_ptr<CreateRuleGroupRequest> request) {
  shared_ptr<Darabonba_Util::RuntimeOptions> runtime = make_shared<Darabonba_Util::RuntimeOptions>();
  return createRuleGroupWithOptions(request, runtime);
}
// "CreateSOXReport" RPC (API version 2018-03-20): HTTPS POST, AK auth,
// request object serialized into the JSON body.
CreateSOXReportResponse Alibabacloud_Yundun-dbaudit20180320::Client::createSOXReportWithOptions(shared_ptr<CreateSOXReportRequest> request, shared_ptr<Darabonba_Util::RuntimeOptions> runtime) {
  Darabonba_Util::Client::validateModel(request);
  shared_ptr<Alibabacloud_OpenApi::OpenApiRequest> req = make_shared<Alibabacloud_OpenApi::OpenApiRequest>(map<string, boost::any>({
    {"body", boost::any(Darabonba_Util::Client::toMap(request))}
  }));
  return CreateSOXReportResponse(doRPCRequest(make_shared<string>("CreateSOXReport"), make_shared<string>("2018-03-20"), make_shared<string>("HTTPS"), make_shared<string>("POST"), make_shared<string>("AK"), make_shared<string>("json"), req, runtime));
}
// Convenience overload: same call with default runtime options.
CreateSOXReportResponse Alibabacloud_Yundun-dbaudit20180320::Client::createSOXReport(shared_ptr<CreateSOXReportRequest> request) {
  shared_ptr<Darabonba_Util::RuntimeOptions> runtime = make_shared<Darabonba_Util::RuntimeOptions>();
  return createSOXReportWithOptions(request, runtime);
}
// "CreateSqlRule" RPC (API version 2018-03-20): HTTPS POST, AK auth,
// request object serialized into the JSON body.
CreateSqlRuleResponse Alibabacloud_Yundun-dbaudit20180320::Client::createSqlRuleWithOptions(shared_ptr<CreateSqlRuleRequest> request, shared_ptr<Darabonba_Util::RuntimeOptions> runtime) {
  Darabonba_Util::Client::validateModel(request);
  shared_ptr<Alibabacloud_OpenApi::OpenApiRequest> req = make_shared<Alibabacloud_OpenApi::OpenApiRequest>(map<string, boost::any>({
    {"body", boost::any(Darabonba_Util::Client::toMap(request))}
  }));
  return CreateSqlRuleResponse(doRPCRequest(make_shared<string>("CreateSqlRule"), make_shared<string>("2018-03-20"), make_shared<string>("HTTPS"), make_shared<string>("POST"), make_shared<string>("AK"), make_shared<string>("json"), req, runtime));
}
// Convenience overload: same call with default runtime options.
CreateSqlRuleResponse Alibabacloud_Yundun-dbaudit20180320::Client::createSqlRule(shared_ptr<CreateSqlRuleRequest> request) {
  shared_ptr<Darabonba_Util::RuntimeOptions> runtime = make_shared<Darabonba_Util::RuntimeOptions>();
  return createSqlRuleWithOptions(request, runtime);
}
// "CreateSystemAlarmTask" RPC (API version 2018-03-20): HTTPS POST, AK auth,
// request object serialized into the JSON body.
CreateSystemAlarmTaskResponse Alibabacloud_Yundun-dbaudit20180320::Client::createSystemAlarmTaskWithOptions(shared_ptr<CreateSystemAlarmTaskRequest> request, shared_ptr<Darabonba_Util::RuntimeOptions> runtime) {
  Darabonba_Util::Client::validateModel(request);
  shared_ptr<Alibabacloud_OpenApi::OpenApiRequest> req = make_shared<Alibabacloud_OpenApi::OpenApiRequest>(map<string, boost::any>({
    {"body", boost::any(Darabonba_Util::Client::toMap(request))}
  }));
  return CreateSystemAlarmTaskResponse(doRPCRequest(make_shared<string>("CreateSystemAlarmTask"), make_shared<string>("2018-03-20"), make_shared<string>("HTTPS"), make_shared<string>("POST"), make_shared<string>("AK"), make_shared<string>("json"), req, runtime));
}
// Convenience overload: same call with default runtime options.
CreateSystemAlarmTaskResponse Alibabacloud_Yundun-dbaudit20180320::Client::createSystemAlarmTask(shared_ptr<CreateSystemAlarmTaskRequest> request) {
  shared_ptr<Darabonba_Util::RuntimeOptions> runtime = make_shared<Darabonba_Util::RuntimeOptions>();
  return createSystemAlarmTaskWithOptions(request, runtime);
}
// "DeleteAlarmTask" RPC (API version 2018-03-20): HTTPS POST, AK auth,
// request object serialized into the JSON body.
DeleteAlarmTaskResponse Alibabacloud_Yundun-dbaudit20180320::Client::deleteAlarmTaskWithOptions(shared_ptr<DeleteAlarmTaskRequest> request, shared_ptr<Darabonba_Util::RuntimeOptions> runtime) {
  Darabonba_Util::Client::validateModel(request);
  shared_ptr<Alibabacloud_OpenApi::OpenApiRequest> req = make_shared<Alibabacloud_OpenApi::OpenApiRequest>(map<string, boost::any>({
    {"body", boost::any(Darabonba_Util::Client::toMap(request))}
  }));
  return DeleteAlarmTaskResponse(doRPCRequest(make_shared<string>("DeleteAlarmTask"), make_shared<string>("2018-03-20"), make_shared<string>("HTTPS"), make_shared<string>("POST"), make_shared<string>("AK"), make_shared<string>("json"), req, runtime));
}
// Convenience overload: same call with default runtime options.
DeleteAlarmTaskResponse Alibabacloud_Yundun-dbaudit20180320::Client::deleteAlarmTask(shared_ptr<DeleteAlarmTaskRequest> request) {
  shared_ptr<Darabonba_Util::RuntimeOptions> runtime = make_shared<Darabonba_Util::RuntimeOptions>();
  return deleteAlarmTaskWithOptions(request, runtime);
}
// "DeleteDataSource" RPC (API version 2018-03-20): HTTPS POST, AK auth,
// request object serialized into the JSON body.
DeleteDataSourceResponse Alibabacloud_Yundun-dbaudit20180320::Client::deleteDataSourceWithOptions(shared_ptr<DeleteDataSourceRequest> request, shared_ptr<Darabonba_Util::RuntimeOptions> runtime) {
  Darabonba_Util::Client::validateModel(request);
  shared_ptr<Alibabacloud_OpenApi::OpenApiRequest> req = make_shared<Alibabacloud_OpenApi::OpenApiRequest>(map<string, boost::any>({
    {"body", boost::any(Darabonba_Util::Client::toMap(request))}
  }));
  return DeleteDataSourceResponse(doRPCRequest(make_shared<string>("DeleteDataSource"), make_shared<string>("2018-03-20"), make_shared<string>("HTTPS"), make_shared<string>("POST"), make_shared<string>("AK"), make_shared<string>("json"), req, runtime));
}
// Convenience overload: same call with default runtime options.
DeleteDataSourceResponse Alibabacloud_Yundun-dbaudit20180320::Client::deleteDataSource(shared_ptr<DeleteDataSourceRequest> request) {
  shared_ptr<Darabonba_Util::RuntimeOptions> runtime = make_shared<Darabonba_Util::RuntimeOptions>();
  return deleteDataSourceWithOptions(request, runtime);
}
// "DeleteReportPushTask" RPC (API version 2018-03-20): HTTPS POST, AK auth,
// request object serialized into the JSON body.
DeleteReportPushTaskResponse Alibabacloud_Yundun-dbaudit20180320::Client::deleteReportPushTaskWithOptions(shared_ptr<DeleteReportPushTaskRequest> request, shared_ptr<Darabonba_Util::RuntimeOptions> runtime) {
  Darabonba_Util::Client::validateModel(request);
  shared_ptr<Alibabacloud_OpenApi::OpenApiRequest> req = make_shared<Alibabacloud_OpenApi::OpenApiRequest>(map<string, boost::any>({
    {"body", boost::any(Darabonba_Util::Client::toMap(request))}
  }));
  return DeleteReportPushTaskResponse(doRPCRequest(make_shared<string>("DeleteReportPushTask"), make_shared<string>("2018-03-20"), make_shared<string>("HTTPS"), make_shared<string>("POST"), make_shared<string>("AK"), make_shared<string>("json"), req, runtime));
}
// Convenience overload: same call with default runtime options.
DeleteReportPushTaskResponse Alibabacloud_Yundun-dbaudit20180320::Client::deleteReportPushTask(shared_ptr<DeleteReportPushTaskRequest> request) {
  shared_ptr<Darabonba_Util::RuntimeOptions> runtime = make_shared<Darabonba_Util::RuntimeOptions>();
  return deleteReportPushTaskWithOptions(request, runtime);
}
// "DeleteRule" RPC (API version 2018-03-20): HTTPS POST, AK auth,
// request object serialized into the JSON body.
DeleteRuleResponse Alibabacloud_Yundun-dbaudit20180320::Client::deleteRuleWithOptions(shared_ptr<DeleteRuleRequest> request, shared_ptr<Darabonba_Util::RuntimeOptions> runtime) {
  Darabonba_Util::Client::validateModel(request);
  shared_ptr<Alibabacloud_OpenApi::OpenApiRequest> req = make_shared<Alibabacloud_OpenApi::OpenApiRequest>(map<string, boost::any>({
    {"body", boost::any(Darabonba_Util::Client::toMap(request))}
  }));
  return DeleteRuleResponse(doRPCRequest(make_shared<string>("DeleteRule"), make_shared<string>("2018-03-20"), make_shared<string>("HTTPS"), make_shared<string>("POST"), make_shared<string>("AK"), make_shared<string>("json"), req, runtime));
}
// Convenience overload: same call with default runtime options.
DeleteRuleResponse Alibabacloud_Yundun-dbaudit20180320::Client::deleteRule(shared_ptr<DeleteRuleRequest> request) {
  shared_ptr<Darabonba_Util::RuntimeOptions> runtime = make_shared<Darabonba_Util::RuntimeOptions>();
  return deleteRuleWithOptions(request, runtime);
}
// "DeleteRuleGroup" RPC (API version 2018-03-20): HTTPS POST, AK auth,
// request object serialized into the JSON body.
DeleteRuleGroupResponse Alibabacloud_Yundun-dbaudit20180320::Client::deleteRuleGroupWithOptions(shared_ptr<DeleteRuleGroupRequest> request, shared_ptr<Darabonba_Util::RuntimeOptions> runtime) {
  Darabonba_Util::Client::validateModel(request);
  shared_ptr<Alibabacloud_OpenApi::OpenApiRequest> req = make_shared<Alibabacloud_OpenApi::OpenApiRequest>(map<string, boost::any>({
    {"body", boost::any(Darabonba_Util::Client::toMap(request))}
  }));
  return DeleteRuleGroupResponse(doRPCRequest(make_shared<string>("DeleteRuleGroup"), make_shared<string>("2018-03-20"), make_shared<string>("HTTPS"), make_shared<string>("POST"), make_shared<string>("AK"), make_shared<string>("json"), req, runtime));
}
// Convenience overload: same call with default runtime options.
DeleteRuleGroupResponse Alibabacloud_Yundun-dbaudit20180320::Client::deleteRuleGroup(shared_ptr<DeleteRuleGroupRequest> request) {
  shared_ptr<Darabonba_Util::RuntimeOptions> runtime = make_shared<Darabonba_Util::RuntimeOptions>();
  return deleteRuleGroupWithOptions(request, runtime);
}
// "DeregisterTemplatesFromRule" RPC (API version 2018-03-20): HTTPS POST,
// AK auth, request object serialized into the JSON body.
DeregisterTemplatesFromRuleResponse Alibabacloud_Yundun-dbaudit20180320::Client::deregisterTemplatesFromRuleWithOptions(shared_ptr<DeregisterTemplatesFromRuleRequest> request, shared_ptr<Darabonba_Util::RuntimeOptions> runtime) {
  Darabonba_Util::Client::validateModel(request);
  shared_ptr<Alibabacloud_OpenApi::OpenApiRequest> req = make_shared<Alibabacloud_OpenApi::OpenApiRequest>(map<string, boost::any>({
    {"body", boost::any(Darabonba_Util::Client::toMap(request))}
  }));
  return DeregisterTemplatesFromRuleResponse(doRPCRequest(make_shared<string>("DeregisterTemplatesFromRule"), make_shared<string>("2018-03-20"), make_shared<string>("HTTPS"), make_shared<string>("POST"), make_shared<string>("AK"), make_shared<string>("json"), req, runtime));
}
// Convenience overload: same call with default runtime options.
DeregisterTemplatesFromRuleResponse Alibabacloud_Yundun-dbaudit20180320::Client::deregisterTemplatesFromRule(shared_ptr<DeregisterTemplatesFromRuleRequest> request) {
  shared_ptr<Darabonba_Util::RuntimeOptions> runtime = make_shared<Darabonba_Util::RuntimeOptions>();
  return deregisterTemplatesFromRuleWithOptions(request, runtime);
}
// "DescribeInstanceAttribute" RPC (API version 2018-03-20): HTTPS POST,
// AK auth, request object serialized into the JSON body.
DescribeInstanceAttributeResponse Alibabacloud_Yundun-dbaudit20180320::Client::describeInstanceAttributeWithOptions(shared_ptr<DescribeInstanceAttributeRequest> request, shared_ptr<Darabonba_Util::RuntimeOptions> runtime) {
  Darabonba_Util::Client::validateModel(request);
  shared_ptr<Alibabacloud_OpenApi::OpenApiRequest> req = make_shared<Alibabacloud_OpenApi::OpenApiRequest>(map<string, boost::any>({
    {"body", boost::any(Darabonba_Util::Client::toMap(request))}
  }));
  return DescribeInstanceAttributeResponse(doRPCRequest(make_shared<string>("DescribeInstanceAttribute"), make_shared<string>("2018-03-20"), make_shared<string>("HTTPS"), make_shared<string>("POST"), make_shared<string>("AK"), make_shared<string>("json"), req, runtime));
}
// Convenience overload: same call with default runtime options.
DescribeInstanceAttributeResponse Alibabacloud_Yundun-dbaudit20180320::Client::describeInstanceAttribute(shared_ptr<DescribeInstanceAttributeRequest> request) {
  shared_ptr<Darabonba_Util::RuntimeOptions> runtime = make_shared<Darabonba_Util::RuntimeOptions>();
  return describeInstanceAttributeWithOptions(request, runtime);
}
// "DescribeInstances" RPC (API version 2018-03-20): HTTPS POST, AK auth,
// request object serialized into the JSON body.
DescribeInstancesResponse Alibabacloud_Yundun-dbaudit20180320::Client::describeInstancesWithOptions(shared_ptr<DescribeInstancesRequest> request, shared_ptr<Darabonba_Util::RuntimeOptions> runtime) {
  Darabonba_Util::Client::validateModel(request);
  shared_ptr<Alibabacloud_OpenApi::OpenApiRequest> req = make_shared<Alibabacloud_OpenApi::OpenApiRequest>(map<string, boost::any>({
    {"body", boost::any(Darabonba_Util::Client::toMap(request))}
  }));
  return DescribeInstancesResponse(doRPCRequest(make_shared<string>("DescribeInstances"), make_shared<string>("2018-03-20"), make_shared<string>("HTTPS"), make_shared<string>("POST"), make_shared<string>("AK"), make_shared<string>("json"), req, runtime));
}
// Convenience overload: same call with default runtime options.
DescribeInstancesResponse Alibabacloud_Yundun-dbaudit20180320::Client::describeInstances(shared_ptr<DescribeInstancesRequest> request) {
  shared_ptr<Darabonba_Util::RuntimeOptions> runtime = make_shared<Darabonba_Util::RuntimeOptions>();
  return describeInstancesWithOptions(request, runtime);
}
// "DescribeLoginTicket" RPC (API version 2018-03-20): HTTPS POST, AK auth,
// request object serialized into the JSON body.
DescribeLoginTicketResponse Alibabacloud_Yundun-dbaudit20180320::Client::describeLoginTicketWithOptions(shared_ptr<DescribeLoginTicketRequest> request, shared_ptr<Darabonba_Util::RuntimeOptions> runtime) {
  Darabonba_Util::Client::validateModel(request);
  shared_ptr<Alibabacloud_OpenApi::OpenApiRequest> req = make_shared<Alibabacloud_OpenApi::OpenApiRequest>(map<string, boost::any>({
    {"body", boost::any(Darabonba_Util::Client::toMap(request))}
  }));
  return DescribeLoginTicketResponse(doRPCRequest(make_shared<string>("DescribeLoginTicket"), make_shared<string>("2018-03-20"), make_shared<string>("HTTPS"), make_shared<string>("POST"), make_shared<string>("AK"), make_shared<string>("json"), req, runtime));
}
// Convenience overload: same call with default runtime options.
DescribeLoginTicketResponse Alibabacloud_Yundun-dbaudit20180320::Client::describeLoginTicket(shared_ptr<DescribeLoginTicketRequest> request) {
  shared_ptr<Darabonba_Util::RuntimeOptions> runtime = make_shared<Darabonba_Util::RuntimeOptions>();
  return describeLoginTicketWithOptions(request, runtime);
}
// "DescribeRegions" RPC (API version 2018-03-20): HTTPS POST, AK auth,
// request object serialized into the JSON body.
DescribeRegionsResponse Alibabacloud_Yundun-dbaudit20180320::Client::describeRegionsWithOptions(shared_ptr<DescribeRegionsRequest> request, shared_ptr<Darabonba_Util::RuntimeOptions> runtime) {
  Darabonba_Util::Client::validateModel(request);
  shared_ptr<Alibabacloud_OpenApi::OpenApiRequest> req = make_shared<Alibabacloud_OpenApi::OpenApiRequest>(map<string, boost::any>({
    {"body", boost::any(Darabonba_Util::Client::toMap(request))}
  }));
  return DescribeRegionsResponse(doRPCRequest(make_shared<string>("DescribeRegions"), make_shared<string>("2018-03-20"), make_shared<string>("HTTPS"), make_shared<string>("POST"), make_shared<string>("AK"), make_shared<string>("json"), req, runtime));
}
// Convenience overload: same call with default runtime options.
DescribeRegionsResponse Alibabacloud_Yundun-dbaudit20180320::Client::describeRegions(shared_ptr<DescribeRegionsRequest> request) {
  shared_ptr<Darabonba_Util::RuntimeOptions> runtime = make_shared<Darabonba_Util::RuntimeOptions>();
  return describeRegionsWithOptions(request, runtime);
}
DescribeSyncInfoResponse Alibabacloud_Yundun-dbaudit20180320::Client::describeSyncInfoWithOptions(shared_ptr<DescribeSyncInfoRequest> request, shared_ptr<Darabonba_Util::RuntimeOptions> runtime) {
  // Validate required request fields before serialization.
  Darabonba_Util::Client::validateModel(request);
  // The whole request model is serialized and sent as the RPC body.
  map<string, boost::any> body = {
    {"body", boost::any(Darabonba_Util::Client::toMap(request))}
  };
  shared_ptr<Alibabacloud_OpenApi::OpenApiRequest> openApiReq = make_shared<Alibabacloud_OpenApi::OpenApiRequest>(body);
  // RPC call: action "DescribeSyncInfo", API version 2018-03-20, HTTPS POST, AK credentials, JSON payload.
  return DescribeSyncInfoResponse(doRPCRequest(make_shared<string>("DescribeSyncInfo"), make_shared<string>("2018-03-20"), make_shared<string>("HTTPS"), make_shared<string>("POST"), make_shared<string>("AK"), make_shared<string>("json"), openApiReq, runtime));
}
DescribeSyncInfoResponse Alibabacloud_Yundun-dbaudit20180320::Client::describeSyncInfo(shared_ptr<DescribeSyncInfoRequest> request) {
  // Convenience overload: delegate with default runtime options.
  return describeSyncInfoWithOptions(request, make_shared<Darabonba_Util::RuntimeOptions>());
}
DisableLogMaskConfigsResponse Alibabacloud_Yundun-dbaudit20180320::Client::disableLogMaskConfigsWithOptions(shared_ptr<DisableLogMaskConfigsRequest> request, shared_ptr<Darabonba_Util::RuntimeOptions> runtime) {
  // Validate required request fields before serialization.
  Darabonba_Util::Client::validateModel(request);
  // The whole request model is serialized and sent as the RPC body.
  map<string, boost::any> body = {
    {"body", boost::any(Darabonba_Util::Client::toMap(request))}
  };
  shared_ptr<Alibabacloud_OpenApi::OpenApiRequest> openApiReq = make_shared<Alibabacloud_OpenApi::OpenApiRequest>(body);
  // RPC call: action "DisableLogMaskConfigs", API version 2018-03-20, HTTPS POST, AK credentials, JSON payload.
  return DisableLogMaskConfigsResponse(doRPCRequest(make_shared<string>("DisableLogMaskConfigs"), make_shared<string>("2018-03-20"), make_shared<string>("HTTPS"), make_shared<string>("POST"), make_shared<string>("AK"), make_shared<string>("json"), openApiReq, runtime));
}
DisableLogMaskConfigsResponse Alibabacloud_Yundun-dbaudit20180320::Client::disableLogMaskConfigs(shared_ptr<DisableLogMaskConfigsRequest> request) {
  // Convenience overload: delegate with default runtime options.
  return disableLogMaskConfigsWithOptions(request, make_shared<Darabonba_Util::RuntimeOptions>());
}
DissociateRulesFromDbResponse Alibabacloud_Yundun-dbaudit20180320::Client::dissociateRulesFromDbWithOptions(shared_ptr<DissociateRulesFromDbRequest> request, shared_ptr<Darabonba_Util::RuntimeOptions> runtime) {
  // Validate required request fields before serialization.
  Darabonba_Util::Client::validateModel(request);
  // The whole request model is serialized and sent as the RPC body.
  map<string, boost::any> body = {
    {"body", boost::any(Darabonba_Util::Client::toMap(request))}
  };
  shared_ptr<Alibabacloud_OpenApi::OpenApiRequest> openApiReq = make_shared<Alibabacloud_OpenApi::OpenApiRequest>(body);
  // RPC call: action "DissociateRulesFromDb", API version 2018-03-20, HTTPS POST, AK credentials, JSON payload.
  return DissociateRulesFromDbResponse(doRPCRequest(make_shared<string>("DissociateRulesFromDb"), make_shared<string>("2018-03-20"), make_shared<string>("HTTPS"), make_shared<string>("POST"), make_shared<string>("AK"), make_shared<string>("json"), openApiReq, runtime));
}
DissociateRulesFromDbResponse Alibabacloud_Yundun-dbaudit20180320::Client::dissociateRulesFromDb(shared_ptr<DissociateRulesFromDbRequest> request) {
  // Convenience overload: delegate with default runtime options.
  return dissociateRulesFromDbWithOptions(request, make_shared<Darabonba_Util::RuntimeOptions>());
}
DissociateTemplatesFromRuleResponse Alibabacloud_Yundun-dbaudit20180320::Client::dissociateTemplatesFromRuleWithOptions(shared_ptr<DissociateTemplatesFromRuleRequest> request, shared_ptr<Darabonba_Util::RuntimeOptions> runtime) {
  // Validate required request fields before serialization.
  Darabonba_Util::Client::validateModel(request);
  // The whole request model is serialized and sent as the RPC body.
  map<string, boost::any> body = {
    {"body", boost::any(Darabonba_Util::Client::toMap(request))}
  };
  shared_ptr<Alibabacloud_OpenApi::OpenApiRequest> openApiReq = make_shared<Alibabacloud_OpenApi::OpenApiRequest>(body);
  // RPC call: action "DissociateTemplatesFromRule", API version 2018-03-20, HTTPS POST, AK credentials, JSON payload.
  return DissociateTemplatesFromRuleResponse(doRPCRequest(make_shared<string>("DissociateTemplatesFromRule"), make_shared<string>("2018-03-20"), make_shared<string>("HTTPS"), make_shared<string>("POST"), make_shared<string>("AK"), make_shared<string>("json"), openApiReq, runtime));
}
DissociateTemplatesFromRuleResponse Alibabacloud_Yundun-dbaudit20180320::Client::dissociateTemplatesFromRule(shared_ptr<DissociateTemplatesFromRuleRequest> request) {
  // Convenience overload: delegate with default runtime options.
  return dissociateTemplatesFromRuleWithOptions(request, make_shared<Darabonba_Util::RuntimeOptions>());
}
EditLogMaskConfigResponse Alibabacloud_Yundun-dbaudit20180320::Client::editLogMaskConfigWithOptions(shared_ptr<EditLogMaskConfigRequest> request, shared_ptr<Darabonba_Util::RuntimeOptions> runtime) {
  // Validate required request fields before serialization.
  Darabonba_Util::Client::validateModel(request);
  // The whole request model is serialized and sent as the RPC body.
  map<string, boost::any> body = {
    {"body", boost::any(Darabonba_Util::Client::toMap(request))}
  };
  shared_ptr<Alibabacloud_OpenApi::OpenApiRequest> openApiReq = make_shared<Alibabacloud_OpenApi::OpenApiRequest>(body);
  // RPC call: action "EditLogMaskConfig", API version 2018-03-20, HTTPS POST, AK credentials, JSON payload.
  return EditLogMaskConfigResponse(doRPCRequest(make_shared<string>("EditLogMaskConfig"), make_shared<string>("2018-03-20"), make_shared<string>("HTTPS"), make_shared<string>("POST"), make_shared<string>("AK"), make_shared<string>("json"), openApiReq, runtime));
}
EditLogMaskConfigResponse Alibabacloud_Yundun-dbaudit20180320::Client::editLogMaskConfig(shared_ptr<EditLogMaskConfigRequest> request) {
  // Convenience overload: delegate with default runtime options.
  return editLogMaskConfigWithOptions(request, make_shared<Darabonba_Util::RuntimeOptions>());
}
EnableLogMaskConfigsResponse Alibabacloud_Yundun-dbaudit20180320::Client::enableLogMaskConfigsWithOptions(shared_ptr<EnableLogMaskConfigsRequest> request, shared_ptr<Darabonba_Util::RuntimeOptions> runtime) {
  // Validate required request fields before serialization.
  Darabonba_Util::Client::validateModel(request);
  // The whole request model is serialized and sent as the RPC body.
  map<string, boost::any> body = {
    {"body", boost::any(Darabonba_Util::Client::toMap(request))}
  };
  shared_ptr<Alibabacloud_OpenApi::OpenApiRequest> openApiReq = make_shared<Alibabacloud_OpenApi::OpenApiRequest>(body);
  // RPC call: action "EnableLogMaskConfigs", API version 2018-03-20, HTTPS POST, AK credentials, JSON payload.
  return EnableLogMaskConfigsResponse(doRPCRequest(make_shared<string>("EnableLogMaskConfigs"), make_shared<string>("2018-03-20"), make_shared<string>("HTTPS"), make_shared<string>("POST"), make_shared<string>("AK"), make_shared<string>("json"), openApiReq, runtime));
}
EnableLogMaskConfigsResponse Alibabacloud_Yundun-dbaudit20180320::Client::enableLogMaskConfigs(shared_ptr<EnableLogMaskConfigsRequest> request) {
  // Convenience overload: delegate with default runtime options.
  return enableLogMaskConfigsWithOptions(request, make_shared<Darabonba_Util::RuntimeOptions>());
}
GetAgentFileUrlResponse Alibabacloud_Yundun-dbaudit20180320::Client::getAgentFileUrlWithOptions(shared_ptr<GetAgentFileUrlRequest> request, shared_ptr<Darabonba_Util::RuntimeOptions> runtime) {
  // Validate required request fields before serialization.
  Darabonba_Util::Client::validateModel(request);
  // The whole request model is serialized and sent as the RPC body.
  map<string, boost::any> body = {
    {"body", boost::any(Darabonba_Util::Client::toMap(request))}
  };
  shared_ptr<Alibabacloud_OpenApi::OpenApiRequest> openApiReq = make_shared<Alibabacloud_OpenApi::OpenApiRequest>(body);
  // RPC call: action "GetAgentFileUrl", API version 2018-03-20, HTTPS POST, AK credentials, JSON payload.
  return GetAgentFileUrlResponse(doRPCRequest(make_shared<string>("GetAgentFileUrl"), make_shared<string>("2018-03-20"), make_shared<string>("HTTPS"), make_shared<string>("POST"), make_shared<string>("AK"), make_shared<string>("json"), openApiReq, runtime));
}
GetAgentFileUrlResponse Alibabacloud_Yundun-dbaudit20180320::Client::getAgentFileUrl(shared_ptr<GetAgentFileUrlRequest> request) {
  // Convenience overload: delegate with default runtime options.
  return getAgentFileUrlWithOptions(request, make_shared<Darabonba_Util::RuntimeOptions>());
}
GetAgentListResponse Alibabacloud_Yundun-dbaudit20180320::Client::getAgentListWithOptions(shared_ptr<GetAgentListRequest> request, shared_ptr<Darabonba_Util::RuntimeOptions> runtime) {
  // Validate required request fields before serialization.
  Darabonba_Util::Client::validateModel(request);
  // The whole request model is serialized and sent as the RPC body.
  map<string, boost::any> body = {
    {"body", boost::any(Darabonba_Util::Client::toMap(request))}
  };
  shared_ptr<Alibabacloud_OpenApi::OpenApiRequest> openApiReq = make_shared<Alibabacloud_OpenApi::OpenApiRequest>(body);
  // RPC call: action "GetAgentList", API version 2018-03-20, HTTPS POST, AK credentials, JSON payload.
  return GetAgentListResponse(doRPCRequest(make_shared<string>("GetAgentList"), make_shared<string>("2018-03-20"), make_shared<string>("HTTPS"), make_shared<string>("POST"), make_shared<string>("AK"), make_shared<string>("json"), openApiReq, runtime));
}
GetAgentListResponse Alibabacloud_Yundun-dbaudit20180320::Client::getAgentList(shared_ptr<GetAgentListRequest> request) {
  // Convenience overload: delegate with default runtime options.
  return getAgentListWithOptions(request, make_shared<Darabonba_Util::RuntimeOptions>());
}
GetAppointOperationResponse Alibabacloud_Yundun-dbaudit20180320::Client::getAppointOperationWithOptions(shared_ptr<GetAppointOperationRequest> request, shared_ptr<Darabonba_Util::RuntimeOptions> runtime) {
  // Validate required request fields before serialization.
  Darabonba_Util::Client::validateModel(request);
  // The whole request model is serialized and sent as the RPC body.
  map<string, boost::any> body = {
    {"body", boost::any(Darabonba_Util::Client::toMap(request))}
  };
  shared_ptr<Alibabacloud_OpenApi::OpenApiRequest> openApiReq = make_shared<Alibabacloud_OpenApi::OpenApiRequest>(body);
  // RPC call: action "GetAppointOperation", API version 2018-03-20, HTTPS POST, AK credentials, JSON payload.
  return GetAppointOperationResponse(doRPCRequest(make_shared<string>("GetAppointOperation"), make_shared<string>("2018-03-20"), make_shared<string>("HTTPS"), make_shared<string>("POST"), make_shared<string>("AK"), make_shared<string>("json"), openApiReq, runtime));
}
GetAppointOperationResponse Alibabacloud_Yundun-dbaudit20180320::Client::getAppointOperation(shared_ptr<GetAppointOperationRequest> request) {
  // Convenience overload: delegate with default runtime options.
  return getAppointOperationWithOptions(request, make_shared<Darabonba_Util::RuntimeOptions>());
}
GetAuditCountResponse Alibabacloud_Yundun-dbaudit20180320::Client::getAuditCountWithOptions(shared_ptr<GetAuditCountRequest> request, shared_ptr<Darabonba_Util::RuntimeOptions> runtime) {
  // Validate required request fields before serialization.
  Darabonba_Util::Client::validateModel(request);
  // The whole request model is serialized and sent as the RPC body.
  map<string, boost::any> body = {
    {"body", boost::any(Darabonba_Util::Client::toMap(request))}
  };
  shared_ptr<Alibabacloud_OpenApi::OpenApiRequest> openApiReq = make_shared<Alibabacloud_OpenApi::OpenApiRequest>(body);
  // RPC call: action "GetAuditCount", API version 2018-03-20, HTTPS POST, AK credentials, JSON payload.
  return GetAuditCountResponse(doRPCRequest(make_shared<string>("GetAuditCount"), make_shared<string>("2018-03-20"), make_shared<string>("HTTPS"), make_shared<string>("POST"), make_shared<string>("AK"), make_shared<string>("json"), openApiReq, runtime));
}
GetAuditCountResponse Alibabacloud_Yundun-dbaudit20180320::Client::getAuditCount(shared_ptr<GetAuditCountRequest> request) {
  // Convenience overload: delegate with default runtime options.
  return getAuditCountWithOptions(request, make_shared<Darabonba_Util::RuntimeOptions>());
}
GetAuditCountDistributionResponse Alibabacloud_Yundun-dbaudit20180320::Client::getAuditCountDistributionWithOptions(shared_ptr<GetAuditCountDistributionRequest> request, shared_ptr<Darabonba_Util::RuntimeOptions> runtime) {
  // Validate required request fields before serialization.
  Darabonba_Util::Client::validateModel(request);
  // The whole request model is serialized and sent as the RPC body.
  map<string, boost::any> body = {
    {"body", boost::any(Darabonba_Util::Client::toMap(request))}
  };
  shared_ptr<Alibabacloud_OpenApi::OpenApiRequest> openApiReq = make_shared<Alibabacloud_OpenApi::OpenApiRequest>(body);
  // RPC call: action "GetAuditCountDistribution", API version 2018-03-20, HTTPS POST, AK credentials, JSON payload.
  return GetAuditCountDistributionResponse(doRPCRequest(make_shared<string>("GetAuditCountDistribution"), make_shared<string>("2018-03-20"), make_shared<string>("HTTPS"), make_shared<string>("POST"), make_shared<string>("AK"), make_shared<string>("json"), openApiReq, runtime));
}
GetAuditCountDistributionResponse Alibabacloud_Yundun-dbaudit20180320::Client::getAuditCountDistribution(shared_ptr<GetAuditCountDistributionRequest> request) {
  // Convenience overload: delegate with default runtime options.
  return getAuditCountDistributionWithOptions(request, make_shared<Darabonba_Util::RuntimeOptions>());
}
GetBaseTemplateListResponse Alibabacloud_Yundun-dbaudit20180320::Client::getBaseTemplateListWithOptions(shared_ptr<GetBaseTemplateListRequest> request, shared_ptr<Darabonba_Util::RuntimeOptions> runtime) {
  // Validate required request fields before serialization.
  Darabonba_Util::Client::validateModel(request);
  // The whole request model is serialized and sent as the RPC body.
  map<string, boost::any> body = {
    {"body", boost::any(Darabonba_Util::Client::toMap(request))}
  };
  shared_ptr<Alibabacloud_OpenApi::OpenApiRequest> openApiReq = make_shared<Alibabacloud_OpenApi::OpenApiRequest>(body);
  // RPC call: action "GetBaseTemplateList", API version 2018-03-20, HTTPS POST, AK credentials, JSON payload.
  return GetBaseTemplateListResponse(doRPCRequest(make_shared<string>("GetBaseTemplateList"), make_shared<string>("2018-03-20"), make_shared<string>("HTTPS"), make_shared<string>("POST"), make_shared<string>("AK"), make_shared<string>("json"), openApiReq, runtime));
}
GetBaseTemplateListResponse Alibabacloud_Yundun-dbaudit20180320::Client::getBaseTemplateList(shared_ptr<GetBaseTemplateListRequest> request) {
  // Convenience overload: delegate with default runtime options.
  return getBaseTemplateListWithOptions(request, make_shared<Darabonba_Util::RuntimeOptions>());
}
GetDasUsageResponse Alibabacloud_Yundun-dbaudit20180320::Client::getDasUsageWithOptions(shared_ptr<GetDasUsageRequest> request, shared_ptr<Darabonba_Util::RuntimeOptions> runtime) {
  // Validate required request fields before serialization.
  Darabonba_Util::Client::validateModel(request);
  // The whole request model is serialized and sent as the RPC body.
  map<string, boost::any> body = {
    {"body", boost::any(Darabonba_Util::Client::toMap(request))}
  };
  shared_ptr<Alibabacloud_OpenApi::OpenApiRequest> openApiReq = make_shared<Alibabacloud_OpenApi::OpenApiRequest>(body);
  // RPC call: action "GetDasUsage", API version 2018-03-20, HTTPS POST, AK credentials, JSON payload.
  return GetDasUsageResponse(doRPCRequest(make_shared<string>("GetDasUsage"), make_shared<string>("2018-03-20"), make_shared<string>("HTTPS"), make_shared<string>("POST"), make_shared<string>("AK"), make_shared<string>("json"), openApiReq, runtime));
}
GetDasUsageResponse Alibabacloud_Yundun-dbaudit20180320::Client::getDasUsage(shared_ptr<GetDasUsageRequest> request) {
  // Convenience overload: delegate with default runtime options.
  return getDasUsageWithOptions(request, make_shared<Darabonba_Util::RuntimeOptions>());
}
GetDBAuditCountListResponse Alibabacloud_Yundun-dbaudit20180320::Client::getDBAuditCountListWithOptions(shared_ptr<GetDBAuditCountListRequest> request, shared_ptr<Darabonba_Util::RuntimeOptions> runtime) {
  // Validate required request fields before serialization.
  Darabonba_Util::Client::validateModel(request);
  // The whole request model is serialized and sent as the RPC body.
  map<string, boost::any> body = {
    {"body", boost::any(Darabonba_Util::Client::toMap(request))}
  };
  shared_ptr<Alibabacloud_OpenApi::OpenApiRequest> openApiReq = make_shared<Alibabacloud_OpenApi::OpenApiRequest>(body);
  // RPC call: action "GetDBAuditCountList", API version 2018-03-20, HTTPS POST, AK credentials, JSON payload.
  return GetDBAuditCountListResponse(doRPCRequest(make_shared<string>("GetDBAuditCountList"), make_shared<string>("2018-03-20"), make_shared<string>("HTTPS"), make_shared<string>("POST"), make_shared<string>("AK"), make_shared<string>("json"), openApiReq, runtime));
}
GetDBAuditCountListResponse Alibabacloud_Yundun-dbaudit20180320::Client::getDBAuditCountList(shared_ptr<GetDBAuditCountListRequest> request) {
  // Convenience overload: delegate with default runtime options.
  return getDBAuditCountListWithOptions(request, make_shared<Darabonba_Util::RuntimeOptions>());
}
GetGroupDetailResponse Alibabacloud_Yundun-dbaudit20180320::Client::getGroupDetailWithOptions(shared_ptr<GetGroupDetailRequest> request, shared_ptr<Darabonba_Util::RuntimeOptions> runtime) {
  // Validate required request fields before serialization.
  Darabonba_Util::Client::validateModel(request);
  // The whole request model is serialized and sent as the RPC body.
  map<string, boost::any> body = {
    {"body", boost::any(Darabonba_Util::Client::toMap(request))}
  };
  shared_ptr<Alibabacloud_OpenApi::OpenApiRequest> openApiReq = make_shared<Alibabacloud_OpenApi::OpenApiRequest>(body);
  // RPC call: action "GetGroupDetail", API version 2018-03-20, HTTPS POST, AK credentials, JSON payload.
  return GetGroupDetailResponse(doRPCRequest(make_shared<string>("GetGroupDetail"), make_shared<string>("2018-03-20"), make_shared<string>("HTTPS"), make_shared<string>("POST"), make_shared<string>("AK"), make_shared<string>("json"), openApiReq, runtime));
}
GetGroupDetailResponse Alibabacloud_Yundun-dbaudit20180320::Client::getGroupDetail(shared_ptr<GetGroupDetailRequest> request) {
  // Convenience overload: delegate with default runtime options.
  return getGroupDetailWithOptions(request, make_shared<Darabonba_Util::RuntimeOptions>());
}
GetLicenseResponse Alibabacloud_Yundun-dbaudit20180320::Client::getLicenseWithOptions(shared_ptr<GetLicenseRequest> request, shared_ptr<Darabonba_Util::RuntimeOptions> runtime) {
  // Validate required request fields before serialization.
  Darabonba_Util::Client::validateModel(request);
  // The whole request model is serialized and sent as the RPC body.
  map<string, boost::any> body = {
    {"body", boost::any(Darabonba_Util::Client::toMap(request))}
  };
  shared_ptr<Alibabacloud_OpenApi::OpenApiRequest> openApiReq = make_shared<Alibabacloud_OpenApi::OpenApiRequest>(body);
  // RPC call: action "GetLicense", API version 2018-03-20, HTTPS POST, AK credentials, JSON payload.
  return GetLicenseResponse(doRPCRequest(make_shared<string>("GetLicense"), make_shared<string>("2018-03-20"), make_shared<string>("HTTPS"), make_shared<string>("POST"), make_shared<string>("AK"), make_shared<string>("json"), openApiReq, runtime));
}
GetLicenseResponse Alibabacloud_Yundun-dbaudit20180320::Client::getLicense(shared_ptr<GetLicenseRequest> request) {
  // Convenience overload: delegate with default runtime options.
  return getLicenseWithOptions(request, make_shared<Darabonba_Util::RuntimeOptions>());
}
GetLogDetailResponse Alibabacloud_Yundun-dbaudit20180320::Client::getLogDetailWithOptions(shared_ptr<GetLogDetailRequest> request, shared_ptr<Darabonba_Util::RuntimeOptions> runtime) {
  // Validate required request fields before serialization.
  Darabonba_Util::Client::validateModel(request);
  // The whole request model is serialized and sent as the RPC body.
  map<string, boost::any> body = {
    {"body", boost::any(Darabonba_Util::Client::toMap(request))}
  };
  shared_ptr<Alibabacloud_OpenApi::OpenApiRequest> openApiReq = make_shared<Alibabacloud_OpenApi::OpenApiRequest>(body);
  // RPC call: action "GetLogDetail", API version 2018-03-20, HTTPS POST, AK credentials, JSON payload.
  return GetLogDetailResponse(doRPCRequest(make_shared<string>("GetLogDetail"), make_shared<string>("2018-03-20"), make_shared<string>("HTTPS"), make_shared<string>("POST"), make_shared<string>("AK"), make_shared<string>("json"), openApiReq, runtime));
}
GetLogDetailResponse Alibabacloud_Yundun-dbaudit20180320::Client::getLogDetail(shared_ptr<GetLogDetailRequest> request) {
  // Convenience overload: delegate with default runtime options.
  return getLogDetailWithOptions(request, make_shared<Darabonba_Util::RuntimeOptions>());
}
GetLogListResponse Alibabacloud_Yundun-dbaudit20180320::Client::getLogListWithOptions(shared_ptr<GetLogListRequest> request, shared_ptr<Darabonba_Util::RuntimeOptions> runtime) {
  // Validate required request fields before serialization.
  Darabonba_Util::Client::validateModel(request);
  // The whole request model is serialized and sent as the RPC body.
  map<string, boost::any> body = {
    {"body", boost::any(Darabonba_Util::Client::toMap(request))}
  };
  shared_ptr<Alibabacloud_OpenApi::OpenApiRequest> openApiReq = make_shared<Alibabacloud_OpenApi::OpenApiRequest>(body);
  // RPC call: action "GetLogList", API version 2018-03-20, HTTPS POST, AK credentials, JSON payload.
  return GetLogListResponse(doRPCRequest(make_shared<string>("GetLogList"), make_shared<string>("2018-03-20"), make_shared<string>("HTTPS"), make_shared<string>("POST"), make_shared<string>("AK"), make_shared<string>("json"), openApiReq, runtime));
}
GetLogListResponse Alibabacloud_Yundun-dbaudit20180320::Client::getLogList(shared_ptr<GetLogListRequest> request) {
  // Convenience overload: delegate with default runtime options.
  return getLogListWithOptions(request, make_shared<Darabonba_Util::RuntimeOptions>());
}
GetLogMaskConfigsResponse Alibabacloud_Yundun-dbaudit20180320::Client::getLogMaskConfigsWithOptions(shared_ptr<GetLogMaskConfigsRequest> request, shared_ptr<Darabonba_Util::RuntimeOptions> runtime) {
  // Validate required request fields before serialization.
  Darabonba_Util::Client::validateModel(request);
  // The whole request model is serialized and sent as the RPC body.
  map<string, boost::any> body = {
    {"body", boost::any(Darabonba_Util::Client::toMap(request))}
  };
  shared_ptr<Alibabacloud_OpenApi::OpenApiRequest> openApiReq = make_shared<Alibabacloud_OpenApi::OpenApiRequest>(body);
  // RPC call: action "GetLogMaskConfigs", API version 2018-03-20, HTTPS POST, AK credentials, JSON payload.
  return GetLogMaskConfigsResponse(doRPCRequest(make_shared<string>("GetLogMaskConfigs"), make_shared<string>("2018-03-20"), make_shared<string>("HTTPS"), make_shared<string>("POST"), make_shared<string>("AK"), make_shared<string>("json"), openApiReq, runtime));
}
GetLogMaskConfigsResponse Alibabacloud_Yundun-dbaudit20180320::Client::getLogMaskConfigs(shared_ptr<GetLogMaskConfigsRequest> request) {
  // Convenience overload: delegate with default runtime options.
  return getLogMaskConfigsWithOptions(request, make_shared<Darabonba_Util::RuntimeOptions>());
}
GetLogQueryConditionResponse Alibabacloud_Yundun-dbaudit20180320::Client::getLogQueryConditionWithOptions(shared_ptr<GetLogQueryConditionRequest> request, shared_ptr<Darabonba_Util::RuntimeOptions> runtime) {
  // Validate required request fields before serialization.
  Darabonba_Util::Client::validateModel(request);
  // The whole request model is serialized and sent as the RPC body.
  map<string, boost::any> body = {
    {"body", boost::any(Darabonba_Util::Client::toMap(request))}
  };
  shared_ptr<Alibabacloud_OpenApi::OpenApiRequest> openApiReq = make_shared<Alibabacloud_OpenApi::OpenApiRequest>(body);
  // RPC call: action "GetLogQueryCondition", API version 2018-03-20, HTTPS POST, AK credentials, JSON payload.
  return GetLogQueryConditionResponse(doRPCRequest(make_shared<string>("GetLogQueryCondition"), make_shared<string>("2018-03-20"), make_shared<string>("HTTPS"), make_shared<string>("POST"), make_shared<string>("AK"), make_shared<string>("json"), openApiReq, runtime));
}
GetLogQueryConditionResponse Alibabacloud_Yundun-dbaudit20180320::Client::getLogQueryCondition(shared_ptr<GetLogQueryConditionRequest> request) {
  // Convenience overload: delegate with default runtime options.
  return getLogQueryConditionWithOptions(request, make_shared<Darabonba_Util::RuntimeOptions>());
}
GetLogStatisticsResponse Alibabacloud_Yundun-dbaudit20180320::Client::getLogStatisticsWithOptions(shared_ptr<GetLogStatisticsRequest> request, shared_ptr<Darabonba_Util::RuntimeOptions> runtime) {
  // Validate required request fields before serialization.
  Darabonba_Util::Client::validateModel(request);
  // The whole request model is serialized and sent as the RPC body.
  map<string, boost::any> body = {
    {"body", boost::any(Darabonba_Util::Client::toMap(request))}
  };
  shared_ptr<Alibabacloud_OpenApi::OpenApiRequest> openApiReq = make_shared<Alibabacloud_OpenApi::OpenApiRequest>(body);
  // RPC call: action "GetLogStatistics", API version 2018-03-20, HTTPS POST, AK credentials, JSON payload.
  return GetLogStatisticsResponse(doRPCRequest(make_shared<string>("GetLogStatistics"), make_shared<string>("2018-03-20"), make_shared<string>("HTTPS"), make_shared<string>("POST"), make_shared<string>("AK"), make_shared<string>("json"), openApiReq, runtime));
}
GetLogStatisticsResponse Alibabacloud_Yundun-dbaudit20180320::Client::getLogStatistics(shared_ptr<GetLogStatisticsRequest> request) {
  // Convenience overload: delegate with default runtime options.
  return getLogStatisticsWithOptions(request, make_shared<Darabonba_Util::RuntimeOptions>());
}
GetLogTopDistributionResponse Alibabacloud_Yundun-dbaudit20180320::Client::getLogTopDistributionWithOptions(shared_ptr<GetLogTopDistributionRequest> request, shared_ptr<Darabonba_Util::RuntimeOptions> runtime) {
  // Validate required request fields before serialization.
  Darabonba_Util::Client::validateModel(request);
  // The whole request model is serialized and sent as the RPC body.
  map<string, boost::any> body = {
    {"body", boost::any(Darabonba_Util::Client::toMap(request))}
  };
  shared_ptr<Alibabacloud_OpenApi::OpenApiRequest> openApiReq = make_shared<Alibabacloud_OpenApi::OpenApiRequest>(body);
  // RPC call: action "GetLogTopDistribution", API version 2018-03-20, HTTPS POST, AK credentials, JSON payload.
  return GetLogTopDistributionResponse(doRPCRequest(make_shared<string>("GetLogTopDistribution"), make_shared<string>("2018-03-20"), make_shared<string>("HTTPS"), make_shared<string>("POST"), make_shared<string>("AK"), make_shared<string>("json"), openApiReq, runtime));
}
GetLogTopDistributionResponse Alibabacloud_Yundun-dbaudit20180320::Client::getLogTopDistribution(shared_ptr<GetLogTopDistributionRequest> request) {
  // Convenience overload: delegate with default runtime options.
  return getLogTopDistributionWithOptions(request, make_shared<Darabonba_Util::RuntimeOptions>());
}
GetLogTopStatisticsResponse Alibabacloud_Yundun-dbaudit20180320::Client::getLogTopStatisticsWithOptions(shared_ptr<GetLogTopStatisticsRequest> request, shared_ptr<Darabonba_Util::RuntimeOptions> runtime) {
  // Validate required request fields before serialization.
  Darabonba_Util::Client::validateModel(request);
  // The whole request model is serialized and sent as the RPC body.
  map<string, boost::any> body = {
    {"body", boost::any(Darabonba_Util::Client::toMap(request))}
  };
  shared_ptr<Alibabacloud_OpenApi::OpenApiRequest> openApiReq = make_shared<Alibabacloud_OpenApi::OpenApiRequest>(body);
  // RPC call: action "GetLogTopStatistics", API version 2018-03-20, HTTPS POST, AK credentials, JSON payload.
  return GetLogTopStatisticsResponse(doRPCRequest(make_shared<string>("GetLogTopStatistics"), make_shared<string>("2018-03-20"), make_shared<string>("HTTPS"), make_shared<string>("POST"), make_shared<string>("AK"), make_shared<string>("json"), openApiReq, runtime));
}
GetLogTopStatisticsResponse Alibabacloud_Yundun-dbaudit20180320::Client::getLogTopStatistics(shared_ptr<GetLogTopStatisticsRequest> request) {
  // Convenience overload: delegate with default runtime options.
  return getLogTopStatisticsWithOptions(request, make_shared<Darabonba_Util::RuntimeOptions>());
}
GetLogTypeDistributionResponse Alibabacloud_Yundun-dbaudit20180320::Client::getLogTypeDistributionWithOptions(shared_ptr<GetLogTypeDistributionRequest> request, shared_ptr<Darabonba_Util::RuntimeOptions> runtime) {
  // Validate required request fields before serialization.
  Darabonba_Util::Client::validateModel(request);
  // The whole request model is serialized and sent as the RPC body.
  map<string, boost::any> body = {
    {"body", boost::any(Darabonba_Util::Client::toMap(request))}
  };
  shared_ptr<Alibabacloud_OpenApi::OpenApiRequest> openApiReq = make_shared<Alibabacloud_OpenApi::OpenApiRequest>(body);
  // RPC call: action "GetLogTypeDistribution", API version 2018-03-20, HTTPS POST, AK credentials, JSON payload.
  return GetLogTypeDistributionResponse(doRPCRequest(make_shared<string>("GetLogTypeDistribution"), make_shared<string>("2018-03-20"), make_shared<string>("HTTPS"), make_shared<string>("POST"), make_shared<string>("AK"), make_shared<string>("json"), openApiReq, runtime));
}
GetLogTypeDistributionResponse Alibabacloud_Yundun-dbaudit20180320::Client::getLogTypeDistribution(shared_ptr<GetLogTypeDistributionRequest> request) {
  // Convenience overload: delegate with default runtime options.
  return getLogTypeDistributionWithOptions(request, make_shared<Darabonba_Util::RuntimeOptions>());
}
GetNewSqlTemplateListResponse Alibabacloud_Yundun-dbaudit20180320::Client::getNewSqlTemplateListWithOptions(shared_ptr<GetNewSqlTemplateListRequest> request, shared_ptr<Darabonba_Util::RuntimeOptions> runtime) {
  // Validate required request fields before serialization.
  Darabonba_Util::Client::validateModel(request);
  // The whole request model is serialized and sent as the RPC body.
  map<string, boost::any> body = {
    {"body", boost::any(Darabonba_Util::Client::toMap(request))}
  };
  shared_ptr<Alibabacloud_OpenApi::OpenApiRequest> openApiReq = make_shared<Alibabacloud_OpenApi::OpenApiRequest>(body);
  // RPC call: action "GetNewSqlTemplateList", API version 2018-03-20, HTTPS POST, AK credentials, JSON payload.
  return GetNewSqlTemplateListResponse(doRPCRequest(make_shared<string>("GetNewSqlTemplateList"), make_shared<string>("2018-03-20"), make_shared<string>("HTTPS"), make_shared<string>("POST"), make_shared<string>("AK"), make_shared<string>("json"), openApiReq, runtime));
}
GetNewSqlTemplateListResponse Alibabacloud_Yundun-dbaudit20180320::Client::getNewSqlTemplateList(shared_ptr<GetNewSqlTemplateListRequest> request) {
  // Convenience overload: delegate with default runtime options.
  return getNewSqlTemplateListWithOptions(request, make_shared<Darabonba_Util::RuntimeOptions>());
}
GetReportFileUrlResponse Alibabacloud_Yundun-dbaudit20180320::Client::getReportFileUrlWithOptions(shared_ptr<GetReportFileUrlRequest> request, shared_ptr<Darabonba_Util::RuntimeOptions> runtime) {
  // Validate required request fields before serialization.
  Darabonba_Util::Client::validateModel(request);
  // The whole request model is serialized and sent as the RPC body.
  map<string, boost::any> body = {
    {"body", boost::any(Darabonba_Util::Client::toMap(request))}
  };
  shared_ptr<Alibabacloud_OpenApi::OpenApiRequest> openApiReq = make_shared<Alibabacloud_OpenApi::OpenApiRequest>(body);
  // RPC call: action "GetReportFileUrl", API version 2018-03-20, HTTPS POST, AK credentials, JSON payload.
  return GetReportFileUrlResponse(doRPCRequest(make_shared<string>("GetReportFileUrl"), make_shared<string>("2018-03-20"), make_shared<string>("HTTPS"), make_shared<string>("POST"), make_shared<string>("AK"), make_shared<string>("json"), openApiReq, runtime));
}
GetReportFileUrlResponse Alibabacloud_Yundun-dbaudit20180320::Client::getReportFileUrl(shared_ptr<GetReportFileUrlRequest> request) {
  // Convenience overload: delegate with default runtime options.
  return getReportFileUrlWithOptions(request, make_shared<Darabonba_Util::RuntimeOptions>());
}
GetRiskLevelDistributionResponse Alibabacloud_Yundun-dbaudit20180320::Client::getRiskLevelDistributionWithOptions(shared_ptr<GetRiskLevelDistributionRequest> request, shared_ptr<Darabonba_Util::RuntimeOptions> runtime) {
  // Validate required request fields before serialization.
  Darabonba_Util::Client::validateModel(request);
  // The whole request model is serialized and sent as the RPC body.
  map<string, boost::any> body = {
    {"body", boost::any(Darabonba_Util::Client::toMap(request))}
  };
  shared_ptr<Alibabacloud_OpenApi::OpenApiRequest> openApiReq = make_shared<Alibabacloud_OpenApi::OpenApiRequest>(body);
  // RPC call: action "GetRiskLevelDistribution", API version 2018-03-20, HTTPS POST, AK credentials, JSON payload.
  return GetRiskLevelDistributionResponse(doRPCRequest(make_shared<string>("GetRiskLevelDistribution"), make_shared<string>("2018-03-20"), make_shared<string>("HTTPS"), make_shared<string>("POST"), make_shared<string>("AK"), make_shared<string>("json"), openApiReq, runtime));
}
GetRiskLevelDistributionResponse Alibabacloud_Yundun-dbaudit20180320::Client::getRiskLevelDistribution(shared_ptr<GetRiskLevelDistributionRequest> request) {
  // Convenience overload: delegate with default runtime options.
  return getRiskLevelDistributionWithOptions(request, make_shared<Darabonba_Util::RuntimeOptions>());
}
GetRiskStatisticsResponse Alibabacloud_Yundun-dbaudit20180320::Client::getRiskStatisticsWithOptions(shared_ptr<GetRiskStatisticsRequest> request, shared_ptr<Darabonba_Util::RuntimeOptions> runtime) {
  // Validate required request fields before serialization.
  Darabonba_Util::Client::validateModel(request);
  // The whole request model is serialized and sent as the RPC body.
  map<string, boost::any> body = {
    {"body", boost::any(Darabonba_Util::Client::toMap(request))}
  };
  shared_ptr<Alibabacloud_OpenApi::OpenApiRequest> openApiReq = make_shared<Alibabacloud_OpenApi::OpenApiRequest>(body);
  // RPC call: action "GetRiskStatistics", API version 2018-03-20, HTTPS POST, AK credentials, JSON payload.
  return GetRiskStatisticsResponse(doRPCRequest(make_shared<string>("GetRiskStatistics"), make_shared<string>("2018-03-20"), make_shared<string>("HTTPS"), make_shared<string>("POST"), make_shared<string>("AK"), make_shared<string>("json"), openApiReq, runtime));
}
GetRiskStatisticsResponse Alibabacloud_Yundun-dbaudit20180320::Client::getRiskStatistics(shared_ptr<GetRiskStatisticsRequest> request) {
  // Convenience overload: delegate with default runtime options.
  return getRiskStatisticsWithOptions(request, make_shared<Darabonba_Util::RuntimeOptions>());
}
GetRuleDetailResponse Alibabacloud_Yundun-dbaudit20180320::Client::getRuleDetailWithOptions(shared_ptr<GetRuleDetailRequest> request, shared_ptr<Darabonba_Util::RuntimeOptions> runtime) {
  // Validate required request fields before serialization.
  Darabonba_Util::Client::validateModel(request);
  // The whole request model is serialized and sent as the RPC body.
  map<string, boost::any> body = {
    {"body", boost::any(Darabonba_Util::Client::toMap(request))}
  };
  shared_ptr<Alibabacloud_OpenApi::OpenApiRequest> openApiReq = make_shared<Alibabacloud_OpenApi::OpenApiRequest>(body);
  // RPC call: action "GetRuleDetail", API version 2018-03-20, HTTPS POST, AK credentials, JSON payload.
  return GetRuleDetailResponse(doRPCRequest(make_shared<string>("GetRuleDetail"), make_shared<string>("2018-03-20"), make_shared<string>("HTTPS"), make_shared<string>("POST"), make_shared<string>("AK"), make_shared<string>("json"), openApiReq, runtime));
}
GetRuleDetailResponse Alibabacloud_Yundun-dbaudit20180320::Client::getRuleDetail(shared_ptr<GetRuleDetailRequest> request) {
  // Convenience overload: delegate with default runtime options.
  return getRuleDetailWithOptions(request, make_shared<Darabonba_Util::RuntimeOptions>());
}
GetRuleGroupNameResponse Alibabacloud_Yundun-dbaudit20180320::Client::getRuleGroupNameWithOptions(shared_ptr<GetRuleGroupNameRequest> request, shared_ptr<Darabonba_Util::RuntimeOptions> runtime) {
  // Validate required request fields before serialization.
  Darabonba_Util::Client::validateModel(request);
  // The whole request model is serialized and sent as the RPC body.
  map<string, boost::any> body = {
    {"body", boost::any(Darabonba_Util::Client::toMap(request))}
  };
  shared_ptr<Alibabacloud_OpenApi::OpenApiRequest> openApiReq = make_shared<Alibabacloud_OpenApi::OpenApiRequest>(body);
  // RPC call: action "GetRuleGroupName", API version 2018-03-20, HTTPS POST, AK credentials, JSON payload.
  return GetRuleGroupNameResponse(doRPCRequest(make_shared<string>("GetRuleGroupName"), make_shared<string>("2018-03-20"), make_shared<string>("HTTPS"), make_shared<string>("POST"), make_shared<string>("AK"), make_shared<string>("json"), openApiReq, runtime));
}
GetRuleGroupNameResponse Alibabacloud_Yundun-dbaudit20180320::Client::getRuleGroupName(shared_ptr<GetRuleGroupNameRequest> request) {
  // Convenience overload: delegate with default runtime options.
  return getRuleGroupNameWithOptions(request, make_shared<Darabonba_Util::RuntimeOptions>());
}
GetSessionDetailResponse Alibabacloud_Yundun-dbaudit20180320::Client::getSessionDetailWithOptions(shared_ptr<GetSessionDetailRequest> request, shared_ptr<Darabonba_Util::RuntimeOptions> runtime) {
  // Validate required request fields before serialization.
  Darabonba_Util::Client::validateModel(request);
  // The whole request model is serialized and sent as the RPC body.
  map<string, boost::any> body = {
    {"body", boost::any(Darabonba_Util::Client::toMap(request))}
  };
  shared_ptr<Alibabacloud_OpenApi::OpenApiRequest> openApiReq = make_shared<Alibabacloud_OpenApi::OpenApiRequest>(body);
  // RPC call: action "GetSessionDetail", API version 2018-03-20, HTTPS POST, AK credentials, JSON payload.
  return GetSessionDetailResponse(doRPCRequest(make_shared<string>("GetSessionDetail"), make_shared<string>("2018-03-20"), make_shared<string>("HTTPS"), make_shared<string>("POST"), make_shared<string>("AK"), make_shared<string>("json"), openApiReq, runtime));
}
GetSessionDetailResponse Alibabacloud_Yundun-dbaudit20180320::Client::getSessionDetail(shared_ptr<GetSessionDetailRequest> request) {
  // Convenience overload: delegate with default runtime options.
  return getSessionDetailWithOptions(request, make_shared<Darabonba_Util::RuntimeOptions>());
}
GetSessionDistributionResponse Alibabacloud_Yundun-dbaudit20180320::Client::getSessionDistributionWithOptions(shared_ptr<GetSessionDistributionRequest> request, shared_ptr<Darabonba_Util::RuntimeOptions> runtime) {
  // Validate required request fields before serialization.
  Darabonba_Util::Client::validateModel(request);
  // The whole request model is serialized and sent as the RPC body.
  map<string, boost::any> body = {
    {"body", boost::any(Darabonba_Util::Client::toMap(request))}
  };
  shared_ptr<Alibabacloud_OpenApi::OpenApiRequest> openApiReq = make_shared<Alibabacloud_OpenApi::OpenApiRequest>(body);
  // RPC call: action "GetSessionDistribution", API version 2018-03-20, HTTPS POST, AK credentials, JSON payload.
  return GetSessionDistributionResponse(doRPCRequest(make_shared<string>("GetSessionDistribution"), make_shared<string>("2018-03-20"), make_shared<string>("HTTPS"), make_shared<string>("POST"), make_shared<string>("AK"), make_shared<string>("json"), openApiReq, runtime));
}
GetSessionDistributionResponse Alibabacloud_Yundun-dbaudit20180320::Client::getSessionDistribution(shared_ptr<GetSessionDistributionRequest> request) {
  // Convenience overload: delegate with default runtime options.
  return getSessionDistributionWithOptions(request, make_shared<Darabonba_Util::RuntimeOptions>());
}
// GetSessionList: validate the request, then issue the "GetSessionList" RPC
// (2018-03-20, HTTPS POST, AK credentials, JSON) with the request map as the body.
GetSessionListResponse Alibabacloud_Yundun-dbaudit20180320::Client::getSessionListWithOptions(shared_ptr<GetSessionListRequest> request, shared_ptr<Darabonba_Util::RuntimeOptions> runtime) {
  Darabonba_Util::Client::validateModel(request);
  shared_ptr<Alibabacloud_OpenApi::OpenApiRequest> req = make_shared<Alibabacloud_OpenApi::OpenApiRequest>(map<string, boost::any>({
    {"body", boost::any(Darabonba_Util::Client::toMap(request))}
  }));
  return GetSessionListResponse(doRPCRequest(make_shared<string>("GetSessionList"), make_shared<string>("2018-03-20"), make_shared<string>("HTTPS"), make_shared<string>("POST"), make_shared<string>("AK"), make_shared<string>("json"), req, runtime));
}
// Convenience overload: delegates with default RuntimeOptions.
GetSessionListResponse Alibabacloud_Yundun-dbaudit20180320::Client::getSessionList(shared_ptr<GetSessionListRequest> request) {
  shared_ptr<Darabonba_Util::RuntimeOptions> runtime = make_shared<Darabonba_Util::RuntimeOptions>();
  return getSessionListWithOptions(request, runtime);
}
// GetSessionQueryCondition: validate the request, then issue the "GetSessionQueryCondition"
// RPC (2018-03-20, HTTPS POST, AK credentials, JSON) with the request map as the body.
GetSessionQueryConditionResponse Alibabacloud_Yundun-dbaudit20180320::Client::getSessionQueryConditionWithOptions(shared_ptr<GetSessionQueryConditionRequest> request, shared_ptr<Darabonba_Util::RuntimeOptions> runtime) {
  Darabonba_Util::Client::validateModel(request);
  shared_ptr<Alibabacloud_OpenApi::OpenApiRequest> req = make_shared<Alibabacloud_OpenApi::OpenApiRequest>(map<string, boost::any>({
    {"body", boost::any(Darabonba_Util::Client::toMap(request))}
  }));
  return GetSessionQueryConditionResponse(doRPCRequest(make_shared<string>("GetSessionQueryCondition"), make_shared<string>("2018-03-20"), make_shared<string>("HTTPS"), make_shared<string>("POST"), make_shared<string>("AK"), make_shared<string>("json"), req, runtime));
}
// Convenience overload: delegates with default RuntimeOptions.
GetSessionQueryConditionResponse Alibabacloud_Yundun-dbaudit20180320::Client::getSessionQueryCondition(shared_ptr<GetSessionQueryConditionRequest> request) {
  shared_ptr<Darabonba_Util::RuntimeOptions> runtime = make_shared<Darabonba_Util::RuntimeOptions>();
  return getSessionQueryConditionWithOptions(request, runtime);
}
// GetSqlTemplateList: validate the request, then issue the "GetSqlTemplateList" RPC
// (2018-03-20, HTTPS POST, AK credentials, JSON) with the request map as the body.
GetSqlTemplateListResponse Alibabacloud_Yundun-dbaudit20180320::Client::getSqlTemplateListWithOptions(shared_ptr<GetSqlTemplateListRequest> request, shared_ptr<Darabonba_Util::RuntimeOptions> runtime) {
  Darabonba_Util::Client::validateModel(request);
  shared_ptr<Alibabacloud_OpenApi::OpenApiRequest> req = make_shared<Alibabacloud_OpenApi::OpenApiRequest>(map<string, boost::any>({
    {"body", boost::any(Darabonba_Util::Client::toMap(request))}
  }));
  return GetSqlTemplateListResponse(doRPCRequest(make_shared<string>("GetSqlTemplateList"), make_shared<string>("2018-03-20"), make_shared<string>("HTTPS"), make_shared<string>("POST"), make_shared<string>("AK"), make_shared<string>("json"), req, runtime));
}
// Convenience overload: delegates with default RuntimeOptions.
GetSqlTemplateListResponse Alibabacloud_Yundun-dbaudit20180320::Client::getSqlTemplateList(shared_ptr<GetSqlTemplateListRequest> request) {
  shared_ptr<Darabonba_Util::RuntimeOptions> runtime = make_shared<Darabonba_Util::RuntimeOptions>();
  return getSqlTemplateListWithOptions(request, runtime);
}
// GetTopSqlTemplateList: validate the request, then issue the "GetTopSqlTemplateList"
// RPC (2018-03-20, HTTPS POST, AK credentials, JSON) with the request map as the body.
GetTopSqlTemplateListResponse Alibabacloud_Yundun-dbaudit20180320::Client::getTopSqlTemplateListWithOptions(shared_ptr<GetTopSqlTemplateListRequest> request, shared_ptr<Darabonba_Util::RuntimeOptions> runtime) {
  Darabonba_Util::Client::validateModel(request);
  shared_ptr<Alibabacloud_OpenApi::OpenApiRequest> req = make_shared<Alibabacloud_OpenApi::OpenApiRequest>(map<string, boost::any>({
    {"body", boost::any(Darabonba_Util::Client::toMap(request))}
  }));
  return GetTopSqlTemplateListResponse(doRPCRequest(make_shared<string>("GetTopSqlTemplateList"), make_shared<string>("2018-03-20"), make_shared<string>("HTTPS"), make_shared<string>("POST"), make_shared<string>("AK"), make_shared<string>("json"), req, runtime));
}
// Convenience overload: delegates with default RuntimeOptions.
GetTopSqlTemplateListResponse Alibabacloud_Yundun-dbaudit20180320::Client::getTopSqlTemplateList(shared_ptr<GetTopSqlTemplateListRequest> request) {
  shared_ptr<Darabonba_Util::RuntimeOptions> runtime = make_shared<Darabonba_Util::RuntimeOptions>();
  return getTopSqlTemplateListWithOptions(request, runtime);
}
// GradeProtectionReport: validate the request, then issue the "GradeProtectionReport"
// RPC (2018-03-20, HTTPS POST, AK credentials, JSON) with the request map as the body.
GradeProtectionReportResponse Alibabacloud_Yundun-dbaudit20180320::Client::gradeProtectionReportWithOptions(shared_ptr<GradeProtectionReportRequest> request, shared_ptr<Darabonba_Util::RuntimeOptions> runtime) {
  Darabonba_Util::Client::validateModel(request);
  shared_ptr<Alibabacloud_OpenApi::OpenApiRequest> req = make_shared<Alibabacloud_OpenApi::OpenApiRequest>(map<string, boost::any>({
    {"body", boost::any(Darabonba_Util::Client::toMap(request))}
  }));
  return GradeProtectionReportResponse(doRPCRequest(make_shared<string>("GradeProtectionReport"), make_shared<string>("2018-03-20"), make_shared<string>("HTTPS"), make_shared<string>("POST"), make_shared<string>("AK"), make_shared<string>("json"), req, runtime));
}
// Convenience overload: delegates with default RuntimeOptions.
GradeProtectionReportResponse Alibabacloud_Yundun-dbaudit20180320::Client::gradeProtectionReport(shared_ptr<GradeProtectionReportRequest> request) {
  shared_ptr<Darabonba_Util::RuntimeOptions> runtime = make_shared<Darabonba_Util::RuntimeOptions>();
  return gradeProtectionReportWithOptions(request, runtime);
}
// ImportDataSource: validate the request, then issue the "ImportDataSource" RPC
// (2018-03-20, HTTPS POST, AK credentials, JSON) with the request map as the body.
ImportDataSourceResponse Alibabacloud_Yundun-dbaudit20180320::Client::importDataSourceWithOptions(shared_ptr<ImportDataSourceRequest> request, shared_ptr<Darabonba_Util::RuntimeOptions> runtime) {
  Darabonba_Util::Client::validateModel(request);
  shared_ptr<Alibabacloud_OpenApi::OpenApiRequest> req = make_shared<Alibabacloud_OpenApi::OpenApiRequest>(map<string, boost::any>({
    {"body", boost::any(Darabonba_Util::Client::toMap(request))}
  }));
  return ImportDataSourceResponse(doRPCRequest(make_shared<string>("ImportDataSource"), make_shared<string>("2018-03-20"), make_shared<string>("HTTPS"), make_shared<string>("POST"), make_shared<string>("AK"), make_shared<string>("json"), req, runtime));
}
// Convenience overload: delegates with default RuntimeOptions.
ImportDataSourceResponse Alibabacloud_Yundun-dbaudit20180320::Client::importDataSource(shared_ptr<ImportDataSourceRequest> request) {
  shared_ptr<Darabonba_Util::RuntimeOptions> runtime = make_shared<Darabonba_Util::RuntimeOptions>();
  return importDataSourceWithOptions(request, runtime);
}
// IntegratedReport: validate the request, then issue the "IntegratedReport" RPC
// (2018-03-20, HTTPS POST, AK credentials, JSON) with the request map as the body.
IntegratedReportResponse Alibabacloud_Yundun-dbaudit20180320::Client::integratedReportWithOptions(shared_ptr<IntegratedReportRequest> request, shared_ptr<Darabonba_Util::RuntimeOptions> runtime) {
  Darabonba_Util::Client::validateModel(request);
  shared_ptr<Alibabacloud_OpenApi::OpenApiRequest> req = make_shared<Alibabacloud_OpenApi::OpenApiRequest>(map<string, boost::any>({
    {"body", boost::any(Darabonba_Util::Client::toMap(request))}
  }));
  return IntegratedReportResponse(doRPCRequest(make_shared<string>("IntegratedReport"), make_shared<string>("2018-03-20"), make_shared<string>("HTTPS"), make_shared<string>("POST"), make_shared<string>("AK"), make_shared<string>("json"), req, runtime));
}
// Convenience overload: delegates with default RuntimeOptions.
IntegratedReportResponse Alibabacloud_Yundun-dbaudit20180320::Client::integratedReport(shared_ptr<IntegratedReportRequest> request) {
  shared_ptr<Darabonba_Util::RuntimeOptions> runtime = make_shared<Darabonba_Util::RuntimeOptions>();
  return integratedReportWithOptions(request, runtime);
}
// ListAssociatedRules: validate the request, then issue the "ListAssociatedRules" RPC
// (2018-03-20, HTTPS POST, AK credentials, JSON) with the request map as the body.
ListAssociatedRulesResponse Alibabacloud_Yundun-dbaudit20180320::Client::listAssociatedRulesWithOptions(shared_ptr<ListAssociatedRulesRequest> request, shared_ptr<Darabonba_Util::RuntimeOptions> runtime) {
  Darabonba_Util::Client::validateModel(request);
  shared_ptr<Alibabacloud_OpenApi::OpenApiRequest> req = make_shared<Alibabacloud_OpenApi::OpenApiRequest>(map<string, boost::any>({
    {"body", boost::any(Darabonba_Util::Client::toMap(request))}
  }));
  return ListAssociatedRulesResponse(doRPCRequest(make_shared<string>("ListAssociatedRules"), make_shared<string>("2018-03-20"), make_shared<string>("HTTPS"), make_shared<string>("POST"), make_shared<string>("AK"), make_shared<string>("json"), req, runtime));
}
// Convenience overload: delegates with default RuntimeOptions.
ListAssociatedRulesResponse Alibabacloud_Yundun-dbaudit20180320::Client::listAssociatedRules(shared_ptr<ListAssociatedRulesRequest> request) {
  shared_ptr<Darabonba_Util::RuntimeOptions> runtime = make_shared<Darabonba_Util::RuntimeOptions>();
  return listAssociatedRulesWithOptions(request, runtime);
}
// ListDataSourceAttribute: validate the request, then issue the "ListDataSourceAttribute"
// RPC (2018-03-20, HTTPS POST, AK credentials, JSON) with the request map as the body.
ListDataSourceAttributeResponse Alibabacloud_Yundun-dbaudit20180320::Client::listDataSourceAttributeWithOptions(shared_ptr<ListDataSourceAttributeRequest> request, shared_ptr<Darabonba_Util::RuntimeOptions> runtime) {
  Darabonba_Util::Client::validateModel(request);
  shared_ptr<Alibabacloud_OpenApi::OpenApiRequest> req = make_shared<Alibabacloud_OpenApi::OpenApiRequest>(map<string, boost::any>({
    {"body", boost::any(Darabonba_Util::Client::toMap(request))}
  }));
  return ListDataSourceAttributeResponse(doRPCRequest(make_shared<string>("ListDataSourceAttribute"), make_shared<string>("2018-03-20"), make_shared<string>("HTTPS"), make_shared<string>("POST"), make_shared<string>("AK"), make_shared<string>("json"), req, runtime));
}
// Convenience overload: delegates with default RuntimeOptions.
ListDataSourceAttributeResponse Alibabacloud_Yundun-dbaudit20180320::Client::listDataSourceAttribute(shared_ptr<ListDataSourceAttributeRequest> request) {
  shared_ptr<Darabonba_Util::RuntimeOptions> runtime = make_shared<Darabonba_Util::RuntimeOptions>();
  return listDataSourceAttributeWithOptions(request, runtime);
}
// ListDataSources: validate the request, then issue the "ListDataSources" RPC
// (2018-03-20, HTTPS POST, AK credentials, JSON) with the request map as the body.
ListDataSourcesResponse Alibabacloud_Yundun-dbaudit20180320::Client::listDataSourcesWithOptions(shared_ptr<ListDataSourcesRequest> request, shared_ptr<Darabonba_Util::RuntimeOptions> runtime) {
  Darabonba_Util::Client::validateModel(request);
  shared_ptr<Alibabacloud_OpenApi::OpenApiRequest> req = make_shared<Alibabacloud_OpenApi::OpenApiRequest>(map<string, boost::any>({
    {"body", boost::any(Darabonba_Util::Client::toMap(request))}
  }));
  return ListDataSourcesResponse(doRPCRequest(make_shared<string>("ListDataSources"), make_shared<string>("2018-03-20"), make_shared<string>("HTTPS"), make_shared<string>("POST"), make_shared<string>("AK"), make_shared<string>("json"), req, runtime));
}
// Convenience overload: delegates with default RuntimeOptions.
ListDataSourcesResponse Alibabacloud_Yundun-dbaudit20180320::Client::listDataSources(shared_ptr<ListDataSourcesRequest> request) {
  shared_ptr<Darabonba_Util::RuntimeOptions> runtime = make_shared<Darabonba_Util::RuntimeOptions>();
  return listDataSourcesWithOptions(request, runtime);
}
// ListLogAlarmTasks: validate the request, then issue the "ListLogAlarmTasks" RPC
// (2018-03-20, HTTPS POST, AK credentials, JSON) with the request map as the body.
ListLogAlarmTasksResponse Alibabacloud_Yundun-dbaudit20180320::Client::listLogAlarmTasksWithOptions(shared_ptr<ListLogAlarmTasksRequest> request, shared_ptr<Darabonba_Util::RuntimeOptions> runtime) {
  Darabonba_Util::Client::validateModel(request);
  shared_ptr<Alibabacloud_OpenApi::OpenApiRequest> req = make_shared<Alibabacloud_OpenApi::OpenApiRequest>(map<string, boost::any>({
    {"body", boost::any(Darabonba_Util::Client::toMap(request))}
  }));
  return ListLogAlarmTasksResponse(doRPCRequest(make_shared<string>("ListLogAlarmTasks"), make_shared<string>("2018-03-20"), make_shared<string>("HTTPS"), make_shared<string>("POST"), make_shared<string>("AK"), make_shared<string>("json"), req, runtime));
}
// Convenience overload: delegates with default RuntimeOptions.
ListLogAlarmTasksResponse Alibabacloud_Yundun-dbaudit20180320::Client::listLogAlarmTasks(shared_ptr<ListLogAlarmTasksRequest> request) {
  shared_ptr<Darabonba_Util::RuntimeOptions> runtime = make_shared<Darabonba_Util::RuntimeOptions>();
  return listLogAlarmTasksWithOptions(request, runtime);
}
// ListReportPushTasks: validate the request, then issue the "ListReportPushTasks" RPC
// (2018-03-20, HTTPS POST, AK credentials, JSON) with the request map as the body.
ListReportPushTasksResponse Alibabacloud_Yundun-dbaudit20180320::Client::listReportPushTasksWithOptions(shared_ptr<ListReportPushTasksRequest> request, shared_ptr<Darabonba_Util::RuntimeOptions> runtime) {
  Darabonba_Util::Client::validateModel(request);
  shared_ptr<Alibabacloud_OpenApi::OpenApiRequest> req = make_shared<Alibabacloud_OpenApi::OpenApiRequest>(map<string, boost::any>({
    {"body", boost::any(Darabonba_Util::Client::toMap(request))}
  }));
  return ListReportPushTasksResponse(doRPCRequest(make_shared<string>("ListReportPushTasks"), make_shared<string>("2018-03-20"), make_shared<string>("HTTPS"), make_shared<string>("POST"), make_shared<string>("AK"), make_shared<string>("json"), req, runtime));
}
// Convenience overload: delegates with default RuntimeOptions.
ListReportPushTasksResponse Alibabacloud_Yundun-dbaudit20180320::Client::listReportPushTasks(shared_ptr<ListReportPushTasksRequest> request) {
  shared_ptr<Darabonba_Util::RuntimeOptions> runtime = make_shared<Darabonba_Util::RuntimeOptions>();
  return listReportPushTasksWithOptions(request, runtime);
}
// ListRuleGroups: validate the request, then issue the "ListRuleGroups" RPC
// (2018-03-20, HTTPS POST, AK credentials, JSON) with the request map as the body.
ListRuleGroupsResponse Alibabacloud_Yundun-dbaudit20180320::Client::listRuleGroupsWithOptions(shared_ptr<ListRuleGroupsRequest> request, shared_ptr<Darabonba_Util::RuntimeOptions> runtime) {
  Darabonba_Util::Client::validateModel(request);
  shared_ptr<Alibabacloud_OpenApi::OpenApiRequest> req = make_shared<Alibabacloud_OpenApi::OpenApiRequest>(map<string, boost::any>({
    {"body", boost::any(Darabonba_Util::Client::toMap(request))}
  }));
  return ListRuleGroupsResponse(doRPCRequest(make_shared<string>("ListRuleGroups"), make_shared<string>("2018-03-20"), make_shared<string>("HTTPS"), make_shared<string>("POST"), make_shared<string>("AK"), make_shared<string>("json"), req, runtime));
}
// Convenience overload: delegates with default RuntimeOptions.
ListRuleGroupsResponse Alibabacloud_Yundun-dbaudit20180320::Client::listRuleGroups(shared_ptr<ListRuleGroupsRequest> request) {
  shared_ptr<Darabonba_Util::RuntimeOptions> runtime = make_shared<Darabonba_Util::RuntimeOptions>();
  return listRuleGroupsWithOptions(request, runtime);
}
// ListRules: validate the request, then issue the "ListRules" RPC
// (2018-03-20, HTTPS POST, AK credentials, JSON) with the request map as the body.
ListRulesResponse Alibabacloud_Yundun-dbaudit20180320::Client::listRulesWithOptions(shared_ptr<ListRulesRequest> request, shared_ptr<Darabonba_Util::RuntimeOptions> runtime) {
  Darabonba_Util::Client::validateModel(request);
  shared_ptr<Alibabacloud_OpenApi::OpenApiRequest> req = make_shared<Alibabacloud_OpenApi::OpenApiRequest>(map<string, boost::any>({
    {"body", boost::any(Darabonba_Util::Client::toMap(request))}
  }));
  return ListRulesResponse(doRPCRequest(make_shared<string>("ListRules"), make_shared<string>("2018-03-20"), make_shared<string>("HTTPS"), make_shared<string>("POST"), make_shared<string>("AK"), make_shared<string>("json"), req, runtime));
}
// Convenience overload: delegates with default RuntimeOptions.
ListRulesResponse Alibabacloud_Yundun-dbaudit20180320::Client::listRules(shared_ptr<ListRulesRequest> request) {
  shared_ptr<Darabonba_Util::RuntimeOptions> runtime = make_shared<Darabonba_Util::RuntimeOptions>();
  return listRulesWithOptions(request, runtime);
}
// ListSqlTypeKeysForRule: validate the request, then issue the "ListSqlTypeKeysForRule"
// RPC (2018-03-20, HTTPS POST, AK credentials, JSON) with the request map as the body.
ListSqlTypeKeysForRuleResponse Alibabacloud_Yundun-dbaudit20180320::Client::listSqlTypeKeysForRuleWithOptions(shared_ptr<ListSqlTypeKeysForRuleRequest> request, shared_ptr<Darabonba_Util::RuntimeOptions> runtime) {
  Darabonba_Util::Client::validateModel(request);
  shared_ptr<Alibabacloud_OpenApi::OpenApiRequest> req = make_shared<Alibabacloud_OpenApi::OpenApiRequest>(map<string, boost::any>({
    {"body", boost::any(Darabonba_Util::Client::toMap(request))}
  }));
  return ListSqlTypeKeysForRuleResponse(doRPCRequest(make_shared<string>("ListSqlTypeKeysForRule"), make_shared<string>("2018-03-20"), make_shared<string>("HTTPS"), make_shared<string>("POST"), make_shared<string>("AK"), make_shared<string>("json"), req, runtime));
}
// Convenience overload: delegates with default RuntimeOptions.
ListSqlTypeKeysForRuleResponse Alibabacloud_Yundun-dbaudit20180320::Client::listSqlTypeKeysForRule(shared_ptr<ListSqlTypeKeysForRuleRequest> request) {
  shared_ptr<Darabonba_Util::RuntimeOptions> runtime = make_shared<Darabonba_Util::RuntimeOptions>();
  return listSqlTypeKeysForRuleWithOptions(request, runtime);
}
// ListSqlTypesForRule: validate the request, then issue the "ListSqlTypesForRule" RPC
// (2018-03-20, HTTPS POST, AK credentials, JSON) with the request map as the body.
ListSqlTypesForRuleResponse Alibabacloud_Yundun-dbaudit20180320::Client::listSqlTypesForRuleWithOptions(shared_ptr<ListSqlTypesForRuleRequest> request, shared_ptr<Darabonba_Util::RuntimeOptions> runtime) {
  Darabonba_Util::Client::validateModel(request);
  shared_ptr<Alibabacloud_OpenApi::OpenApiRequest> req = make_shared<Alibabacloud_OpenApi::OpenApiRequest>(map<string, boost::any>({
    {"body", boost::any(Darabonba_Util::Client::toMap(request))}
  }));
  return ListSqlTypesForRuleResponse(doRPCRequest(make_shared<string>("ListSqlTypesForRule"), make_shared<string>("2018-03-20"), make_shared<string>("HTTPS"), make_shared<string>("POST"), make_shared<string>("AK"), make_shared<string>("json"), req, runtime));
}
// Convenience overload: delegates with default RuntimeOptions.
ListSqlTypesForRuleResponse Alibabacloud_Yundun-dbaudit20180320::Client::listSqlTypesForRule(shared_ptr<ListSqlTypesForRuleRequest> request) {
  shared_ptr<Darabonba_Util::RuntimeOptions> runtime = make_shared<Darabonba_Util::RuntimeOptions>();
  return listSqlTypesForRuleWithOptions(request, runtime);
}
// ListSupportDbTypes: validate the request, then issue the "ListSupportDbTypes" RPC
// (2018-03-20, HTTPS POST, AK credentials, JSON) with the request map as the body.
ListSupportDbTypesResponse Alibabacloud_Yundun-dbaudit20180320::Client::listSupportDbTypesWithOptions(shared_ptr<ListSupportDbTypesRequest> request, shared_ptr<Darabonba_Util::RuntimeOptions> runtime) {
  Darabonba_Util::Client::validateModel(request);
  shared_ptr<Alibabacloud_OpenApi::OpenApiRequest> req = make_shared<Alibabacloud_OpenApi::OpenApiRequest>(map<string, boost::any>({
    {"body", boost::any(Darabonba_Util::Client::toMap(request))}
  }));
  return ListSupportDbTypesResponse(doRPCRequest(make_shared<string>("ListSupportDbTypes"), make_shared<string>("2018-03-20"), make_shared<string>("HTTPS"), make_shared<string>("POST"), make_shared<string>("AK"), make_shared<string>("json"), req, runtime));
}
// Convenience overload: delegates with default RuntimeOptions.
ListSupportDbTypesResponse Alibabacloud_Yundun-dbaudit20180320::Client::listSupportDbTypes(shared_ptr<ListSupportDbTypesRequest> request) {
  shared_ptr<Darabonba_Util::RuntimeOptions> runtime = make_shared<Darabonba_Util::RuntimeOptions>();
  return listSupportDbTypesWithOptions(request, runtime);
}
// ListSystemAlarms: validate the request, then issue the "ListSystemAlarms" RPC
// (2018-03-20, HTTPS POST, AK credentials, JSON) with the request map as the body.
ListSystemAlarmsResponse Alibabacloud_Yundun-dbaudit20180320::Client::listSystemAlarmsWithOptions(shared_ptr<ListSystemAlarmsRequest> request, shared_ptr<Darabonba_Util::RuntimeOptions> runtime) {
  Darabonba_Util::Client::validateModel(request);
  shared_ptr<Alibabacloud_OpenApi::OpenApiRequest> req = make_shared<Alibabacloud_OpenApi::OpenApiRequest>(map<string, boost::any>({
    {"body", boost::any(Darabonba_Util::Client::toMap(request))}
  }));
  return ListSystemAlarmsResponse(doRPCRequest(make_shared<string>("ListSystemAlarms"), make_shared<string>("2018-03-20"), make_shared<string>("HTTPS"), make_shared<string>("POST"), make_shared<string>("AK"), make_shared<string>("json"), req, runtime));
}
// Convenience overload: delegates with default RuntimeOptions.
ListSystemAlarmsResponse Alibabacloud_Yundun-dbaudit20180320::Client::listSystemAlarms(shared_ptr<ListSystemAlarmsRequest> request) {
  shared_ptr<Darabonba_Util::RuntimeOptions> runtime = make_shared<Darabonba_Util::RuntimeOptions>();
  return listSystemAlarmsWithOptions(request, runtime);
}
// ListSystemAlarmTasks: validate the request, then issue the "ListSystemAlarmTasks" RPC
// (2018-03-20, HTTPS POST, AK credentials, JSON) with the request map as the body.
ListSystemAlarmTasksResponse Alibabacloud_Yundun-dbaudit20180320::Client::listSystemAlarmTasksWithOptions(shared_ptr<ListSystemAlarmTasksRequest> request, shared_ptr<Darabonba_Util::RuntimeOptions> runtime) {
  Darabonba_Util::Client::validateModel(request);
  shared_ptr<Alibabacloud_OpenApi::OpenApiRequest> req = make_shared<Alibabacloud_OpenApi::OpenApiRequest>(map<string, boost::any>({
    {"body", boost::any(Darabonba_Util::Client::toMap(request))}
  }));
  return ListSystemAlarmTasksResponse(doRPCRequest(make_shared<string>("ListSystemAlarmTasks"), make_shared<string>("2018-03-20"), make_shared<string>("HTTPS"), make_shared<string>("POST"), make_shared<string>("AK"), make_shared<string>("json"), req, runtime));
}
// Convenience overload: delegates with default RuntimeOptions.
ListSystemAlarmTasksResponse Alibabacloud_Yundun-dbaudit20180320::Client::listSystemAlarmTasks(shared_ptr<ListSystemAlarmTasksRequest> request) {
  shared_ptr<Darabonba_Util::RuntimeOptions> runtime = make_shared<Darabonba_Util::RuntimeOptions>();
  return listSystemAlarmTasksWithOptions(request, runtime);
}
// ListTagKeys: validate the request, then issue the "ListTagKeys" RPC
// (2018-03-20, HTTPS POST, AK credentials, JSON) with the request map as the body.
ListTagKeysResponse Alibabacloud_Yundun-dbaudit20180320::Client::listTagKeysWithOptions(shared_ptr<ListTagKeysRequest> request, shared_ptr<Darabonba_Util::RuntimeOptions> runtime) {
  Darabonba_Util::Client::validateModel(request);
  shared_ptr<Alibabacloud_OpenApi::OpenApiRequest> req = make_shared<Alibabacloud_OpenApi::OpenApiRequest>(map<string, boost::any>({
    {"body", boost::any(Darabonba_Util::Client::toMap(request))}
  }));
  return ListTagKeysResponse(doRPCRequest(make_shared<string>("ListTagKeys"), make_shared<string>("2018-03-20"), make_shared<string>("HTTPS"), make_shared<string>("POST"), make_shared<string>("AK"), make_shared<string>("json"), req, runtime));
}
// Convenience overload: delegates with default RuntimeOptions.
ListTagKeysResponse Alibabacloud_Yundun-dbaudit20180320::Client::listTagKeys(shared_ptr<ListTagKeysRequest> request) {
  shared_ptr<Darabonba_Util::RuntimeOptions> runtime = make_shared<Darabonba_Util::RuntimeOptions>();
  return listTagKeysWithOptions(request, runtime);
}
// ListTagResources: validate the request, then issue the "ListTagResources" RPC
// (2018-03-20, HTTPS POST, AK credentials, JSON) with the request map as the body.
ListTagResourcesResponse Alibabacloud_Yundun-dbaudit20180320::Client::listTagResourcesWithOptions(shared_ptr<ListTagResourcesRequest> request, shared_ptr<Darabonba_Util::RuntimeOptions> runtime) {
  Darabonba_Util::Client::validateModel(request);
  shared_ptr<Alibabacloud_OpenApi::OpenApiRequest> req = make_shared<Alibabacloud_OpenApi::OpenApiRequest>(map<string, boost::any>({
    {"body", boost::any(Darabonba_Util::Client::toMap(request))}
  }));
  return ListTagResourcesResponse(doRPCRequest(make_shared<string>("ListTagResources"), make_shared<string>("2018-03-20"), make_shared<string>("HTTPS"), make_shared<string>("POST"), make_shared<string>("AK"), make_shared<string>("json"), req, runtime));
}
// Convenience overload: delegates with default RuntimeOptions.
ListTagResourcesResponse Alibabacloud_Yundun-dbaudit20180320::Client::listTagResources(shared_ptr<ListTagResourcesRequest> request) {
  shared_ptr<Darabonba_Util::RuntimeOptions> runtime = make_shared<Darabonba_Util::RuntimeOptions>();
  return listTagResourcesWithOptions(request, runtime);
}
// ListTemplatesForSqlRule: validate the request, then issue the "ListTemplatesForSqlRule"
// RPC (2018-03-20, HTTPS POST, AK credentials, JSON) with the request map as the body.
ListTemplatesForSqlRuleResponse Alibabacloud_Yundun-dbaudit20180320::Client::listTemplatesForSqlRuleWithOptions(shared_ptr<ListTemplatesForSqlRuleRequest> request, shared_ptr<Darabonba_Util::RuntimeOptions> runtime) {
  Darabonba_Util::Client::validateModel(request);
  shared_ptr<Alibabacloud_OpenApi::OpenApiRequest> req = make_shared<Alibabacloud_OpenApi::OpenApiRequest>(map<string, boost::any>({
    {"body", boost::any(Darabonba_Util::Client::toMap(request))}
  }));
  return ListTemplatesForSqlRuleResponse(doRPCRequest(make_shared<string>("ListTemplatesForSqlRule"), make_shared<string>("2018-03-20"), make_shared<string>("HTTPS"), make_shared<string>("POST"), make_shared<string>("AK"), make_shared<string>("json"), req, runtime));
}
// Convenience overload: delegates with default RuntimeOptions.
ListTemplatesForSqlRuleResponse Alibabacloud_Yundun-dbaudit20180320::Client::listTemplatesForSqlRule(shared_ptr<ListTemplatesForSqlRuleRequest> request) {
  shared_ptr<Darabonba_Util::RuntimeOptions> runtime = make_shared<Darabonba_Util::RuntimeOptions>();
  return listTemplatesForSqlRuleWithOptions(request, runtime);
}
// ListUsedSqlTypes: validate the request, then issue the "ListUsedSqlTypes" RPC
// (2018-03-20, HTTPS POST, AK credentials, JSON) with the request map as the body.
ListUsedSqlTypesResponse Alibabacloud_Yundun-dbaudit20180320::Client::listUsedSqlTypesWithOptions(shared_ptr<ListUsedSqlTypesRequest> request, shared_ptr<Darabonba_Util::RuntimeOptions> runtime) {
  Darabonba_Util::Client::validateModel(request);
  shared_ptr<Alibabacloud_OpenApi::OpenApiRequest> req = make_shared<Alibabacloud_OpenApi::OpenApiRequest>(map<string, boost::any>({
    {"body", boost::any(Darabonba_Util::Client::toMap(request))}
  }));
  return ListUsedSqlTypesResponse(doRPCRequest(make_shared<string>("ListUsedSqlTypes"), make_shared<string>("2018-03-20"), make_shared<string>("HTTPS"), make_shared<string>("POST"), make_shared<string>("AK"), make_shared<string>("json"), req, runtime));
}
// Convenience overload: delegates with default RuntimeOptions.
ListUsedSqlTypesResponse Alibabacloud_Yundun-dbaudit20180320::Client::listUsedSqlTypes(shared_ptr<ListUsedSqlTypesRequest> request) {
  shared_ptr<Darabonba_Util::RuntimeOptions> runtime = make_shared<Darabonba_Util::RuntimeOptions>();
  return listUsedSqlTypesWithOptions(request, runtime);
}
// ModifyBaseTemplateState: validate the request, then issue the "ModifyBaseTemplateState"
// RPC (2018-03-20, HTTPS POST, AK credentials, JSON) with the request map as the body.
ModifyBaseTemplateStateResponse Alibabacloud_Yundun-dbaudit20180320::Client::modifyBaseTemplateStateWithOptions(shared_ptr<ModifyBaseTemplateStateRequest> request, shared_ptr<Darabonba_Util::RuntimeOptions> runtime) {
  Darabonba_Util::Client::validateModel(request);
  shared_ptr<Alibabacloud_OpenApi::OpenApiRequest> req = make_shared<Alibabacloud_OpenApi::OpenApiRequest>(map<string, boost::any>({
    {"body", boost::any(Darabonba_Util::Client::toMap(request))}
  }));
  return ModifyBaseTemplateStateResponse(doRPCRequest(make_shared<string>("ModifyBaseTemplateState"), make_shared<string>("2018-03-20"), make_shared<string>("HTTPS"), make_shared<string>("POST"), make_shared<string>("AK"), make_shared<string>("json"), req, runtime));
}
// Convenience overload: delegates with default RuntimeOptions.
ModifyBaseTemplateStateResponse Alibabacloud_Yundun-dbaudit20180320::Client::modifyBaseTemplateState(shared_ptr<ModifyBaseTemplateStateRequest> request) {
  shared_ptr<Darabonba_Util::RuntimeOptions> runtime = make_shared<Darabonba_Util::RuntimeOptions>();
  return modifyBaseTemplateStateWithOptions(request, runtime);
}
// ModifyCustomName: validate the request, then issue the "ModifyCustomName" RPC
// (2018-03-20, HTTPS POST, AK credentials, JSON) with the request map as the body.
ModifyCustomNameResponse Alibabacloud_Yundun-dbaudit20180320::Client::modifyCustomNameWithOptions(shared_ptr<ModifyCustomNameRequest> request, shared_ptr<Darabonba_Util::RuntimeOptions> runtime) {
  Darabonba_Util::Client::validateModel(request);
  shared_ptr<Alibabacloud_OpenApi::OpenApiRequest> req = make_shared<Alibabacloud_OpenApi::OpenApiRequest>(map<string, boost::any>({
    {"body", boost::any(Darabonba_Util::Client::toMap(request))}
  }));
  return ModifyCustomNameResponse(doRPCRequest(make_shared<string>("ModifyCustomName"), make_shared<string>("2018-03-20"), make_shared<string>("HTTPS"), make_shared<string>("POST"), make_shared<string>("AK"), make_shared<string>("json"), req, runtime));
}
// Convenience overload: delegates with default RuntimeOptions.
ModifyCustomNameResponse Alibabacloud_Yundun-dbaudit20180320::Client::modifyCustomName(shared_ptr<ModifyCustomNameRequest> request) {
  shared_ptr<Darabonba_Util::RuntimeOptions> runtime = make_shared<Darabonba_Util::RuntimeOptions>();
  return modifyCustomNameWithOptions(request, runtime);
}
// ModifyDataSource: validate the request, then issue the "ModifyDataSource" RPC
// (2018-03-20, HTTPS POST, AK credentials, JSON) with the request map as the body.
ModifyDataSourceResponse Alibabacloud_Yundun-dbaudit20180320::Client::modifyDataSourceWithOptions(shared_ptr<ModifyDataSourceRequest> request, shared_ptr<Darabonba_Util::RuntimeOptions> runtime) {
  Darabonba_Util::Client::validateModel(request);
  shared_ptr<Alibabacloud_OpenApi::OpenApiRequest> req = make_shared<Alibabacloud_OpenApi::OpenApiRequest>(map<string, boost::any>({
    {"body", boost::any(Darabonba_Util::Client::toMap(request))}
  }));
  return ModifyDataSourceResponse(doRPCRequest(make_shared<string>("ModifyDataSource"), make_shared<string>("2018-03-20"), make_shared<string>("HTTPS"), make_shared<string>("POST"), make_shared<string>("AK"), make_shared<string>("json"), req, runtime));
}
// Convenience overload: delegates with default RuntimeOptions.
ModifyDataSourceResponse Alibabacloud_Yundun-dbaudit20180320::Client::modifyDataSource(shared_ptr<ModifyDataSourceRequest> request) {
  shared_ptr<Darabonba_Util::RuntimeOptions> runtime = make_shared<Darabonba_Util::RuntimeOptions>();
  return modifyDataSourceWithOptions(request, runtime);
}
// ModifyDataSourceAttribute: validate the request, then issue the "ModifyDataSourceAttribute"
// RPC (2018-03-20, HTTPS POST, AK credentials, JSON) with the request map as the body.
ModifyDataSourceAttributeResponse Alibabacloud_Yundun-dbaudit20180320::Client::modifyDataSourceAttributeWithOptions(shared_ptr<ModifyDataSourceAttributeRequest> request, shared_ptr<Darabonba_Util::RuntimeOptions> runtime) {
  Darabonba_Util::Client::validateModel(request);
  shared_ptr<Alibabacloud_OpenApi::OpenApiRequest> req = make_shared<Alibabacloud_OpenApi::OpenApiRequest>(map<string, boost::any>({
    {"body", boost::any(Darabonba_Util::Client::toMap(request))}
  }));
  return ModifyDataSourceAttributeResponse(doRPCRequest(make_shared<string>("ModifyDataSourceAttribute"), make_shared<string>("2018-03-20"), make_shared<string>("HTTPS"), make_shared<string>("POST"), make_shared<string>("AK"), make_shared<string>("json"), req, runtime));
}
// Convenience overload: delegates with default RuntimeOptions.
ModifyDataSourceAttributeResponse Alibabacloud_Yundun-dbaudit20180320::Client::modifyDataSourceAttribute(shared_ptr<ModifyDataSourceAttributeRequest> request) {
  shared_ptr<Darabonba_Util::RuntimeOptions> runtime = make_shared<Darabonba_Util::RuntimeOptions>();
  return modifyDataSourceAttributeWithOptions(request, runtime);
}
// ModifyInstanceAttribute: validate the request, then issue the "ModifyInstanceAttribute"
// RPC (2018-03-20, HTTPS POST, AK credentials, JSON) with the request map as the body.
ModifyInstanceAttributeResponse Alibabacloud_Yundun-dbaudit20180320::Client::modifyInstanceAttributeWithOptions(shared_ptr<ModifyInstanceAttributeRequest> request, shared_ptr<Darabonba_Util::RuntimeOptions> runtime) {
  Darabonba_Util::Client::validateModel(request);
  shared_ptr<Alibabacloud_OpenApi::OpenApiRequest> req = make_shared<Alibabacloud_OpenApi::OpenApiRequest>(map<string, boost::any>({
    {"body", boost::any(Darabonba_Util::Client::toMap(request))}
  }));
  return ModifyInstanceAttributeResponse(doRPCRequest(make_shared<string>("ModifyInstanceAttribute"), make_shared<string>("2018-03-20"), make_shared<string>("HTTPS"), make_shared<string>("POST"), make_shared<string>("AK"), make_shared<string>("json"), req, runtime));
}
// Convenience overload: delegates with default RuntimeOptions.
ModifyInstanceAttributeResponse Alibabacloud_Yundun-dbaudit20180320::Client::modifyInstanceAttribute(shared_ptr<ModifyInstanceAttributeRequest> request) {
  shared_ptr<Darabonba_Util::RuntimeOptions> runtime = make_shared<Darabonba_Util::RuntimeOptions>();
  return modifyInstanceAttributeWithOptions(request, runtime);
}
// ModifyLogAlarmTask: validate the request, then issue the "ModifyLogAlarmTask" RPC
// (2018-03-20, HTTPS POST, AK credentials, JSON) with the request map as the body.
ModifyLogAlarmTaskResponse Alibabacloud_Yundun-dbaudit20180320::Client::modifyLogAlarmTaskWithOptions(shared_ptr<ModifyLogAlarmTaskRequest> request, shared_ptr<Darabonba_Util::RuntimeOptions> runtime) {
  Darabonba_Util::Client::validateModel(request);
  shared_ptr<Alibabacloud_OpenApi::OpenApiRequest> req = make_shared<Alibabacloud_OpenApi::OpenApiRequest>(map<string, boost::any>({
    {"body", boost::any(Darabonba_Util::Client::toMap(request))}
  }));
  return ModifyLogAlarmTaskResponse(doRPCRequest(make_shared<string>("ModifyLogAlarmTask"), make_shared<string>("2018-03-20"), make_shared<string>("HTTPS"), make_shared<string>("POST"), make_shared<string>("AK"), make_shared<string>("json"), req, runtime));
}
// Convenience overload: delegates with default RuntimeOptions.
ModifyLogAlarmTaskResponse Alibabacloud_Yundun-dbaudit20180320::Client::modifyLogAlarmTask(shared_ptr<ModifyLogAlarmTaskRequest> request) {
  shared_ptr<Darabonba_Util::RuntimeOptions> runtime = make_shared<Darabonba_Util::RuntimeOptions>();
  return modifyLogAlarmTaskWithOptions(request, runtime);
}
// ModifyPlan: validate the request, then issue the "ModifyPlan" RPC
// (2018-03-20, HTTPS POST, AK credentials, JSON) with the request map as the body.
ModifyPlanResponse Alibabacloud_Yundun-dbaudit20180320::Client::modifyPlanWithOptions(shared_ptr<ModifyPlanRequest> request, shared_ptr<Darabonba_Util::RuntimeOptions> runtime) {
  Darabonba_Util::Client::validateModel(request);
  shared_ptr<Alibabacloud_OpenApi::OpenApiRequest> req = make_shared<Alibabacloud_OpenApi::OpenApiRequest>(map<string, boost::any>({
    {"body", boost::any(Darabonba_Util::Client::toMap(request))}
  }));
  return ModifyPlanResponse(doRPCRequest(make_shared<string>("ModifyPlan"), make_shared<string>("2018-03-20"), make_shared<string>("HTTPS"), make_shared<string>("POST"), make_shared<string>("AK"), make_shared<string>("json"), req, runtime));
}
// Convenience overload: delegates with default RuntimeOptions.
ModifyPlanResponse Alibabacloud_Yundun-dbaudit20180320::Client::modifyPlan(shared_ptr<ModifyPlanRequest> request) {
  shared_ptr<Darabonba_Util::RuntimeOptions> runtime = make_shared<Darabonba_Util::RuntimeOptions>();
  return modifyPlanWithOptions(request, runtime);
}
// ModifyReportPushTask: validate the request, then issue the "ModifyReportPushTask" RPC
// (2018-03-20, HTTPS POST, AK credentials, JSON) with the request map as the body.
ModifyReportPushTaskResponse Alibabacloud_Yundun-dbaudit20180320::Client::modifyReportPushTaskWithOptions(shared_ptr<ModifyReportPushTaskRequest> request, shared_ptr<Darabonba_Util::RuntimeOptions> runtime) {
  Darabonba_Util::Client::validateModel(request);
  shared_ptr<Alibabacloud_OpenApi::OpenApiRequest> req = make_shared<Alibabacloud_OpenApi::OpenApiRequest>(map<string, boost::any>({
    {"body", boost::any(Darabonba_Util::Client::toMap(request))}
  }));
  return ModifyReportPushTaskResponse(doRPCRequest(make_shared<string>("ModifyReportPushTask"), make_shared<string>("2018-03-20"), make_shared<string>("HTTPS"), make_shared<string>("POST"), make_shared<string>("AK"), make_shared<string>("json"), req, runtime));
}
// Convenience overload: delegates with default RuntimeOptions.
ModifyReportPushTaskResponse Alibabacloud_Yundun-dbaudit20180320::Client::modifyReportPushTask(shared_ptr<ModifyReportPushTaskRequest> request) {
  shared_ptr<Darabonba_Util::RuntimeOptions> runtime = make_shared<Darabonba_Util::RuntimeOptions>();
  return modifyReportPushTaskWithOptions(request, runtime);
}
// ModifyRuleGroup: validate the request, then issue the "ModifyRuleGroup" RPC
// (2018-03-20, HTTPS POST, AK credentials, JSON) with the request map as the body.
ModifyRuleGroupResponse Alibabacloud_Yundun-dbaudit20180320::Client::modifyRuleGroupWithOptions(shared_ptr<ModifyRuleGroupRequest> request, shared_ptr<Darabonba_Util::RuntimeOptions> runtime) {
  Darabonba_Util::Client::validateModel(request);
  shared_ptr<Alibabacloud_OpenApi::OpenApiRequest> req = make_shared<Alibabacloud_OpenApi::OpenApiRequest>(map<string, boost::any>({
    {"body", boost::any(Darabonba_Util::Client::toMap(request))}
  }));
  return ModifyRuleGroupResponse(doRPCRequest(make_shared<string>("ModifyRuleGroup"), make_shared<string>("2018-03-20"), make_shared<string>("HTTPS"), make_shared<string>("POST"), make_shared<string>("AK"), make_shared<string>("json"), req, runtime));
}
// Convenience overload: delegates with default RuntimeOptions.
ModifyRuleGroupResponse Alibabacloud_Yundun-dbaudit20180320::Client::modifyRuleGroup(shared_ptr<ModifyRuleGroupRequest> request) {
  shared_ptr<Darabonba_Util::RuntimeOptions> runtime = make_shared<Darabonba_Util::RuntimeOptions>();
  return modifyRuleGroupWithOptions(request, runtime);
}
// ModifySystemAlarmTask: validates |request|, serializes it into the RPC body,
// and performs the "ModifySystemAlarmTask" POST call (API version 2018-03-20).
ModifySystemAlarmTaskResponse Alibabacloud_Yundun-dbaudit20180320::Client::modifySystemAlarmTaskWithOptions(shared_ptr<ModifySystemAlarmTaskRequest> request, shared_ptr<Darabonba_Util::RuntimeOptions> runtime) {
  Darabonba_Util::Client::validateModel(request);
  shared_ptr<Alibabacloud_OpenApi::OpenApiRequest> req = make_shared<Alibabacloud_OpenApi::OpenApiRequest>(map<string, boost::any>({
    {"body", boost::any(Darabonba_Util::Client::toMap(request))}
  }));
  return ModifySystemAlarmTaskResponse(doRPCRequest(make_shared<string>("ModifySystemAlarmTask"), make_shared<string>("2018-03-20"), make_shared<string>("HTTPS"), make_shared<string>("POST"), make_shared<string>("AK"), make_shared<string>("json"), req, runtime));
}
// Convenience overload that uses default runtime options.
ModifySystemAlarmTaskResponse Alibabacloud_Yundun-dbaudit20180320::Client::modifySystemAlarmTask(shared_ptr<ModifySystemAlarmTaskRequest> request) {
  shared_ptr<Darabonba_Util::RuntimeOptions> runtime = make_shared<Darabonba_Util::RuntimeOptions>();
  return modifySystemAlarmTaskWithOptions(request, runtime);
}
// MoveResourceGroup: validates |request|, serializes it into the RPC body,
// and performs the "MoveResourceGroup" POST call (API version 2018-03-20).
MoveResourceGroupResponse Alibabacloud_Yundun-dbaudit20180320::Client::moveResourceGroupWithOptions(shared_ptr<MoveResourceGroupRequest> request, shared_ptr<Darabonba_Util::RuntimeOptions> runtime) {
  Darabonba_Util::Client::validateModel(request);
  shared_ptr<Alibabacloud_OpenApi::OpenApiRequest> req = make_shared<Alibabacloud_OpenApi::OpenApiRequest>(map<string, boost::any>({
    {"body", boost::any(Darabonba_Util::Client::toMap(request))}
  }));
  return MoveResourceGroupResponse(doRPCRequest(make_shared<string>("MoveResourceGroup"), make_shared<string>("2018-03-20"), make_shared<string>("HTTPS"), make_shared<string>("POST"), make_shared<string>("AK"), make_shared<string>("json"), req, runtime));
}
// Convenience overload that uses default runtime options.
MoveResourceGroupResponse Alibabacloud_Yundun-dbaudit20180320::Client::moveResourceGroup(shared_ptr<MoveResourceGroupRequest> request) {
  shared_ptr<Darabonba_Util::RuntimeOptions> runtime = make_shared<Darabonba_Util::RuntimeOptions>();
  return moveResourceGroupWithOptions(request, runtime);
}
// PciReport: validates |request|, serializes it into the RPC body,
// and performs the "PciReport" POST call (API version 2018-03-20).
PciReportResponse Alibabacloud_Yundun-dbaudit20180320::Client::pciReportWithOptions(shared_ptr<PciReportRequest> request, shared_ptr<Darabonba_Util::RuntimeOptions> runtime) {
  Darabonba_Util::Client::validateModel(request);
  shared_ptr<Alibabacloud_OpenApi::OpenApiRequest> req = make_shared<Alibabacloud_OpenApi::OpenApiRequest>(map<string, boost::any>({
    {"body", boost::any(Darabonba_Util::Client::toMap(request))}
  }));
  return PciReportResponse(doRPCRequest(make_shared<string>("PciReport"), make_shared<string>("2018-03-20"), make_shared<string>("HTTPS"), make_shared<string>("POST"), make_shared<string>("AK"), make_shared<string>("json"), req, runtime));
}
// Convenience overload that uses default runtime options.
PciReportResponse Alibabacloud_Yundun-dbaudit20180320::Client::pciReport(shared_ptr<PciReportRequest> request) {
  shared_ptr<Darabonba_Util::RuntimeOptions> runtime = make_shared<Darabonba_Util::RuntimeOptions>();
  return pciReportWithOptions(request, runtime);
}
// PutLoginCount: validates |request|, serializes it into the RPC body,
// and performs the "PutLoginCount" POST call (API version 2018-03-20).
PutLoginCountResponse Alibabacloud_Yundun-dbaudit20180320::Client::putLoginCountWithOptions(shared_ptr<PutLoginCountRequest> request, shared_ptr<Darabonba_Util::RuntimeOptions> runtime) {
  Darabonba_Util::Client::validateModel(request);
  shared_ptr<Alibabacloud_OpenApi::OpenApiRequest> req = make_shared<Alibabacloud_OpenApi::OpenApiRequest>(map<string, boost::any>({
    {"body", boost::any(Darabonba_Util::Client::toMap(request))}
  }));
  return PutLoginCountResponse(doRPCRequest(make_shared<string>("PutLoginCount"), make_shared<string>("2018-03-20"), make_shared<string>("HTTPS"), make_shared<string>("POST"), make_shared<string>("AK"), make_shared<string>("json"), req, runtime));
}
// Convenience overload that uses default runtime options.
PutLoginCountResponse Alibabacloud_Yundun-dbaudit20180320::Client::putLoginCount(shared_ptr<PutLoginCountRequest> request) {
  shared_ptr<Darabonba_Util::RuntimeOptions> runtime = make_shared<Darabonba_Util::RuntimeOptions>();
  return putLoginCountWithOptions(request, runtime);
}
// ReadMarkSystemAlarms: validates |request|, serializes it into the RPC body,
// and performs the "ReadMarkSystemAlarms" POST call (API version 2018-03-20).
ReadMarkSystemAlarmsResponse Alibabacloud_Yundun-dbaudit20180320::Client::readMarkSystemAlarmsWithOptions(shared_ptr<ReadMarkSystemAlarmsRequest> request, shared_ptr<Darabonba_Util::RuntimeOptions> runtime) {
  Darabonba_Util::Client::validateModel(request);
  shared_ptr<Alibabacloud_OpenApi::OpenApiRequest> req = make_shared<Alibabacloud_OpenApi::OpenApiRequest>(map<string, boost::any>({
    {"body", boost::any(Darabonba_Util::Client::toMap(request))}
  }));
  return ReadMarkSystemAlarmsResponse(doRPCRequest(make_shared<string>("ReadMarkSystemAlarms"), make_shared<string>("2018-03-20"), make_shared<string>("HTTPS"), make_shared<string>("POST"), make_shared<string>("AK"), make_shared<string>("json"), req, runtime));
}
// Convenience overload that uses default runtime options.
ReadMarkSystemAlarmsResponse Alibabacloud_Yundun-dbaudit20180320::Client::readMarkSystemAlarms(shared_ptr<ReadMarkSystemAlarmsRequest> request) {
  shared_ptr<Darabonba_Util::RuntimeOptions> runtime = make_shared<Darabonba_Util::RuntimeOptions>();
  return readMarkSystemAlarmsWithOptions(request, runtime);
}
// RefundInstance: validates |request|, serializes it into the RPC body,
// and performs the "RefundInstance" POST call (API version 2018-03-20).
RefundInstanceResponse Alibabacloud_Yundun-dbaudit20180320::Client::refundInstanceWithOptions(shared_ptr<RefundInstanceRequest> request, shared_ptr<Darabonba_Util::RuntimeOptions> runtime) {
  Darabonba_Util::Client::validateModel(request);
  shared_ptr<Alibabacloud_OpenApi::OpenApiRequest> req = make_shared<Alibabacloud_OpenApi::OpenApiRequest>(map<string, boost::any>({
    {"body", boost::any(Darabonba_Util::Client::toMap(request))}
  }));
  return RefundInstanceResponse(doRPCRequest(make_shared<string>("RefundInstance"), make_shared<string>("2018-03-20"), make_shared<string>("HTTPS"), make_shared<string>("POST"), make_shared<string>("AK"), make_shared<string>("json"), req, runtime));
}
// Convenience overload that uses default runtime options.
RefundInstanceResponse Alibabacloud_Yundun-dbaudit20180320::Client::refundInstance(shared_ptr<RefundInstanceRequest> request) {
  shared_ptr<Darabonba_Util::RuntimeOptions> runtime = make_shared<Darabonba_Util::RuntimeOptions>();
  return refundInstanceWithOptions(request, runtime);
}
// RegisterNoticeMail: validates |request|, serializes it into the RPC body,
// and performs the "RegisterNoticeMail" POST call (API version 2018-03-20).
RegisterNoticeMailResponse Alibabacloud_Yundun-dbaudit20180320::Client::registerNoticeMailWithOptions(shared_ptr<RegisterNoticeMailRequest> request, shared_ptr<Darabonba_Util::RuntimeOptions> runtime) {
  Darabonba_Util::Client::validateModel(request);
  shared_ptr<Alibabacloud_OpenApi::OpenApiRequest> req = make_shared<Alibabacloud_OpenApi::OpenApiRequest>(map<string, boost::any>({
    {"body", boost::any(Darabonba_Util::Client::toMap(request))}
  }));
  return RegisterNoticeMailResponse(doRPCRequest(make_shared<string>("RegisterNoticeMail"), make_shared<string>("2018-03-20"), make_shared<string>("HTTPS"), make_shared<string>("POST"), make_shared<string>("AK"), make_shared<string>("json"), req, runtime));
}
// Convenience overload that uses default runtime options.
RegisterNoticeMailResponse Alibabacloud_Yundun-dbaudit20180320::Client::registerNoticeMail(shared_ptr<RegisterNoticeMailRequest> request) {
  shared_ptr<Darabonba_Util::RuntimeOptions> runtime = make_shared<Darabonba_Util::RuntimeOptions>();
  return registerNoticeMailWithOptions(request, runtime);
}
// RemoveLogMaskConfig: validates |request|, serializes it into the RPC body,
// and performs the "RemoveLogMaskConfig" POST call (API version 2018-03-20).
RemoveLogMaskConfigResponse Alibabacloud_Yundun-dbaudit20180320::Client::removeLogMaskConfigWithOptions(shared_ptr<RemoveLogMaskConfigRequest> request, shared_ptr<Darabonba_Util::RuntimeOptions> runtime) {
  Darabonba_Util::Client::validateModel(request);
  shared_ptr<Alibabacloud_OpenApi::OpenApiRequest> req = make_shared<Alibabacloud_OpenApi::OpenApiRequest>(map<string, boost::any>({
    {"body", boost::any(Darabonba_Util::Client::toMap(request))}
  }));
  return RemoveLogMaskConfigResponse(doRPCRequest(make_shared<string>("RemoveLogMaskConfig"), make_shared<string>("2018-03-20"), make_shared<string>("HTTPS"), make_shared<string>("POST"), make_shared<string>("AK"), make_shared<string>("json"), req, runtime));
}
// Convenience overload that uses default runtime options.
RemoveLogMaskConfigResponse Alibabacloud_Yundun-dbaudit20180320::Client::removeLogMaskConfig(shared_ptr<RemoveLogMaskConfigRequest> request) {
  shared_ptr<Darabonba_Util::RuntimeOptions> runtime = make_shared<Darabonba_Util::RuntimeOptions>();
  return removeLogMaskConfigWithOptions(request, runtime);
}
// SendVerifyCodeMail: validates |request|, serializes it into the RPC body,
// and performs the "SendVerifyCodeMail" POST call (API version 2018-03-20).
SendVerifyCodeMailResponse Alibabacloud_Yundun-dbaudit20180320::Client::sendVerifyCodeMailWithOptions(shared_ptr<SendVerifyCodeMailRequest> request, shared_ptr<Darabonba_Util::RuntimeOptions> runtime) {
  Darabonba_Util::Client::validateModel(request);
  shared_ptr<Alibabacloud_OpenApi::OpenApiRequest> req = make_shared<Alibabacloud_OpenApi::OpenApiRequest>(map<string, boost::any>({
    {"body", boost::any(Darabonba_Util::Client::toMap(request))}
  }));
  return SendVerifyCodeMailResponse(doRPCRequest(make_shared<string>("SendVerifyCodeMail"), make_shared<string>("2018-03-20"), make_shared<string>("HTTPS"), make_shared<string>("POST"), make_shared<string>("AK"), make_shared<string>("json"), req, runtime));
}
// Convenience overload that uses default runtime options.
SendVerifyCodeMailResponse Alibabacloud_Yundun-dbaudit20180320::Client::sendVerifyCodeMail(shared_ptr<SendVerifyCodeMailRequest> request) {
  shared_ptr<Darabonba_Util::RuntimeOptions> runtime = make_shared<Darabonba_Util::RuntimeOptions>();
  return sendVerifyCodeMailWithOptions(request, runtime);
}
// SoxReport: validates |request|, serializes it into the RPC body,
// and performs the "SoxReport" POST call (API version 2018-03-20).
SoxReportResponse Alibabacloud_Yundun-dbaudit20180320::Client::soxReportWithOptions(shared_ptr<SoxReportRequest> request, shared_ptr<Darabonba_Util::RuntimeOptions> runtime) {
  Darabonba_Util::Client::validateModel(request);
  shared_ptr<Alibabacloud_OpenApi::OpenApiRequest> req = make_shared<Alibabacloud_OpenApi::OpenApiRequest>(map<string, boost::any>({
    {"body", boost::any(Darabonba_Util::Client::toMap(request))}
  }));
  return SoxReportResponse(doRPCRequest(make_shared<string>("SoxReport"), make_shared<string>("2018-03-20"), make_shared<string>("HTTPS"), make_shared<string>("POST"), make_shared<string>("AK"), make_shared<string>("json"), req, runtime));
}
// Convenience overload that uses default runtime options.
SoxReportResponse Alibabacloud_Yundun-dbaudit20180320::Client::soxReport(shared_ptr<SoxReportRequest> request) {
  shared_ptr<Darabonba_Util::RuntimeOptions> runtime = make_shared<Darabonba_Util::RuntimeOptions>();
  return soxReportWithOptions(request, runtime);
}
// StartAlarmTask: validates |request|, serializes it into the RPC body,
// and performs the "StartAlarmTask" POST call (API version 2018-03-20).
StartAlarmTaskResponse Alibabacloud_Yundun-dbaudit20180320::Client::startAlarmTaskWithOptions(shared_ptr<StartAlarmTaskRequest> request, shared_ptr<Darabonba_Util::RuntimeOptions> runtime) {
  Darabonba_Util::Client::validateModel(request);
  shared_ptr<Alibabacloud_OpenApi::OpenApiRequest> req = make_shared<Alibabacloud_OpenApi::OpenApiRequest>(map<string, boost::any>({
    {"body", boost::any(Darabonba_Util::Client::toMap(request))}
  }));
  return StartAlarmTaskResponse(doRPCRequest(make_shared<string>("StartAlarmTask"), make_shared<string>("2018-03-20"), make_shared<string>("HTTPS"), make_shared<string>("POST"), make_shared<string>("AK"), make_shared<string>("json"), req, runtime));
}
// Convenience overload that uses default runtime options.
StartAlarmTaskResponse Alibabacloud_Yundun-dbaudit20180320::Client::startAlarmTask(shared_ptr<StartAlarmTaskRequest> request) {
  shared_ptr<Darabonba_Util::RuntimeOptions> runtime = make_shared<Darabonba_Util::RuntimeOptions>();
  return startAlarmTaskWithOptions(request, runtime);
}
// StartInstance: validates |request|, serializes it into the RPC body,
// and performs the "StartInstance" POST call (API version 2018-03-20).
StartInstanceResponse Alibabacloud_Yundun-dbaudit20180320::Client::startInstanceWithOptions(shared_ptr<StartInstanceRequest> request, shared_ptr<Darabonba_Util::RuntimeOptions> runtime) {
  Darabonba_Util::Client::validateModel(request);
  shared_ptr<Alibabacloud_OpenApi::OpenApiRequest> req = make_shared<Alibabacloud_OpenApi::OpenApiRequest>(map<string, boost::any>({
    {"body", boost::any(Darabonba_Util::Client::toMap(request))}
  }));
  return StartInstanceResponse(doRPCRequest(make_shared<string>("StartInstance"), make_shared<string>("2018-03-20"), make_shared<string>("HTTPS"), make_shared<string>("POST"), make_shared<string>("AK"), make_shared<string>("json"), req, runtime));
}
// Convenience overload that uses default runtime options.
StartInstanceResponse Alibabacloud_Yundun-dbaudit20180320::Client::startInstance(shared_ptr<StartInstanceRequest> request) {
  shared_ptr<Darabonba_Util::RuntimeOptions> runtime = make_shared<Darabonba_Util::RuntimeOptions>();
  return startInstanceWithOptions(request, runtime);
}
// StopAlarmTask: validates |request|, serializes it into the RPC body,
// and performs the "StopAlarmTask" POST call (API version 2018-03-20).
StopAlarmTaskResponse Alibabacloud_Yundun-dbaudit20180320::Client::stopAlarmTaskWithOptions(shared_ptr<StopAlarmTaskRequest> request, shared_ptr<Darabonba_Util::RuntimeOptions> runtime) {
  Darabonba_Util::Client::validateModel(request);
  shared_ptr<Alibabacloud_OpenApi::OpenApiRequest> req = make_shared<Alibabacloud_OpenApi::OpenApiRequest>(map<string, boost::any>({
    {"body", boost::any(Darabonba_Util::Client::toMap(request))}
  }));
  return StopAlarmTaskResponse(doRPCRequest(make_shared<string>("StopAlarmTask"), make_shared<string>("2018-03-20"), make_shared<string>("HTTPS"), make_shared<string>("POST"), make_shared<string>("AK"), make_shared<string>("json"), req, runtime));
}
// Convenience overload that uses default runtime options.
StopAlarmTaskResponse Alibabacloud_Yundun-dbaudit20180320::Client::stopAlarmTask(shared_ptr<StopAlarmTaskRequest> request) {
  shared_ptr<Darabonba_Util::RuntimeOptions> runtime = make_shared<Darabonba_Util::RuntimeOptions>();
  return stopAlarmTaskWithOptions(request, runtime);
}
// TagResources: validates |request|, serializes it into the RPC body,
// and performs the "TagResources" POST call (API version 2018-03-20).
TagResourcesResponse Alibabacloud_Yundun-dbaudit20180320::Client::tagResourcesWithOptions(shared_ptr<TagResourcesRequest> request, shared_ptr<Darabonba_Util::RuntimeOptions> runtime) {
  Darabonba_Util::Client::validateModel(request);
  shared_ptr<Alibabacloud_OpenApi::OpenApiRequest> req = make_shared<Alibabacloud_OpenApi::OpenApiRequest>(map<string, boost::any>({
    {"body", boost::any(Darabonba_Util::Client::toMap(request))}
  }));
  return TagResourcesResponse(doRPCRequest(make_shared<string>("TagResources"), make_shared<string>("2018-03-20"), make_shared<string>("HTTPS"), make_shared<string>("POST"), make_shared<string>("AK"), make_shared<string>("json"), req, runtime));
}
// Convenience overload that uses default runtime options.
TagResourcesResponse Alibabacloud_Yundun-dbaudit20180320::Client::tagResources(shared_ptr<TagResourcesRequest> request) {
  shared_ptr<Darabonba_Util::RuntimeOptions> runtime = make_shared<Darabonba_Util::RuntimeOptions>();
  return tagResourcesWithOptions(request, runtime);
}
// UntagResources: validates |request|, serializes it into the RPC body,
// and performs the "UntagResources" POST call (API version 2018-03-20).
UntagResourcesResponse Alibabacloud_Yundun-dbaudit20180320::Client::untagResourcesWithOptions(shared_ptr<UntagResourcesRequest> request, shared_ptr<Darabonba_Util::RuntimeOptions> runtime) {
  Darabonba_Util::Client::validateModel(request);
  shared_ptr<Alibabacloud_OpenApi::OpenApiRequest> req = make_shared<Alibabacloud_OpenApi::OpenApiRequest>(map<string, boost::any>({
    {"body", boost::any(Darabonba_Util::Client::toMap(request))}
  }));
  return UntagResourcesResponse(doRPCRequest(make_shared<string>("UntagResources"), make_shared<string>("2018-03-20"), make_shared<string>("HTTPS"), make_shared<string>("POST"), make_shared<string>("AK"), make_shared<string>("json"), req, runtime));
}
// Convenience overload that uses default runtime options.
UntagResourcesResponse Alibabacloud_Yundun-dbaudit20180320::Client::untagResources(shared_ptr<UntagResourcesRequest> request) {
  shared_ptr<Darabonba_Util::RuntimeOptions> runtime = make_shared<Darabonba_Util::RuntimeOptions>();
  return untagResourcesWithOptions(request, runtime);
}
// UpgradeInstanceVersion: validates |request|, serializes it into the RPC body,
// and performs the "UpgradeInstanceVersion" POST call (API version 2018-03-20).
UpgradeInstanceVersionResponse Alibabacloud_Yundun-dbaudit20180320::Client::upgradeInstanceVersionWithOptions(shared_ptr<UpgradeInstanceVersionRequest> request, shared_ptr<Darabonba_Util::RuntimeOptions> runtime) {
  Darabonba_Util::Client::validateModel(request);
  shared_ptr<Alibabacloud_OpenApi::OpenApiRequest> req = make_shared<Alibabacloud_OpenApi::OpenApiRequest>(map<string, boost::any>({
    {"body", boost::any(Darabonba_Util::Client::toMap(request))}
  }));
  return UpgradeInstanceVersionResponse(doRPCRequest(make_shared<string>("UpgradeInstanceVersion"), make_shared<string>("2018-03-20"), make_shared<string>("HTTPS"), make_shared<string>("POST"), make_shared<string>("AK"), make_shared<string>("json"), req, runtime));
}
// Convenience overload that uses default runtime options.
UpgradeInstanceVersionResponse Alibabacloud_Yundun-dbaudit20180320::Client::upgradeInstanceVersion(shared_ptr<UpgradeInstanceVersionRequest> request) {
  shared_ptr<Darabonba_Util::RuntimeOptions> runtime = make_shared<Darabonba_Util::RuntimeOptions>();
  return upgradeInstanceVersionWithOptions(request, runtime);
}
|
{"hexsha": "778eb8c5be6375ad91838ceec324dbe09dab9ba9", "size": 114741, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "yundun-dbaudit-20180320/src/yundun_dbaudit_20180320.cpp", "max_stars_repo_name": "aliyun/alibabacloud-cpp-sdk", "max_stars_repo_head_hexsha": "0e7c0576abcd4ef1aef07d714b92654deb713c36", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 5.0, "max_stars_repo_stars_event_min_datetime": "2021-02-01T03:20:23.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-28T02:13:49.000Z", "max_issues_repo_path": "yundun-dbaudit-20180320/src/yundun_dbaudit_20180320.cpp", "max_issues_repo_name": "aliyun/alibabacloud-cpp-sdk", "max_issues_repo_head_hexsha": "0e7c0576abcd4ef1aef07d714b92654deb713c36", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 4.0, "max_issues_repo_issues_event_min_datetime": "2021-05-03T08:34:12.000Z", "max_issues_repo_issues_event_max_datetime": "2022-01-28T02:13:33.000Z", "max_forks_repo_path": "yundun-dbaudit-20180320/src/yundun_dbaudit_20180320.cpp", "max_forks_repo_name": "aliyun/alibabacloud-cpp-sdk", "max_forks_repo_head_hexsha": "0e7c0576abcd4ef1aef07d714b92654deb713c36", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 5.0, "max_forks_repo_forks_event_min_datetime": "2021-04-02T02:59:04.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-24T02:33:44.000Z", "avg_line_length": 77.3708698584, "max_line_length": 275, "alphanum_fraction": 0.8090394889, "num_tokens": 27985}
|
# standard library imports
import sys
import os
from os import path
# third party
import numpy as np
# local application imports
sys.path.append(path.dirname( path.dirname( path.abspath(__file__))))
from .base_lot import Lot
from utilities import *
class Orca(Lot):
    """ORCA level-of-theory backend.

    Builds an ORCA ``EnGrad`` input file for a geometry, runs ORCA in a
    per-user scratch directory under ``/tmp``, and parses the resulting
    ``.engrad`` file into ``self.E`` (energies) and ``self.grada``
    (Cartesian gradients).

    NOTE(review): relies on attributes provided by the ``Lot`` base class
    (``lot_inp_file``, ``functional``, ``basis``, ``nproc``, ``charge``,
    ``E``, ``grada``, ``search_tuple``, ``runall``, ...) — confirm against
    ``base_lot.py``.
    """

    def _build_input_string(self, geom, multiplicity):
        """Return the full ORCA input text for *geom* at *multiplicity*."""
        # lot_inp_file is False when no user-supplied header is configured;
        # in that case generate a default EnGrad header from functional/basis.
        if self.lot_inp_file is False:
            inpstring = '!'
            inpstring += ' ' + self.functional
            inpstring += ' ' + self.basis
            inpstring += ' EnGrad\n\n'  # optionally: SOSCF SlowConv
            inpstring += '%scf\nMaxIter 300\nconvergence strong\n sthresh 1e-7\n'
            inpstring += 'thresh 1e-11\n tcut 1e-13 \n directresetfreq 1 \n SOSCFStart 0.00033\nend\n'
            inpstring += '\n%maxcore 1000\n\n'
            inpstring += '%pal\nnproc {}\nend\n\n'.format(self.nproc)
        else:
            # Use the caller-provided header file verbatim.
            with open(self.lot_inp_file) as lot_inp:
                inpstring = lot_inp.read()
        # Append the geometry block: "*xyz charge multiplicity" then one
        # line per atom (symbol and coordinates separated by spaces).
        inpstring += '\n*xyz {} {}\n'.format(self.charge, multiplicity)
        for coord in geom:
            for field in coord:
                inpstring += str(field) + ' '
            inpstring += '\n'
        inpstring += '*'
        return inpstring

    def _scratch_dir(self, user):
        """Return the scratch directory: SLURM job/task scoped when available,
        otherwise a fixed per-user fallback."""
        try:
            slurm_job = os.environ['SLURM_ARRAY_JOB_ID']
        except KeyError:
            # Not running under a SLURM array job.
            return '/tmp/' + user + '/temporcarun'
        try:
            slurm_task = os.environ['SLURM_ARRAY_TASK_ID']
        except KeyError:
            return '/tmp/' + user + '/' + slurm_job
        return '/tmp/' + user + '/' + slurm_job + '/' + slurm_task

    def _parse_engrad(self, engradpath, multiplicity):
        """Parse an ORCA .engrad file, appending the energy to self.E and the
        gradient (list of [x, y, z] rows, in atomic units) to self.grada."""
        with open(engradpath) as engradfile:
            engradlines = engradfile.readlines()
        # Energy: the value appears two lines below the header comment.
        header = None
        for i, line in enumerate(engradlines):
            if 'current total energy' in line:
                header = i
            elif header is not None and i > header + 1:
                self.E.append((multiplicity, float(line.split()[0])))
                break
        # Gradient: one component per line, two lines below its header,
        # terminated by the next '#' comment line. Group into 3-vectors.
        header = None
        grad = []
        row = []
        for i, line in enumerate(engradlines):
            if 'current gradient' in line:
                header = i
            elif header is not None and i > header + 1:
                if '#' in line:
                    break
                row.append(float(line.split()[0]))
                if len(row) == 3:
                    grad.append(row)
                    row = []
        self.grada.append((multiplicity, grad))

    def run(self, geom, multiplicity):
        """Run one ORCA EnGrad calculation for *geom* at *multiplicity*.

        Side effects: writes/moves a temp input file, shells out to ORCA,
        and appends results to self.E and self.grada. Returns None.
        """
        inpstring = self._build_input_string(geom, multiplicity)
        tempfilename = 'tempORCAinp_{}'.format(multiplicity)
        with open(tempfilename, 'w') as inpfile:
            inpfile.write(inpstring)
        # Locate the orca executable on PATH.
        path2orca = os.popen('which orca').read().rstrip()
        user = os.environ['USER']
        cwd = os.environ['PWD']
        runscr = self._scratch_dir(user)
        os.system('mkdir -p {}'.format(runscr))
        os.system('mv {} {}/'.format(tempfilename, runscr))
        cmd = 'cd {}; {} {} > {}/{}.log; cd {}'.format(runscr, path2orca, tempfilename, runscr, tempfilename, cwd)
        os.system(cmd)
        self._parse_engrad(runscr + '/{}.engrad'.format(tempfilename), multiplicity)
        return

    def _ensure_run(self, coords):
        """Re-run ORCA only if *coords* differ from the last evaluated geometry."""
        if self.hasRanForCurrentCoords is False or (coords != self.currentCoords).any():
            self.currentCoords = coords.copy()
            geom = manage_xyz.np_to_xyz(self.geom, self.currentCoords)
            self.runall(geom)

    def get_energy(self, coords, multiplicity, state):
        """Return the energy for (multiplicity, state) at *coords*, in kcal/mol."""
        self._ensure_run(coords)
        tmp = self.search_tuple(self.E, multiplicity)
        return np.asarray(tmp[state][1]) * units.KCAL_MOL_PER_AU

    def get_gradient(self, coords, multiplicity, state):
        """Return the gradient for (multiplicity, state) at *coords*.

        ORCA gradients are already in atomic units, so no conversion applies.
        """
        self._ensure_run(coords)
        tmp = self.search_tuple(self.grada, multiplicity)
        return np.asarray(tmp[state][1])
|
{"hexsha": "7cb118173279e42eee0f72b122f3c3038cc38123", "size": 4358, "ext": "py", "lang": "Python", "max_stars_repo_path": "pygsm/level_of_theories/orca.py", "max_stars_repo_name": "espottesmith/pyGSM", "max_stars_repo_head_hexsha": "5bf263f9ef6cbee3ec16355c5eb1839446e704e7", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "pygsm/level_of_theories/orca.py", "max_issues_repo_name": "espottesmith/pyGSM", "max_issues_repo_head_hexsha": "5bf263f9ef6cbee3ec16355c5eb1839446e704e7", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "pygsm/level_of_theories/orca.py", "max_forks_repo_name": "espottesmith/pyGSM", "max_forks_repo_head_hexsha": "5bf263f9ef6cbee3ec16355c5eb1839446e704e7", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 37.2478632479, "max_line_length": 109, "alphanum_fraction": 0.5587425425, "include": true, "reason": "import numpy", "num_tokens": 1090}
|
[STATEMENT]
lemma mod_less_diff_mod: "
\<lbrakk> n mod m < r; r \<le> m; r \<le> (n::nat) \<rbrakk> \<Longrightarrow>
(n - r) mod m = m + n mod m - r"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>n mod m < r; r \<le> m; r \<le> n\<rbrakk> \<Longrightarrow> (n - r) mod m = m + n mod m - r
[PROOF STEP]
apply (case_tac "r = m")
[PROOF STATE]
proof (prove)
goal (2 subgoals):
1. \<lbrakk>n mod m < r; r \<le> m; r \<le> n; r = m\<rbrakk> \<Longrightarrow> (n - r) mod m = m + n mod m - r
2. \<lbrakk>n mod m < r; r \<le> m; r \<le> n; r \<noteq> m\<rbrakk> \<Longrightarrow> (n - r) mod m = m + n mod m - r
[PROOF STEP]
apply (simp add: mod_diff_self2)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>n mod m < r; r \<le> m; r \<le> n; r \<noteq> m\<rbrakk> \<Longrightarrow> (n - r) mod m = m + n mod m - r
[PROOF STEP]
apply (simp add: mod_diff1_eq[of r n m])
[PROOF STATE]
proof (prove)
goal:
No subgoals!
[PROOF STEP]
done
|
{"llama_tokens": 442, "file": "List-Infinite_CommonArith_Util_Div", "length": 4}
|
/**
* @file fsareasearch.cpp
* @brief Floater to search and list objects in view or is known to the viewer.
*
* $LicenseInfo:firstyear=2012&license=viewerlgpl$
* Phoenix Firestorm Viewer Source Code
* Copyright (c) 2012 Techwolf Lupindo
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation;
* version 2.1 of the License only.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*
* The Phoenix Firestorm Project, Inc., 1831 Oakwood Drive, Fairmont, Minnesota 56031-3225 USA
* http://www.firestormviewer.org
* $/LicenseInfo$
*/
#include "llviewerprecompiledheaders.h"
#include "fsareasearch.h"
#include "llavatarnamecache.h"
#include "llscrolllistctrl.h"
#include "lllineeditor.h"
#include "lltextbox.h"
#include "llfloaterreg.h"
#include "llagent.h"
#include "lltracker.h"
#include "llviewerobjectlist.h"
#include "llviewercontrol.h"
#include "llviewerparcelmgr.h"
#include <boost/algorithm/string/find.hpp> //for boost::ifind_first
#include "llviewerregion.h"
#include "llselectmgr.h"
#include "llcallbacklist.h"
#include "lltoolpie.h"
#include "llsaleinfo.h"
#include "llcheckboxctrl.h"
#include "llviewermenu.h" // handle_object_touch(), handle_buy()
#include "lltabcontainer.h"
#include "llspinctrl.h"
#include "lltoolgrab.h"
#include "fslslbridge.h"
#include "llcombobox.h"
#include "llnotificationsutil.h"
#include "fsassetblacklist.h"
#include "llworld.h"
#include "lltrans.h" // getString()
#include "llagentcamera.h" // gAgentCamera
#include "llviewerjoystick.h" // For disabling/re-enabling when requested to look at an object.
#include "llmoveview.h" // For LLPanelStandStopFlying::clearStandStopFlyingMode
#include "rlvactions.h"
#include "fsareasearchmenu.h"
#include "fsscrolllistctrl.h"
#include "llviewermediafocus.h"
#include "lltoolmgr.h"
// Maximum number of objects that can be (de-)selected in a single packet.
const S32 MAX_OBJECTS_PER_PACKET = 255;
// Time in seconds between list refreshes while the floater is active.
const F32 REFRESH_INTERVAL = 1.0f;
// Lower bound between refreshes, to avoid hurting frame rate.
const F32 MIN_REFRESH_INTERVAL = 0.25f;
// How far (meters) the avatar must move before distances are recomputed.
const F32 MIN_DISTANCE_MOVED = 1.0f;
// Seconds to wait before re-sending an object properties request.
const F32 REQUEST_TIMEOUT = 30.0f;
// Distance from the agent's global position to an object.
// HUD attachments are glued to the screen, so they report zero distance.
F32 calculateObjectDistance(LLVector3d agent_pos, LLViewerObject* object)
{
    if (object->isHUDAttachment())
    {
        return 0.f;
    }
    return dist_vec(agent_pos, object->getPositionGlobal());
}
// Parcel observer that forwards parcel-change notifications to the owning
// area-search floater so it can detect region changes.
class FSAreaSearch::FSParcelChangeObserver : public LLParcelObserver
{
public:
	FSParcelChangeObserver(FSAreaSearch* area_search_floater) : mAreaSearchFloater(area_search_floater) {}

private:
	// LLParcelObserver callback: invoked by LLViewerParcelMgr on parcel change.
	/*virtual*/ void changed()
	{
		if (mAreaSearchFloater)
		{
			mAreaSearchFloater->checkRegion();
		}
	}

	// Back-pointer to the floater; not owned.
	FSAreaSearch* mAreaSearchFloater;
};
// One-shot timer that touches a given object after a delay, if the object
// still exists when the timer fires.
class FSAreaSearchTouchTimer : public LLEventTimer
{
public:
	FSAreaSearchTouchTimer(const LLUUID& object_id, F32 timeout) :
		LLEventTimer(timeout),
		mObjectID(object_id)
	{
	}

	// LLEventTimer callback; returning TRUE deletes the timer after this run.
	/*virtual*/ BOOL tick()
	{
		// Look the object up by UUID — it may have left the object list
		// (derendered / region change) since the timer was scheduled.
		LLViewerObject* objectp = gObjectList.findObject(mObjectID);
		if (objectp)
		{
			FSPanelAreaSearchList::touchObject(objectp);
		}
		return TRUE;
	}

private:
	LLUUID mObjectID;
};
// Constructor: initializes all filter/option flags to their defaults,
// registers the child-panel factory callbacks, hooks the idle callback that
// drives object discovery, and starts observing parcel changes.
FSAreaSearch::FSAreaSearch(const LLSD& key) :
	LLFloater(key),
	mActive(false),
	mFilterForSale(false),
	mFilterForSaleMin(0),
	mFilterForSaleMax(999999),
	mFilterPhysical(false),
	mFilterTemporary(false),
	mRegexSearch(false),
	mFilterClickAction(false),
	mFilterLocked(false),
	mFilterPhantom(false),
	mFilterAttachment(false),
	mFilterMoaP(false),
	mFilterDistance(false),
	mFilterDistanceMin(0),
	mFilterDistanceMax(999999),
	mFilterPermCopy(false),
	mFilterPermModify(false),
	mFilterPermTransfer(false),
	mFilterAgentParcelOnly(false),
	mBeacons(false),
	mExcludeAttachment(true),
	mExcludeTemporary(true),
	mExcludePhysics(true),
	mExcludeChildPrims(true),
	mExcludeNeighborRegions(true),
	mRequestQueuePause(false),
	mRequestNeedsSent(false)
{
	//TODO: Multi-floater support and get rid of the singletin.
	mInstance = this;

	// Factory callbacks used by the floater XML to build the tab panels.
	mFactoryMap["area_search_list_panel"] = LLCallbackMap(createPanelList, this);
	mFactoryMap["area_search_find_panel"] = LLCallbackMap(createPanelFind, this);
	mFactoryMap["area_search_filter_panel"] = LLCallbackMap(createPanelFilter, this);
	mFactoryMap["area_search_advanced_panel"] = LLCallbackMap(createPanelAdvanced, this);
	mFactoryMap["area_search_options_panel"] = LLCallbackMap(createPanelOptions, this);

	// Register an idle update callback
	gIdleCallbacks.addFunction(idle, this);

	// Watch parcel changes so region crossings invalidate the cache.
	mParcelChangedObserver = new FSParcelChangeObserver(this);
	LLViewerParcelMgr::getInstance()->addObserver(mParcelChangedObserver);
}
// Destructor: unhooks the idle callback and the parcel observer registered
// in the constructor.
FSAreaSearch::~FSAreaSearch()
{
	// deleteFunction returns false when the callback was not registered;
	// that would indicate a bookkeeping bug, so log it.
	if (!gIdleCallbacks.deleteFunction(idle, this))
	{
		LL_WARNS("FSAreaSearch") << "FSAreaSearch::~FSAreaSearch() failed to delete callback" << LL_ENDL;
	}

	if (mParcelChangedObserver)
	{
		LLViewerParcelMgr::getInstance()->removeObserver(mParcelChangedObserver);
		delete mParcelChangedObserver;
		mParcelChangedObserver = NULL;
	}
}
// Post-build hook: caches the tab container and removes the advanced tab
// unless the user has enabled advanced area search.
BOOL FSAreaSearch::postBuild()
{
	mTab = getChild<LLTabContainer>("area_searchtab");

	if (!gSavedSettings.getBOOL("FSAreaSearchAdvanced"))
	{
		LLPanel* panelp = mTab->getPanelByName("area_search_advanced_panel");
		if (panelp)
		{
			mTab->removeTabPanel(panelp);
		}
	}

	return LLFloater::postBuild();
}
// Floater open hook: always start on the second tab (index 1).
void FSAreaSearch::onOpen(const LLSD& key)
{
	mTab->selectTab(1);
}
// Per-frame draw: after the normal floater draw, optionally add a debug
// beacon (with the object's description as label) for every object in the
// current result list.
void FSAreaSearch::draw()
{
	LLFloater::draw();

	// Cached settings/colors; fetched once and reused across frames.
	// NOTE(review): local statics use the m-prefix naming of members.
	static LLCachedControl<S32> beacon_line_width(gSavedSettings, "DebugBeaconLineWidth");
	static LLUIColor mBeaconColor = LLUIColorTable::getInstance()->getColor("AreaSearchBeaconColor");
	static LLUIColor mBeaconTextColor = LLUIColorTable::getInstance()->getColor("PathfindingDefaultBeaconTextColor");

	if (mBeacons)
	{
		std::vector<LLScrollListItem*> items = mPanelList->getResultList()->getAllData();

		for (std::vector<LLScrollListItem*>::const_iterator item_it = items.begin();
			item_it != items.end();
			++item_it)
		{
			const LLScrollListItem* item = (*item_it);
			// Items can outlive their objects; skip those no longer in view.
			LLViewerObject* objectp = gObjectList.findObject(item->getUUID());

			if (objectp)
			{
				const std::string &objectName = mObjectDetails[item->getUUID()].description;
				gObjectList.addDebugBeacon(objectp->getPositionAgent(), objectName, mBeaconColor, mBeaconTextColor, beacon_line_width);
			}
		}
	}
}
//static
void FSAreaSearch::idle(void* user_data)
{
FSAreaSearch* self = (FSAreaSearch*)user_data;
self->findObjects();
self->processRequestQueue();
}
// static
void* FSAreaSearch::createPanelList(void* data)
{
FSAreaSearch* self = (FSAreaSearch*)data;
self->mPanelList = new FSPanelAreaSearchList(self);
return self->mPanelList;
}
// static
void* FSAreaSearch::createPanelFind(void* data)
{
FSAreaSearch* self = (FSAreaSearch*)data;
self->mPanelFind = new FSPanelAreaSearchFind(self);
return self->mPanelFind;
}
// static
void* FSAreaSearch::createPanelFilter(void* data)
{
FSAreaSearch* self = (FSAreaSearch*)data;
self->mPanelFilter = new FSPanelAreaSearchFilter(self);
return self->mPanelFilter;
}
// static
void* FSAreaSearch::createPanelAdvanced(void* data)
{
FSAreaSearch* self = (FSAreaSearch*)data;
self->mPanelAdvanced = new FSPanelAreaSearchAdvanced(self);
return self->mPanelAdvanced;
}
// static
void* FSAreaSearch::createPanelOptions(void* data)
{
FSAreaSearch* self = (FSAreaSearch*)data;
self->mPanelOptions = new FSPanelAreaSearchOptions(self);
return self->mPanelOptions;
}
// Detects region changes and clears the cached object details when the
// agent has actually moved to a new region. Crossing into a neighboring
// region (while neighbor-region results are included) keeps the cache.
void FSAreaSearch::checkRegion()
{
	if (mInstance && mActive)
	{
		// Check if we changed region, and if we did, clear the object details cache.
		LLViewerRegion* region = gAgent.getRegion(); // getRegion can return NULL if disconnected.
		if (region && (region != mLastRegion))
		{
			if (!mExcludeNeighborRegions)
			{
				std::vector<LLViewerRegion*> uniqueRegions;
				region->getNeighboringRegions(uniqueRegions);
				if(std::find(uniqueRegions.begin(), uniqueRegions.end(), mLastRegion) != uniqueRegions.end())
				{
					// Crossed into a neighboring region, no need to clear everything.
					mLastRegion = region;
					return;
				}
				// else teleported into a new region
			}
			// Full reset: drop cached details, pending requests, and the
			// visible result list, then schedule a refresh.
			mLastRegion = region;
			mRequested = 0;
			mObjectDetails.clear();
			mRegionRequests.clear();
			mLastPropertiesReceivedTimer.start();
			mPanelList->getResultList()->deleteAllItems();
			mPanelList->setCounterText();
			mPanelList->setAgentLastPosition(gAgent.getPositionGlobal());
			mRefresh = true;
		}
	}
}
// Rebuilds the result list. When cache_clear is true the object-details
// cache and pending requests are discarded (full refresh); otherwise the
// cached details are kept and only their "listed" flags are reset so the
// list is repopulated from cache.
void FSAreaSearch::refreshList(bool cache_clear)
{
	mActive = true;
	checkRegion();
	if (cache_clear)
	{
		mRequested = 0;
		mObjectDetails.clear();
		mRegionRequests.clear();
		mLastPropertiesReceivedTimer.start();
	}
	else
	{
		// Keep cached properties; just mark every entry as not yet listed.
		for (std::map<LLUUID, FSObjectProperties>::iterator object_it = mObjectDetails.begin();
			object_it != mObjectDetails.end();
			++object_it)
		{
			object_it->second.listed = false;
		}
	}
	mPanelList->getResultList()->deleteAllItems();
	mPanelList->setCounterText();
	mPanelList->setAgentLastPosition(gAgent.getPositionGlobal());
	mNamesRequested.clear();
	mRefresh = true;
	findObjects();
}
void FSAreaSearch::findObjects()
{
    // Scan gObjectList for searchable objects: new objects get a cache
    // entry and a queued property request; objects whose properties have
    // already arrived are re-run through the filters. Afterwards the
    // pending-request counter is reconciled against reality.
    // Only loop through the gObjectList every so often. There is a performance hit if done too often.
    if (!(mActive && ((mRefresh && mLastUpdateTimer.getElapsedTimeF32() > MIN_REFRESH_INTERVAL) || mLastUpdateTimer.getElapsedTimeF32() > REFRESH_INTERVAL)))
    {
        return;
    }

    LLViewerRegion* our_region = gAgent.getRegion();
    if (!our_region)
    {
        // Got disconnected or is in the middle of a teleport.
        return;
    }

    LL_DEBUGS("FSAreaSearch_spammy") << "Doing a FSAreaSearch::findObjects" << LL_ENDL;

    mLastUpdateTimer.stop(); // stop sets getElapsedTimeF32() time to zero.

    // Pause processing of requestqueue until done adding new requests.
    mRequestQueuePause = true;
    checkRegion();
    mRefresh = false;
    mSearchableObjects = 0;

    S32 object_count = gObjectList.getNumObjects();
    for (S32 i = 0; i < object_count; i++)
    {
        LLViewerObject *objectp = gObjectList.getObject(i);
        if (!(objectp && isSearchableObject(objectp, our_region)))
        {
            continue;
        }

        LLUUID object_id = objectp->getID();

        if (object_id.isNull())
        {
            LL_WARNS("FSAreaSearch") << "WTF?! Selectable object with id of NULL!!" << LL_ENDL;
            continue;
        }

        mSearchableObjects++;

        if (mObjectDetails.count(object_id) == 0)
        {
            // First sighting: create the cache entry and flag it so
            // processRequestQueue() asks the server for its properties.
            FSObjectProperties& details = mObjectDetails[object_id];
            details.id = object_id;
            details.local_id = objectp->getLocalID();
            details.region_handle = objectp->getRegion()->getHandle();
            mRequestNeedsSent = true;
            mRequested++;
        }
        else
        {
            FSObjectProperties& details = mObjectDetails[object_id];
            if (details.request == FSObjectProperties::FINISHED)
            {
                // Properties already cached; just re-check the filters.
                matchObject(details, objectp);
            }

            if (details.request == FSObjectProperties::FAILED)
            {
                // object came back into view
                details.request = FSObjectProperties::NEED;
                details.local_id = objectp->getLocalID();
                details.region_handle = objectp->getRegion()->getHandle();
                mRequestNeedsSent = true;
                mRequested++;
            }
        }
    }

    mPanelList->updateScrollList();

    S32 request_count = 0;
    // requests for non-existent objects will never arrive, check and update the queue.
    for (std::map<LLUUID, FSObjectProperties>::iterator object_it = mObjectDetails.begin();
        object_it != mObjectDetails.end();
        ++object_it)
    {
        if (object_it->second.request == FSObjectProperties::NEED || object_it->second.request == FSObjectProperties::SENT)
        {
            LLUUID id = object_it->second.id;
            LLViewerObject* objectp = gObjectList.findObject(id);
            if (!objectp)
            {
                object_it->second.request = FSObjectProperties::FAILED;
                mRequested--;
            }
            else
            {
                request_count++;
            }
        }
    }

    // Keep the pending counter honest if it drifted from the real queue.
    if (mRequested != request_count)
    {
        LL_DEBUGS("FSAreaSearch") << "Requested mismatch: " << request_count << " actual vs. " << mRequested << LL_ENDL;
        mRequested = request_count;
    }

    updateCounterText();
    mLastUpdateTimer.start(); // start also reset elapsed time to zero
    mRequestQueuePause = false;
}
bool FSAreaSearch::isSearchableObject(LLViewerObject* objectp, LLViewerRegion* our_region)
{
    // Baseline requirements: the object must live in a connected region,
    // must not be terrain (land has no object properties), must be
    // selectable (the server only sends properties for selectable objects),
    // and must not be an avatar (avatars are special objects without
    // normal object properties).
    if (!objectp->getRegion()
        || objectp->getPCode() == LLViewerObject::LL_VO_SURFACE_PATCH
        || !objectp->mbCanSelect
        || objectp->isAvatar())
    {
        return false;
    }

    //-----------------------------------------------------------------------
    // User-configured exclusions
    //-----------------------------------------------------------------------

    // Child prims are excluded unless the object is a root prim (or the
    // root-edit prim of an attachment).
    if (mExcludeChildPrims && !(objectp->isRoot() || (objectp->isAttachment() && objectp->isRootEdit())))
    {
        return false;
    }

    if (mExcludeNeighborRegions && objectp->getRegion() != our_region)
    {
        return false;
    }

    if (mExcludeAttachment && objectp->isAttachment())
    {
        return false;
    }

    if (mExcludePhysics && objectp->flagUsePhysics())
    {
        return false;
    }

    if (mExcludeTemporary && objectp->flagTemporaryOnRez())
    {
        return false;
    }

    return true;
}
void FSAreaSearch::processRequestQueue()
{
    // Periodic pump for the property-request queue: requeues timed-out
    // requests, then sends batched ObjectSelect/ObjectDeselect request
    // pairs for objects in the NEED state, region by region.
    if (!mActive || mRequestQueuePause)
    {
        return;
    }

    if (mLastPropertiesReceivedTimer.getElapsedTimeF32() > REQUEST_TIMEOUT)
    {
        // Nothing has arrived within REQUEST_TIMEOUT seconds: assume the
        // outstanding (SENT) requests were lost and mark them for resend.
        LL_DEBUGS("FSAreaSearch") << "Timeout reached, resending requests."<< LL_ENDL;
        S32 request_count = 0;
        S32 failed_count = 0;
        for (std::map<LLUUID, FSObjectProperties>::iterator object_it = mObjectDetails.begin();
            object_it != mObjectDetails.end();
            ++object_it)
        {
            if (object_it->second.request == FSObjectProperties::SENT)
            {
                object_it->second.request = FSObjectProperties::NEED;
                mRequestNeedsSent = true;
                request_count++;
            }

            if (object_it->second.request == FSObjectProperties::FAILED)
            {
                failed_count++;
            }
        }

        mRegionRequests.clear();
        mLastPropertiesReceivedTimer.start();

        if (!mRequestNeedsSent)
        {
            LL_DEBUGS("FSAreaSearch") << "No pending requests found."<< LL_ENDL;
        }
        else
        {
            LL_DEBUGS("FSAreaSearch") << request_count << " pending requests found."<< LL_ENDL;
        }
        LL_DEBUGS("FSAreaSearch") << failed_count << " failed requests found."<< LL_ENDL;
    }

    if (!mRequestNeedsSent)
    {
        return;
    }
    mRequestNeedsSent = false;

    for (LLWorld::region_list_t::const_iterator iter = LLWorld::getInstance()->getRegionList().begin();
        iter != LLWorld::getInstance()->getRegionList().end(); ++iter)
    {
        LLViewerRegion* regionp = *iter;
        U64 region_handle = regionp->getHandle();
        // Too many requests already in flight for this region: defer the
        // rest to a later pump. NOTE(review): the exact threshold appears
        // to bound outstanding per-region requests -- confirm intent.
        if (mRegionRequests[region_handle] > (MAX_OBJECTS_PER_PACKET + 128))
        {
            mRequestNeedsSent = true;
            return;
        }

        std::vector<U32> request_list;
        bool need_continue = false;

        for (std::map<LLUUID, FSObjectProperties>::iterator object_it = mObjectDetails.begin();
            object_it != mObjectDetails.end();
            ++object_it)
        {
            if (object_it->second.request == FSObjectProperties::NEED && object_it->second.region_handle == region_handle)
            {
                request_list.push_back(object_it->second.local_id);
                object_it->second.request = FSObjectProperties::SENT;
                mRegionRequests[region_handle]++;

                // Batch cap reached: send what we have for this region and
                // leave mRequestNeedsSent set so the remainder goes out on
                // a later pump.
                if (mRegionRequests[region_handle] >= ((MAX_OBJECTS_PER_PACKET * 3) - 3))
                {
                    requestObjectProperties(request_list, true, regionp);
                    requestObjectProperties(request_list, false, regionp);
                    mRequestNeedsSent = true;
                    need_continue = true;
                    break;
                }
            }
        }

        if (need_continue)
        {
            continue;
        }

        if (!request_list.empty())
        {
            // Select-then-deselect makes the server send ObjectProperties
            // without leaving the objects selected.
            requestObjectProperties(request_list, true, regionp);
            requestObjectProperties(request_list, false, regionp);
        }
    }
}
void FSAreaSearch::requestObjectProperties(const std::vector<U32>& request_list, bool select, LLViewerRegion* regionp)
{
    // Send ObjectSelect (select == true) or ObjectDeselect messages to
    // 'regionp' for every object local-id in request_list. Requests are
    // packed into as few messages as possible, flushing whenever the
    // message buffer fills or MAX_OBJECTS_PER_PACKET is reached.
    bool start_new_message = true;
    S32 select_count = 0;
    LLMessageSystem* msg = gMessageSystem;

    for (std::vector<U32>::const_iterator iter = request_list.begin();
        iter != request_list.end(); ++iter)
    {
        if (start_new_message)
        {
            if (select)
            {
                msg->newMessageFast(_PREHASH_ObjectSelect);
            }
            else
            {
                msg->newMessageFast(_PREHASH_ObjectDeselect);
            }
            msg->nextBlockFast(_PREHASH_AgentData);
            msg->addUUIDFast(_PREHASH_AgentID, gAgentID);
            msg->addUUIDFast(_PREHASH_SessionID, gAgentSessionID);
            // NOTE(review): the AgentData header also increments
            // select_count, so each packet carries slightly fewer than
            // MAX_OBJECTS_PER_PACKET object blocks -- kept as-is since it
            // mirrors the historical selection-message packing.
            select_count++;
            start_new_message = false;
        }

        msg->nextBlockFast(_PREHASH_ObjectData);
        msg->addU32Fast(_PREHASH_ObjectLocalID, (*iter));
        select_count++;

        if (msg->isSendFull(NULL) || select_count >= MAX_OBJECTS_PER_PACKET)
        {
            LL_DEBUGS("FSAreaSearch") << "Sent one full " << (select ? "ObjectSelect" : "ObjectDeselect") << " message with " << select_count << " object data blocks." << LL_ENDL;
            msg->sendReliable(regionp->getHost());
            select_count = 0;
            start_new_message = true;
        }
    }

    if (!start_new_message)
    {
        // Flush the final, partially-filled message.
        // (Fixed typo in the log text: "partcial" -> "partial".)
        LL_DEBUGS("FSAreaSearch") << "Sent one partial " << (select ? "ObjectSelect" : "ObjectDeselect") << " message with " << select_count << " object data blocks." << LL_ENDL;
        msg->sendReliable(regionp->getHost());
    }
}
void FSAreaSearch::processObjectProperties(LLMessageSystem* msg)
{
    // Handler for incoming ObjectProperties messages: unpacks each
    // ObjectData block into the details cache and, if the object passes
    // the current filters, adds it to the result list.
    // This function is called by llviewermessage even if no floater has been created.
    if (!(mInstance && mActive))
    {
        return;
    }

    LLViewerRegion* our_region = gAgent.getRegion();
    bool counter_text_update = false;

    S32 count = msg->getNumberOfBlocksFast(_PREHASH_ObjectData);
    LL_DEBUGS("FSAreaSearch") << "Got processObjectProperties message with " << count << " object(s)" << LL_ENDL;
    for (S32 i = 0; i < count; i++)
    {
        LLUUID object_id;
        msg->getUUIDFast(_PREHASH_ObjectData, _PREHASH_ObjectID, object_id, i);
        if (object_id.isNull())
        {
            LL_WARNS("FSAreaSearch") << "Got Object Properties with NULL id" << LL_ENDL;
            continue;
        }

        LLViewerObject* objectp = gObjectList.findObject(object_id);
        if (!objectp)
        {
            continue;
        }

        FSObjectProperties& details = mObjectDetails[object_id];
        if (details.request != FSObjectProperties::FINISHED)
        {
            // We cache un-requested objects (to avoid having to request them later)
            // and requested objects.
            details.request = FSObjectProperties::FINISHED;
            mLastPropertiesReceivedTimer.start();

            if (details.id.isNull())
            {
                // Received object properties without having requested them.
                details.id = object_id;
            }
            else
            {
                // This answers one of our own requests: update the
                // pending counters.
                if (mRequested > 0)
                {
                    mRequested--;
                }
                mRegionRequests[details.region_handle]--;
                counter_text_update = true;
            }

            // Unpack every field the ObjectData block carries.
            msg->getUUIDFast(_PREHASH_ObjectData, _PREHASH_CreatorID, details.creator_id, i);
            msg->getUUIDFast(_PREHASH_ObjectData, _PREHASH_OwnerID, details.owner_id, i);
            msg->getUUIDFast(_PREHASH_ObjectData, _PREHASH_GroupID, details.group_id, i);
            msg->getU64Fast(_PREHASH_ObjectData, _PREHASH_CreationDate, details.creation_date, i);
            msg->getU32Fast(_PREHASH_ObjectData, _PREHASH_BaseMask, details.base_mask, i);
            msg->getU32Fast(_PREHASH_ObjectData, _PREHASH_OwnerMask, details.owner_mask, i);
            msg->getU32Fast(_PREHASH_ObjectData,_PREHASH_GroupMask, details.group_mask, i);
            msg->getU32Fast(_PREHASH_ObjectData, _PREHASH_EveryoneMask, details.everyone_mask, i);
            msg->getU32Fast(_PREHASH_ObjectData, _PREHASH_NextOwnerMask, details.next_owner_mask, i);
            details.sale_info.unpackMultiMessage(msg, _PREHASH_ObjectData, i);
            details.ag_perms.unpackMessage(msg, _PREHASH_ObjectData, _PREHASH_AggregatePerms, i);
            details.ag_texture_perms.unpackMessage(msg, _PREHASH_ObjectData, _PREHASH_AggregatePermTextures, i);
            details.ag_texture_perms_owner.unpackMessage(msg, _PREHASH_ObjectData, _PREHASH_AggregatePermTexturesOwner, i);
            details.category.unpackMultiMessage(msg, _PREHASH_ObjectData, i);
            msg->getUUIDFast(_PREHASH_ObjectData, _PREHASH_LastOwnerID, details.last_owner_id, i);
            msg->getStringFast(_PREHASH_ObjectData, _PREHASH_Name, details.name, i);
            msg->getStringFast(_PREHASH_ObjectData, _PREHASH_Description, details.description, i);
            msg->getStringFast(_PREHASH_ObjectData, _PREHASH_TouchName, details.touch_name, i);
            msg->getStringFast(_PREHASH_ObjectData, _PREHASH_SitName, details.sit_name, i);

            // Texture IDs arrive as a packed binary blob of 16-byte UUIDs.
            S32 size = msg->getSizeFast(_PREHASH_ObjectData, i, _PREHASH_TextureID);
            if (size > 0)
            {
                S8 packed_buffer[SELECT_MAX_TES * UUID_BYTES];
                msg->getBinaryDataFast(_PREHASH_ObjectData, _PREHASH_TextureID, packed_buffer, 0, i, SELECT_MAX_TES * UUID_BYTES);

                for (S32 buf_offset = 0; buf_offset < size; buf_offset += UUID_BYTES)
                {
                    LLUUID tid;
                    memcpy(tid.mData, packed_buffer + buf_offset, UUID_BYTES); /* Flawfinder: ignore */
                    details.texture_ids.push_back(tid);
                }
            }

            details.permissions.init(details.creator_id, details.owner_id, details.last_owner_id, details.group_id);
            details.permissions.initMasks(details.base_mask, details.owner_mask, details.everyone_mask, details.group_mask, details.next_owner_mask);

            // Sets the group owned BOOL and real owner id, group or owner depending if object is group owned.
            details.permissions.getOwnership(details.ownership_id, details.group_owned);

            LL_DEBUGS("FSAreaSearch_spammy") << "Got properties for object: " << object_id << LL_ENDL;

            if (isSearchableObject(objectp, our_region))
            {
                matchObject(details, objectp);
            }
        }
    }

    if (counter_text_update)
    {
        updateCounterText();
    }
}
void FSAreaSearch::matchObject(FSObjectProperties& details, LLViewerObject* objectp)
{
    // Run one object through every enabled filter and the current search
    // terms; if it passes everything, add a row for it to the List tab and
    // mark it listed so it is not added twice.
    if (details.listed)
    {
        // object already listed on the scroll list.
        return;
    }

    //-----------------------------------------------------------------------
    // Filters
    //-----------------------------------------------------------------------

    if (mFilterForSale && !(details.sale_info.isForSale() && (details.sale_info.getSalePrice() >= mFilterForSaleMin && details.sale_info.getSalePrice() <= mFilterForSaleMax)))
    {
        return;
    }

    if (mFilterDistance)
    {
        S32 distance = (S32)calculateObjectDistance(mPanelList->getAgentLastPosition(), objectp);// used mAgentLastPosition instead of gAgent->getPositionGlobal for performance
        if (!(distance >= mFilterDistanceMin && distance <= mFilterDistanceMax))
        {
            return;
        }
    }

    if (mFilterClickAction)
    {
        switch(mFilterClickActionType)
        {
        case 0: // "(blank)", should not end up here, but just in case
            break;
        case 1: // "any" mouse click action
            if (!(objectp->flagHandleTouch() || objectp->getClickAction() != 0))
            {
                return;
            }
            break;
        case 2: // "touch" is a separate mouse click action flag
            if (!objectp->flagHandleTouch())
            {
                return;
            }
            break;
        default: // all other mouse click action types
            // NOTE(review): filter type N appears to map to click action
            // N-2 (offset for the "(blank)" and "any" entries) -- confirm
            // against the combo box definition.
            if ((mFilterClickActionType - 2) != objectp->getClickAction())
            {
                return;
            }
            break;
        }
    }

    //TODO: texture id search
    // for (uuid_vec_t::const_iterator texture_it = details.texture_ids.begin();
    // texture_it != details.texture_ids.end(); ++texture_it)
    // {
    // if ( "" == (*texture_it).asString())
    // {
    // }
    // }

    if (mFilterPhysical && !objectp->flagUsePhysics())
    {
        return;
    }

    if (mFilterTemporary && !objectp->flagTemporaryOnRez())
    {
        return;
    }

    // "Locked" = the owner has no move permission; skip movable objects.
    if (mFilterLocked && (details.owner_mask & PERM_MOVE))
    {
        return;
    }

    if (mFilterPhantom && !objectp->flagPhantom())
    {
        return;
    }

    if (mFilterAttachment && !objectp->isAttachment())
    {
        return;
    }

    if (mFilterMoaP)
    {
        // Media-on-a-prim: at least one texture face must carry media.
        bool moap = false;
        U8 texture_count = objectp->getNumTEs();
        for(U8 i = 0; i < texture_count; i++)
        {
            if(objectp->getTE(i)->hasMedia())
            {
                moap = true;
                break;
            }
        }

        if(!moap)
        {
            return;
        }
    }

    if (mFilterAgentParcelOnly && !LLViewerParcelMgr::instance().inAgentParcel(objectp->getPositionGlobal()))
    {
        return;
    }

    if (mFilterPermCopy && !(details.owner_mask & PERM_COPY))
    {
        return;
    }

    if (mFilterPermModify && !(details.owner_mask & PERM_MODIFY))
    {
        return;
    }

    if (mFilterPermTransfer && !(details.owner_mask & PERM_TRANSFER))
    {
        return;
    }

    //-----------------------------------------------------------------------
    // Find text
    //-----------------------------------------------------------------------

    LLUUID object_id = details.id;
    std::string creator_name;
    std::string owner_name;
    std::string last_owner_name;
    std::string group_name;
    std::string object_name = details.name;
    std::string object_description = details.description;

    // Resolve names; any cache miss sets details.name_requested so this
    // object is re-matched when the asynchronous lookup completes.
    details.name_requested = false;
    getNameFromUUID(details.ownership_id, owner_name, details.group_owned, details.name_requested);
    getNameFromUUID(details.creator_id, creator_name, false, details.name_requested);
    getNameFromUUID(details.last_owner_id, last_owner_name, false, details.name_requested);
    getNameFromUUID(details.group_id, group_name, true, details.name_requested);

    if (mRegexSearch)
    {
        try
        {
            if (!mSearchName.empty() && !boost::regex_match(object_name, mRegexSearchName))
            {
                return;
            }

            if (!mSearchDescription.empty() && !boost::regex_match(object_description, mRegexSearchDescription))
            {
                return;
            }

            if (!mSearchOwner.empty() && !boost::regex_match(owner_name, mRegexSearchOwner))
            {
                return;
            }

            if (!mSearchGroup.empty() && !boost::regex_match(group_name, mRegexSearchGroup))
            {
                return;
            }

            if (!mSearchCreator.empty() && !boost::regex_match(creator_name, mRegexSearchCreator))
            {
                return;
            }

            if (!mSearchLastOwner.empty() && !boost::regex_match(last_owner_name, mRegexSearchLastOwner))
            {
                return;
            }
        }
        // Should not end up here due to error checking in the Find class.
        // However, some complex regexes may consume excessive resources and
        // boost will throw an exception.
        // Due to the possibility of hitting this block a 1000 times per second, only log it once.
        catch(boost::regex_error& e)
        {
            LL_WARNS_ONCE("FSAreaSearch") << "boost::regex_error error in regex: "<< e.what() << LL_ENDL;
        }
        catch(const std::exception& e)
        {
            LL_WARNS_ONCE("FSAreaSearch") << "std::exception error in regex: "<< e.what() << LL_ENDL;
        }
        catch (...)
        {
            LL_WARNS_ONCE("FSAreaSearch") << "Unknown error in regex" << LL_ENDL;
        }
    }
    else
    {
        // Plain (non-regex) search: case-insensitive substring match on
        // each non-empty search term.
        if (!mSearchName.empty() && boost::ifind_first(object_name, mSearchName).empty())
        {
            return;
        }

        if (!mSearchDescription.empty() && boost::ifind_first(object_description, mSearchDescription).empty())
        {
            return;
        }

        if (!mSearchOwner.empty() && boost::ifind_first(owner_name, mSearchOwner).empty())
        {
            return;
        }

        if (!mSearchGroup.empty() && boost::ifind_first(group_name, mSearchGroup).empty())
        {
            return;
        }

        if (!mSearchCreator.empty() && boost::ifind_first(creator_name, mSearchCreator).empty())
        {
            return;
        }

        if (!mSearchLastOwner.empty() && boost::ifind_first(last_owner_name, mSearchLastOwner).empty())
        {
            return;
        }
    }

    //-----------------------------------------------------------------------
    // Object passed all above tests, add it to the List tab.
    //-----------------------------------------------------------------------

    details.listed = true;

    LLScrollListCell::Params cell_params;
    cell_params.font = LLFontGL::getFontSansSerif();

    LLScrollListItem::Params row_params;
    row_params.value = object_id.asString();

    cell_params.column = "distance";
    cell_params.value = llformat("%1.0f m", calculateObjectDistance(mPanelList->getAgentLastPosition(), objectp)); // used mAgentLastPosition instead of gAgent->getPositionGlobal for performance
    row_params.columns.add(cell_params);

    cell_params.column = "name";
    cell_params.value = details.name;
    row_params.columns.add(cell_params);

    cell_params.column = "description";
    cell_params.value = details.description;
    row_params.columns.add(cell_params);

    cell_params.column = "price";
    if (details.sale_info.isForSale())
    {
        LLStringUtil::format_map_t args;
        args["COST"] = llformat("%d", details.sale_info.getSalePrice());
        std::string cost_label = LLTrans::getString("FSAreaSearch_Cost_Label", args);
        cell_params.value = cost_label;
    }
    else
    {
        cell_params.value = " ";
    }
    row_params.columns.add(cell_params);

    cell_params.column = "land_impact";
    F32 cost = objectp->getLinksetCost();
    if (cost > F_ALMOST_ZERO)
    {
        cell_params.value = cost;
    }
    else
    {
        // Linkset cost not known yet; updateObjectCosts() fills it in later.
        cell_params.value = "...";
    }
    row_params.columns.add(cell_params);

    cell_params.column = "prim_count";
    cell_params.value = objectp->numChildren() + 1;
    row_params.columns.add(cell_params);

    cell_params.column = "owner";
    cell_params.value = owner_name;
    row_params.columns.add(cell_params);

    cell_params.column = "group";
    cell_params.value = group_name;
    row_params.columns.add(cell_params);

    cell_params.column = "creator";
    cell_params.value = creator_name;
    row_params.columns.add(cell_params);

    cell_params.column = "last_owner";
    cell_params.value = last_owner_name;
    row_params.columns.add(cell_params);

    LLScrollListItem* list_row =  mPanelList->getResultList()->addRow(row_params);

    // Temporary objects render italic, physical objects bold (both if both).
    if (objectp->flagTemporaryOnRez() || objectp->flagUsePhysics())
    {
        U8 font_style = LLFontGL::NORMAL;

        if (objectp->flagTemporaryOnRez())
        {
            font_style |= LLFontGL::ITALIC;
        }
        if (objectp->flagUsePhysics())
        {
            font_style |= LLFontGL::BOLD;
        }

        S32 num_colums = list_row->getNumColumns();
        for (S32 i = 0; i < num_colums; i++)
        {
            LLScrollListText* list_cell = (LLScrollListText*)list_row->getColumn(i);
            list_cell->setFontStyle(font_style);
        }
    }
    mPanelList->getResultList()->refreshLineHeight();
}
// <FS:Cron> Allows the object costs to be updated on-the-fly so as to bypass the problem with the data being stale when first accessed.
void FSAreaSearch::updateObjectCosts(const LLUUID& object_id, F32 object_cost, F32 link_cost, F32 physics_cost, F32 link_physics_cost)
{
    // Fill in the "land_impact" cell for a listed object once its linkset
    // cost arrives. Called from LLObjectCostResponder::result even when no
    // floater exists, so bail out unless the search is up and active.
    if (!mInstance || !mActive)
    {
        return;
    }

    FSScrollListCtrl* result_list = mPanelList->getResultList();
    if (!result_list)
    {
        return;
    }

    LLScrollListItem* list_row = result_list->getItem(LLSD(object_id));
    if (!list_row)
    {
        return;
    }

    LLScrollListColumn* list_column = result_list->getColumn("land_impact");
    if (!list_column)
    {
        return;
    }

    list_row->getColumn(list_column->mIndex)->setValue(LLSD(link_cost));
    result_list->setNeedsSort(); // re-sort if needed.
}
void FSAreaSearch::getNameFromUUID(LLUUID& id, std::string& name, BOOL group, bool& name_requested)
{
    // Resolve a group or avatar UUID to a display name via the name caches.
    // On a cache miss the lookup is started asynchronously (at most once
    // per id, tracked in mNamesRequested) and name_requested is set so the
    // caller knows this object must be re-matched when the name arrives.
    if (group)
    {
        BOOL is_group;
        if (gCacheName->getIfThere(id, name, is_group))
        {
            return;
        }

        if (std::find(mNamesRequested.begin(), mNamesRequested.end(), id) == mNamesRequested.end())
        {
            mNamesRequested.push_back(id);
            gCacheName->get(id, group, boost::bind(&FSAreaSearch::callbackLoadFullName, this, _1, _2));
        }
        name_requested = true;
        return;
    }

    LLAvatarName av_name;
    if (LLAvatarNameCache::get(id, &av_name))
    {
        name = av_name.getUserName();
        return;
    }

    if (std::find(mNamesRequested.begin(), mNamesRequested.end(), id) == mNamesRequested.end())
    {
        mNamesRequested.push_back(id);
        LLAvatarNameCache::get(id, boost::bind(&FSAreaSearch::avatarNameCacheCallback, this, _1, _2));
    }
    name_requested = true;
}
// Adapter: LLAvatarNameCache delivers an LLAvatarName, but the shared
// name-arrival handler wants a plain string, so forward the user name.
void FSAreaSearch::avatarNameCacheCallback(const LLUUID& id, const LLAvatarName& av_name)
{
    callbackLoadFullName(id, av_name.getUserName());
}
void FSAreaSearch::callbackLoadFullName(const LLUUID& id, const std::string& full_name)
{
LLViewerRegion* our_region = gAgent.getRegion();
for (std::map<LLUUID, FSObjectProperties>::iterator object_it = mObjectDetails.begin();
object_it != mObjectDetails.end();
++object_it)
{
if (object_it->second.name_requested && !object_it->second.listed)
{
LLUUID object_id = object_it->second.id;
LLViewerObject* objectp = gObjectList.findObject(object_id);
if (objectp && isSearchableObject(objectp, our_region))
{
matchObject(object_it->second, objectp);
}
}
}
mPanelList->updateName(id, full_name);
}
void FSAreaSearch::updateCounterText()
{
    // Refresh the "listed / pending / total" status line on the List tab.
    LLStringUtil::format_map_t args;
    args["[LISTED]"] = llformat("%d", mPanelList->getResultList()->getItemCount());
    args["[PENDING]"] = llformat("%d", mRequested);
    args["[TOTAL]"] = llformat("%d", mSearchableObjects);
    mPanelList->setCounterText(args);
}
void FSAreaSearch::onCommitLine()
{
mSearchName = mPanelFind->mNameLineEditor->getText();
mSearchDescription = mPanelFind->mDescriptionLineEditor->getText();
mSearchOwner = mPanelFind->mOwnerLineEditor->getText();
mSearchGroup = mPanelFind->mGroupLineEditor->getText();
mSearchCreator = mPanelFind->mCreatorLineEditor->getText();
mSearchLastOwner = mPanelFind->mLastOwnerLineEditor->getText();
if (mRegexSearch)
{
if (!mSearchName.empty())
{
if (regexTest(mSearchName))
{
mRegexSearchName = mSearchName.c_str();
}
else
{
// empty the search text to prevent error in matchObject
mSearchName.erase();
}
}
if (!mSearchDescription.empty())
{
if (regexTest(mSearchDescription))
{
mRegexSearchDescription = mSearchDescription.c_str();
}
else
{
mSearchDescription.erase();
}
}
if (!mSearchOwner.empty())
{
if (regexTest(mSearchOwner))
{
mRegexSearchOwner = mSearchOwner.c_str();
}
else
{
mSearchOwner.erase();
}
}
if (!mSearchGroup.empty())
{
if (regexTest(mSearchGroup))
{
mRegexSearchGroup = mSearchGroup.c_str();
}
else
{
mSearchGroup.erase();
}
}
if (!mSearchCreator.empty())
{
if (regexTest(mSearchCreator))
{
mRegexSearchCreator = mSearchCreator.c_str();
}
else
{
mSearchCreator.erase();
}
}
if (!mSearchLastOwner.empty())
{
if (regexTest(mSearchLastOwner))
{
mRegexSearchLastOwner = mSearchLastOwner.c_str();
}
else
{
mSearchLastOwner.erase();
}
}
}
}
bool FSAreaSearch::regexTest(std::string text)
{
    // Validate a user-supplied regex by compiling it and running one match
    // against a harmless sample string. On any failure the user is shown a
    // RegExFail notification and false is returned.
    //
    // Sample patterns useful for testing -- this one matches a UUID:
    // boost::regex pattern("[\\w]{8}-[\\w]{4}-[\\w]{4}-[\\w]{4}-[\\w]{12}");
    // [\p{XDigit}]{8}(-[\p{XDigit}]{4}){3}-[\p{XDigit}]{12}
    // To find all objects that don't belong to a group, use (?!^Name of the group$).* in the group field.

    // Shared notification for all three failure paths.
    auto notify_failure = [](const std::string& what)
    {
        LLSD args;
        args["EWHAT"] = what;
        LLNotificationsUtil::add("RegExFail", args);
    };

    try
    {
        std::string sample_text = "asdfghjklqwerty1234567890";
        boost::regex compiled(text.c_str());
        boost::regex_match(sample_text, compiled);
    }
    catch(boost::regex_error& e)
    {
        notify_failure(e.what());
        LL_DEBUGS("FSAreaSearch") << "boost::regex_error error in regex: "<< e.what() << LL_ENDL;
        return false;
    }
    catch(const std::exception& e)
    {
        notify_failure(e.what());
        LL_DEBUGS("FSAreaSearch") << "std::exception error in regex: "<< e.what() << LL_ENDL;
        return false;
    }
    catch (...)
    {
        notify_failure("Unknown Error.");
        LL_DEBUGS("FSAreaSearch") << "Unknown error in regex" << LL_ENDL;
        return false;
    }
    return true;
}
void FSAreaSearch::clearSearchText()
{
    // Blank every stored search term (clear() is equivalent to the
    // no-argument erase() the previous version used).
    mSearchName.clear();
    mSearchDescription.clear();
    mSearchOwner.clear();
    mSearchGroup.clear();
    mSearchCreator.clear();
    mSearchLastOwner.clear();
}
void FSAreaSearch::onButtonClickedSearch()
{
    // Run the search with the current Find-tab contents and switch the
    // user to the List tab to see the results.
    // if the user blanks out a line, onCommitLine is not fired/called.
    // calling this will make sure to update any blanked out lines.
    onCommitLine();
    mTab->selectFirstTab();
    refreshList(false);
}
void FSAreaSearch::onCommitCheckboxRegex()
{
    // Toggle regex mode. When switching it on, re-validate and re-compile
    // the current search terms so matchObject() has usable patterns.
    mRegexSearch = mPanelFind->mCheckboxRegex->get();

    if (mRegexSearch)
    {
        onCommitLine();
    }
}
// Programmatically fill the Find tab's "owner" search field (used e.g. by
// the "filter my objects" context-menu action).
void FSAreaSearch::setFindOwnerText(std::string value)
{
    mPanelFind->mOwnerLineEditor->setText(value);
}
//---------------------------------------------------------------------------
// List panel
//---------------------------------------------------------------------------
//---------------------------------------------------------------------------
// List panel constructor: assigns each result-list column a power-of-two
// bit used in the FSAreaSearchColumnConfig bitmask that persists which
// columns are visible.
//---------------------------------------------------------------------------
FSPanelAreaSearchList::FSPanelAreaSearchList(FSAreaSearch* pointer)
:	LLPanel(),
	mCounterText(0),
	mResultList(0),
	mFSAreaSearch(pointer),
	mFSAreaSearchColumnConfigConnection()
{
    // Column order here fixes the bit assignment: first column -> bit 1,
    // second -> bit 2, and so on (1, 2, 4, ... 512).
    const char* column_names[] =
    {
        "distance", "name", "description", "price", "land_impact",
        "prim_count", "owner", "group", "creator", "last_owner"
    };

    U32 bit = 1;
    for (const char* column_name : column_names)
    {
        mColumnBits[column_name] = bit;
        bit <<= 1;
    }
}
BOOL FSPanelAreaSearchList::postBuild()
{
    // Wire up the List tab's widgets after the XUI layout is built:
    // result list (double-click + context menu), refresh button, beacons
    // checkbox, and a settings listener that re-applies column visibility
    // whenever FSAreaSearchColumnConfig changes.
    mResultList = getChild<FSScrollListCtrl>("result_list");
    mResultList->setDoubleClickCallback(boost::bind(&FSPanelAreaSearchList::onDoubleClick, this));
    mResultList->sortByColumn("name", TRUE);
    mResultList->setContextMenu(&gFSAreaSearchMenu);

    mCounterText = getChild<LLTextBox>("counter");

    mRefreshButton = getChild<LLButton>("Refresh");
    mRefreshButton->setClickedCallback(boost::bind(&FSPanelAreaSearchList::onClickRefresh, this));

    mCheckboxBeacons = getChild<LLCheckBoxCtrl>("beacons");
    mCheckboxBeacons->setCommitCallback(boost::bind(&FSPanelAreaSearchList::onCommitCheckboxBeacons, this));

    mAgentLastPosition = gAgent.getPositionGlobal();

    updateResultListColumns();
    mFSAreaSearchColumnConfigConnection = gSavedSettings.getControl("FSAreaSearchColumnConfig")->getSignal()->connect(boost::bind(&FSPanelAreaSearchList::updateResultListColumns, this));

    return LLPanel::postBuild();
}
// virtual
FSPanelAreaSearchList::~FSPanelAreaSearchList()
{
    // Detach the settings listener registered in postBuild() so the signal
    // does not fire into a destroyed panel.
    if (mFSAreaSearchColumnConfigConnection.connected())
    {
        mFSAreaSearchColumnConfigConnection.disconnect();
    }
}
// Refresh button: rebuild the list, dropping all cached properties.
void FSPanelAreaSearchList::onClickRefresh()
{
    mFSAreaSearch->refreshList(true);
}
// Beacons checkbox: forward the new state to the search controller.
void FSPanelAreaSearchList::onCommitCheckboxBeacons()
{
    mFSAreaSearch->setBeacons(mCheckboxBeacons->get());
}
// Show the counter line with no numbers (used right after a reset).
void FSPanelAreaSearchList::setCounterText()
{
    mCounterText->setText(getString("ListedPendingTotalBlank"));
}
// Show the counter line with listed/pending/total values substituted in.
void FSPanelAreaSearchList::setCounterText(LLStringUtil::format_map_t args)
{
    mCounterText->setText(getString("ListedPendingTotalFilled", args));
}
void FSPanelAreaSearchList::onDoubleClick()
{
LLScrollListItem *item = mResultList->getFirstSelected();
if (!item) return;
LLUUID object_id = item->getUUID();
LLViewerObject* objectp = gObjectList.findObject(object_id);
if (objectp)
{
FSObjectProperties& details = mFSAreaSearch->mObjectDetails[object_id];
LLTracker::trackLocation(objectp->getPositionGlobal(), details.name, "", LLTracker::LOCATION_ITEM);
if (mFSAreaSearch->getPanelAdvanced()->mCheckboxClickBuy->get())
{
buyObject(details, objectp);
}
if (mFSAreaSearch->getPanelAdvanced()->mCheckboxClickTouch->get())
{
touchObject(objectp);
}
if (mFSAreaSearch->getPanelAdvanced()->mCheckboxClickSit->get())
{
sitOnObject(details, objectp);
}
}
}
void FSPanelAreaSearchList::updateScrollList()
{
    // Housekeeping pass over the visible rows: drop rows whose object has
    // gone away (or is no longer searchable) and, if the agent moved far
    // enough, refresh every row's distance cell.
    bool agent_moved = false;
    const LLVector3d current_agent_position = gAgent.getPositionGlobal();

    if (dist_vec(mAgentLastPosition, current_agent_position) > MIN_DISTANCE_MOVED)
    {
        agent_moved = true;
        mAgentLastPosition = current_agent_position;
    }

    bool deleted = false;
    LLScrollListColumn* distance_column = mResultList->getColumn("distance");

    LLViewerRegion* our_region = gAgent.getRegion();

    // Iterate over the rows in the list, deleting ones whose object has gone away.
    std::vector<LLScrollListItem*> items = mResultList->getAllData();

    for (std::vector<LLScrollListItem*>::iterator item_it = items.begin();
        item_it != items.end();
        ++item_it)
    {
        LLScrollListItem* item = (*item_it);
        LLUUID row_id = item->getUUID();
        LLViewerObject* objectp = gObjectList.findObject(row_id);

        if ((!objectp) || (!mFSAreaSearch->isSearchableObject(objectp, our_region)))
        {
            // This item's object has been deleted -- remove the row.
            // Removing the row won't throw off our iteration, since we have a local copy of the array.
            // We just need to make sure we don't access this item after the delete.
            mResultList->deleteSingleItem(mResultList->getItemIndex(row_id));
            // Un-list the cached entry so it can be re-added later.
            mFSAreaSearch->mObjectDetails[row_id].listed = false ;
            deleted = true;
        }
        else
        {
            if (agent_moved && distance_column)
            {
                item->getColumn(distance_column->mIndex)->setValue(LLSD(llformat("%1.0f m", calculateObjectDistance(current_agent_position, objectp))));
            }
        }
    }

    if (deleted || agent_moved)
    {
        mResultList->updateLayout();
    }
}
void FSPanelAreaSearchList::updateResultListColumns()
{
    // Rebuild the result list's columns from the original XUI parameters,
    // hiding (width -1) every column whose bit is not set in the
    // FSAreaSearchColumnConfig bitmask, while preserving the current sort.
    U32 column_config = gSavedSettings.getU32("FSAreaSearchColumnConfig");
    std::vector<LLScrollListColumn::Params> column_params = mResultList->getColumnInitParams();
    std::string current_sort_col = mResultList->getSortColumnName();
    BOOL current_sort_asc = mResultList->getSortAscending();

    mResultList->clearColumns();
    mResultList->updateLayout();

    std::vector<LLScrollListColumn::Params>::iterator param_it;
    for (param_it = column_params.begin(); param_it != column_params.end(); ++param_it)
    {
        LLScrollListColumn::Params p = *param_it;

        // Re-create the column from its original definition; only the
        // width differs depending on visibility.
        LLScrollListColumn::Params params;
        params.header = p.header;
        params.name = p.name;
        params.halign = p.halign;
        params.sort_direction = p.sort_direction;
        params.sort_column = p.sort_column;
        params.tool_tip = p.tool_tip;

        if (column_config & mColumnBits[p.name.getValue()])
        {
            params.width = p.width;
        }
        else
        {
            // Hidden column: dynamic width of -1 collapses it.
            params.width.pixel_width.set(-1, true);
        }

        mResultList->addColumn(params);
    }

    mResultList->sortByColumn(current_sort_col, current_sort_asc);
    mResultList->dirtyColumns();
    mResultList->updateColumns(true);
}
void FSPanelAreaSearchList::onColumnVisibilityChecked(const LLSD& userdata)
{
    // Context-menu handler: toggle the visibility bit of the named column
    // in the persisted FSAreaSearchColumnConfig bitmask, then rebuild the
    // columns. XOR flips exactly that column's bit (set -> cleared,
    // cleared -> set), replacing the previous set/clear branching with
    // identical results.
    std::string column = userdata.asString();
    U32 column_config = gSavedSettings.getU32("FSAreaSearchColumnConfig");

    gSavedSettings.setU32("FSAreaSearchColumnConfig", column_config ^ mColumnBits[column]);

    updateResultListColumns();
}
// Context-menu check-state callback: a column's menu entry is checked when
// its bit is set in the persisted visibility bitmask.
bool FSPanelAreaSearchList::onEnableColumnVisibilityChecked(const LLSD& userdata)
{
    std::string column = userdata.asString();
    U32 column_config = gSavedSettings.getU32("FSAreaSearchColumnConfig");

    return (mColumnBits[column] & column_config);
}
void FSPanelAreaSearchList::updateName(LLUUID id, std::string name)
{
    // A name lookup completed: write the resolved name into the creator /
    // owner / group / last-owner cell of every row whose cached details
    // reference that id. (Refactored: the four identical update branches
    // now share one local lambda; behavior is unchanged.)
    LLScrollListColumn* creator_column = mResultList->getColumn("creator");
    LLScrollListColumn* owner_column = mResultList->getColumn("owner");
    LLScrollListColumn* group_column = mResultList->getColumn("group");
    LLScrollListColumn* last_owner_column = mResultList->getColumn("last_owner");

    // Write 'name' into the given column of 'item' and flag the list for
    // re-sorting.
    auto set_name_cell = [this, &name](LLScrollListItem* item, LLScrollListColumn* column)
    {
        LLScrollListText* text_cell = (LLScrollListText*)item->getColumn(column->mIndex);
        text_cell->setText(name);
        mResultList->setNeedsSort();
    };

    // Iterate over the rows in the list, updating the ones with matching id.
    std::vector<LLScrollListItem*> items = mResultList->getAllData();

    for (std::vector<LLScrollListItem*>::iterator item_it = items.begin();
        item_it != items.end();
        ++item_it)
    {
        LLScrollListItem* item = (*item_it);
        LLUUID row_id = item->getUUID();
        FSObjectProperties& details = mFSAreaSearch->mObjectDetails[row_id];

        if (creator_column && (id == details.creator_id))
        {
            set_name_cell(item, creator_column);
        }
        if (owner_column && (id == details.owner_id))
        {
            set_name_cell(item, owner_column);
        }
        if (group_column && (id == details.group_id))
        {
            set_name_cell(item, group_column);
        }
        if (last_owner_column && (id == details.last_owner_id))
        {
            set_name_cell(item, last_owner_column);
        }
    }
}
bool FSPanelAreaSearchList::onContextMenuItemEnable(const LLSD& userdata)
{
std::string parameter = userdata.asString();
if (parameter == "one")
{
// return true if just one item is selected.
return (mResultList->getNumSelected() == 1);
}
else if (parameter == "in_dd")
{
// return true if the object is within the draw distance.
if (mResultList->getNumSelected() == 1)
{
LLUUID object_id = mResultList->getFirstSelected()->getUUID();
LLViewerObject* objectp = gObjectList.findObject(object_id);
return (objectp && calculateObjectDistance(gAgent.getPositionGlobal(), objectp) < gAgentCamera.mDrawDistance);
}
else
{
return false;
}
}
else if (parameter == "script")
{
return (mResultList->getNumSelected() > 0 && enable_bridge_function());
}
else
{
// return true if more then one is selected, but not just one.
return (mResultList->getNumSelected() > 1);
}
}
// Dispatches a context-menu action on the search result list.  Actions are
// grouped by how many selected rows they operate on: whole-list actions
// (select_all, clear_selection, filter_my_objects), per-selected-item
// actions (touch/script/blacklist), single-selection actions
// (buy/teleport/sit/zoom) and selection-based object operations
// (inspect/edit/delete/return).  Always returns true (action handled).
bool FSPanelAreaSearchList::onContextMenuItemClick(const LLSD& userdata)
{
std::string action = userdata.asString();
LL_DEBUGS("FSAreaSearch") << "Right click menu " << action << " was selected." << LL_ENDL;
if (action == "select_all")
{
std::vector<LLScrollListItem*> result_items = mResultList->getAllData();
std::for_each(result_items.begin(), result_items.end(), [](LLScrollListItem* item) { item->setSelected(TRUE); });
return true;
}
if (action == "clear_selection")
{
std::vector<LLScrollListItem*> selected_items = mResultList->getAllSelected();
std::for_each(selected_items.begin(), selected_items.end(), [](LLScrollListItem* item) { item->setSelected(FALSE); });
return true;
}
if (action == "filter_my_objects")
{
// Re-run the search restricted to objects owned by the current agent.
mFSAreaSearch->setFindOwnerText(gAgentUsername);
mFSAreaSearch->onButtonClickedSearch();
return true;
}
// NOTE that each action command MUST begin with a different letter.
char c = action.at(0);
switch(c)
{
// Actions applied to every selected row.
case 't': // touch
case 's': // script
case 'l': // blacklist
{
std::vector<LLScrollListItem*> selected = mResultList->getAllSelected();
S32 cnt = 0;
for(std::vector<LLScrollListItem*>::iterator item_it = selected.begin();
item_it != selected.end(); ++item_it)
{
switch (c)
{
case 't': // touch
{
// Stagger touches 0.2s apart so the simulator is not flooded.
new FSAreaSearchTouchTimer((*item_it)->getUUID(), cnt * 0.2f);
cnt++;
}
break;
case 's': // script
FSLSLBridge::instance().viewerToLSL("getScriptInfo|" + (*item_it)->getUUID().asString() + "|" + (gSavedSettings.getBOOL("FSScriptInfoExtended") ? "1" : "0"));
break;
case 'l': // blacklist
{
LLUUID object_id = (*item_it)->getUUID();
LLViewerObject* objectp = gObjectList.findObject(object_id);
// if (objectp)
// [RLVa:KB] - Checked: RLVa-2.0.0 | FS-specific
// Don't allow derendering of own attachments when RLVa is enabled
if ( (objectp) && (gAgentID != objectp->getID()) && ((!RlvActions::isRlvEnabled()) || (!objectp->isAttachment()) || (!objectp->permYouOwner())) )
// [/RLVa:KB]
{
std::string region_name;
LLViewerRegion* region = objectp->getRegion();
if (region)
{
region_name = objectp->getRegion()->getName();
}
// Blacklist, then purge the object from our caches, the selection,
// the object list and (if enabled) the region's VO cache.
FSAssetBlacklist::getInstance()->addNewItemToBlacklist(object_id, mFSAreaSearch->mObjectDetails[object_id].name, region_name, LLAssetType::AT_OBJECT);
mFSAreaSearch->mObjectDetails.erase(object_id);
LLSelectMgr::getInstance()->deselectObjectOnly(objectp);
gObjectList.addDerenderedItem(object_id, true);
gObjectList.killObject(objectp);
if (LLViewerRegion::sVOCacheCullingEnabled && region)
{
region->killCacheEntry(objectp->getLocalID());
}
// Stop any in-progress edit of the object we just killed.
LLTool* tool = LLToolMgr::getInstance()->getCurrentTool();
LLViewerObject* tool_editing_object = tool->getEditingObject();
if (tool_editing_object && tool_editing_object->mID == object_id)
{
tool->stopEditing();
}
}
}
break;
default:
break;
}
}
}
break;
// Actions that only make sense on exactly one selected object.
case 'b': // buy
case 'p': // p_teleport
case 'u': // sit
case 'q': // q_zoom
{
if (mResultList->getNumSelected() == 1)
{
LLUUID object_id = mResultList->getFirstSelected()->getUUID();
LLViewerObject* objectp = gObjectList.findObject(object_id);
if (objectp)
{
switch (c)
{
case 'b': // buy
buyObject(mFSAreaSearch->mObjectDetails[object_id], objectp);
break;
case 'p': // p_teleport
gAgent.teleportViaLocation(objectp->getPositionGlobal());
break;
case 'u': // sit
sitOnObject(mFSAreaSearch->mObjectDetails[object_id], objectp);
break;
case 'q': // q_zoom
{
// Disable flycam if active. Without this, the requested look-at doesn't happen because the flycam code overrides all other camera motion.
bool fly_cam_status(LLViewerJoystick::getInstance()->getOverrideCamera());
if (fly_cam_status)
{
LLViewerJoystick::getInstance()->setOverrideCamera(false);
LLPanelStandStopFlying::clearStandStopFlyingMode(LLPanelStandStopFlying::SSFM_FLYCAM);
// *NOTE: Above may not be the proper way to disable flycam. What I really want to do is just be able to move the camera and then leave the flycam in the the same state it was in, just moved to the new location. ~Cron
}
LLViewerJoystick::getInstance()->setCameraNeedsUpdate(true); // Fixes an edge case where if the user has JUST disabled flycam themselves, the camera gets stuck waiting for input.
gAgentCamera.setFocusOnAvatar(FALSE, ANIMATE);
gAgentCamera.setLookAt(LOOKAT_TARGET_SELECT, objectp);
// Place the camera looking at the object, along the line from the camera to the object,
// and sufficiently far enough away for the object to fill 3/4 of the screen,
// but not so close that the bbox's nearest possible vertex goes inside the near clip.
// Logic C&P'd from LLViewerMediaFocus::setCameraZoom() and then edited as needed
LLBBox bbox = objectp->getBoundingBoxAgent();
LLVector3d center(gAgent.getPosGlobalFromAgent(bbox.getCenterAgent()));
F32 height;
F32 width;
F32 depth;
F32 angle_of_view;
F32 distance;
LLVector3d target_pos(center);
LLVector3d camera_dir(gAgentCamera.getCameraPositionGlobal() - target_pos);
camera_dir.normalize();
// We need the aspect ratio, and the 3 components of the bbox as height, width, and depth.
F32 aspect_ratio(LLViewerMediaFocus::getBBoxAspectRatio(bbox, LLVector3(camera_dir), &height, &width, &depth));
F32 camera_aspect(LLViewerCamera::getInstance()->getAspect());
// We will normally use the side of the volume aligned with the short side of the screen (i.e. the height for
// a screen in a landscape aspect ratio), however there is an edge case where the aspect ratio of the object is
// more extreme than the screen. In this case we invert the logic, using the longer component of both the object
// and the screen.
bool invert((camera_aspect > 1.0f && aspect_ratio > camera_aspect) || (camera_aspect < 1.0f && aspect_ratio < camera_aspect));
// To calculate the optimum viewing distance we will need the angle of the shorter side of the view rectangle.
// In portrait mode this is the width, and in landscape it is the height.
// We then calculate the distance based on the corresponding side of the object bbox (width for portrait, height for landscape)
// We will add half the depth of the bounding box, as the distance projection uses the center point of the bbox.
if (camera_aspect < 1.0f || invert)
{
angle_of_view = llmax(0.1f, LLViewerCamera::getInstance()->getView() * LLViewerCamera::getInstance()->getAspect());
distance = width * 0.5 * 1.1 / tanf(angle_of_view * 0.5f);
}
else
{
angle_of_view = llmax(0.1f, LLViewerCamera::getInstance()->getView());
distance = height * 0.5 * 1.1 / tanf(angle_of_view * 0.5f);
}
distance += depth * 0.5;
// Verify that the bounding box isn't inside the near clip. Using OBB-plane intersection to check if the
// near-clip plane intersects with the bounding box, and if it does, adjust the distance such that the
// object doesn't clip.
LLVector3d bbox_extents(bbox.getExtentLocal());
LLVector3d axis_x = LLVector3d(1, 0, 0) * bbox.getRotation();
LLVector3d axis_y = LLVector3d(0, 1, 0) * bbox.getRotation();
LLVector3d axis_z = LLVector3d(0, 0, 1) * bbox.getRotation();
//Normal of nearclip plane is camera_dir.
F32 min_near_clip_dist = bbox_extents.mdV[0] * (camera_dir * axis_x) + bbox_extents.mdV[1] * (camera_dir * axis_y) + bbox_extents.mdV[2] * (camera_dir * axis_z); // http://www.gamasutra.com/view/feature/131790/simple_intersection_tests_for_games.php?page=7
F32 camera_to_near_clip_dist(LLViewerCamera::getInstance()->getNear());
F32 min_camera_dist(min_near_clip_dist + camera_to_near_clip_dist);
if (distance < min_camera_dist)
{
// Camera is too close to object, some parts MIGHT clip. Move camera away to the position where clipping barely doesn't happen.
distance = min_camera_dist;
}
LLVector3d camera_pos(target_pos + camera_dir * distance);
if (camera_dir == LLVector3d::z_axis || camera_dir == LLVector3d::z_axis_neg)
{
// If the direction points directly up, the camera will "flip" around.
// We try to avoid this by adjusting the target camera position a
// smidge towards current camera position
// *NOTE: this solution is not perfect. All it attempts to solve is the
// "looking down" problem where the camera flips around when it animates
// to that position. You still are not guaranteed to be looking at the
// object in the correct orientation. What this solution does is it will
// put the camera into position keeping as best it can the current
// orientation with respect to the direction wanted. In other words, if
// before zoom the object appears "upside down" from the camera, after
/// zooming it will still be upside down, but at least it will not flip.
LLVector3d cur_camera_pos = LLVector3d(gAgentCamera.getCameraPositionGlobal());
LLVector3d delta = (cur_camera_pos - camera_pos);
F64 len = delta.length();
delta.normalize();
// Move 1% of the distance towards original camera location
camera_pos += 0.01 * len * delta;
}
gAgentCamera.setCameraPosAndFocusGlobal(camera_pos, target_pos, objectp->getID());
// *TODO: Re-enable joystick flycam if we disabled it earlier... Have to find some form of callback as re-enabling at this point causes the camera motion to not happen. ~Cron
//if (fly_cam_status)
//{
// LLViewerJoystick::getInstance()->toggleFlycam();
//}
}
break;
default:
break;
}
}
}
}
break;
// Actions implemented through the selection manager: select the rows'
// objects first, then invoke the standard viewer handler on the selection.
case 'i': // inspect
case 'e': // edit
case 'd': // delete
case 'r': // return
{
// select the objects first
LLSelectMgr::getInstance()->deselectAll();
std::vector<LLScrollListItem*> selected = mResultList->getAllSelected();
for(std::vector<LLScrollListItem*>::iterator item_it = selected.begin();
item_it != selected.end(); ++item_it)
{
LLUUID object_id = (*item_it)->getUUID();
LLViewerObject* objectp = gObjectList.findObject(object_id);
if (objectp)
{
LLSelectMgr::getInstance()->selectObjectAndFamily(objectp);
if ( c == 'r' )
{
// need to set permissions for object return
LLSelectNode* node = LLSelectMgr::getInstance()->getSelection()->findNode(objectp);
if( !node )
break;
if( !mFSAreaSearch || mFSAreaSearch->mObjectDetails.end() == mFSAreaSearch->mObjectDetails.find(object_id ) )
break;
FSObjectProperties& details = mFSAreaSearch->mObjectDetails[object_id];
node->mValid = TRUE;
node->mPermissions->init(details.creator_id, details.owner_id, details.last_owner_id, details.group_id);
node->mPermissions->initMasks(details.base_mask, details.owner_mask, details.everyone_mask, details.group_mask, details.next_owner_mask);
node->mAggregatePerm = details.ag_perms;
}
}
}
// now act on those selected objects
switch (c)
{
case 'i': // inspect
LLFloaterReg::showInstance("inspect");
break;
case 'e': // edit
handle_object_edit();
break;
case 'd': // delete
handle_object_delete();
break;
case 'r': // return
handle_object_return();
break;
default:
break;
}
}
break;
default:
break;
}
return true;
}
// static
void FSPanelAreaSearchList::touchObject(LLViewerObject* objectp)
{
    // *NOTE: Hope the packets arrive safely and in order or else
    // there will be some problems.
    // Respect RLVa touch restrictions when they are active.
    if (RlvActions::isRlvEnabled() && !RlvActions::canTouch(objectp))
    {
        return;
    }
    // A touch is simulated as a grab immediately followed by a degrab.
    LLPickInfo pick; // default constructor will set sane values.
    send_ObjectGrab_message(objectp, pick, LLVector3::zero);
    send_ObjectDeGrab_message(objectp, pick);
}
void FSPanelAreaSearchList::buyObject(FSObjectProperties& details, LLViewerObject* objectp)
{
    // Select the object so the standard buy flow operates on it.
    LLSelectMgr::getInstance()->deselectAll();
    LLSelectMgr::getInstance()->selectObjectAndFamily(objectp);
    LLSelectNode* node = LLSelectMgr::getInstance()->getSelection()->findNode(objectp);
    if (!node)
    {
        LL_WARNS("FSAreaSearch") << "No LLSelectNode node" << LL_ENDL;
        return;
    }

    // The buy floater reads ownership, permission and sale data from the
    // selection node, so populate it from our cached object properties.
    node->mValid = TRUE;
    node->mPermissions->init(details.creator_id, details.owner_id, details.last_owner_id, details.group_id);
    node->mPermissions->initMasks(details.base_mask, details.owner_mask, details.everyone_mask, details.group_mask, details.next_owner_mask);
    node->mSaleInfo = details.sale_info;
    node->mAggregatePerm = details.ag_perms;
    node->mCategory = details.category;
    node->mName.assign(details.name);
    node->mDescription.assign(details.description);
    handle_buy();
}
void FSPanelAreaSearchList::sitOnObject(FSObjectProperties& details, LLViewerObject* objectp)
{
    // Respect RLVa sit restrictions when they are active.
    if (RlvActions::isRlvEnabled() && !RlvActions::canSit(objectp))
    {
        return;
    }
    LLSelectMgr::getInstance()->deselectAll();
    LLSelectMgr::getInstance()->selectObjectAndFamily(objectp);
    LLSelectNode* node = LLSelectMgr::getInstance()->getSelection()->findNode(objectp);
    if (!node)
    {
        LL_WARNS("FSAreaSearch") << "No LLSelectNode node" << LL_ENDL;
        return;
    }
    // Ask the simulator for a sit on the target object with no offset.
    gMessageSystem->newMessageFast(_PREHASH_AgentRequestSit);
    gMessageSystem->nextBlockFast(_PREHASH_AgentData);
    gMessageSystem->addUUIDFast(_PREHASH_AgentID, gAgentID);
    gMessageSystem->addUUIDFast(_PREHASH_SessionID, gAgentSessionID);
    gMessageSystem->nextBlockFast(_PREHASH_TargetObject);
    gMessageSystem->addUUIDFast(_PREHASH_TargetID, objectp->mID);
    gMessageSystem->addVector3Fast(_PREHASH_Offset, LLVector3::zero);
    // NOTE(review): assumes objectp->getRegion() is non-null here, as the
    // original did — confirm callers only pass in-world objects.
    objectp->getRegion()->sendReliableMessage();
}
//---------------------------------------------------------------------------
// Find panel
//---------------------------------------------------------------------------
// Stores a back-pointer to the owning FSAreaSearch so that UI callbacks can
// forward user input into the shared search logic.
FSPanelAreaSearchFind::FSPanelAreaSearchFind(FSAreaSearch* pointer)
: LLPanel(),
mFSAreaSearch(pointer)
{
}
BOOL FSPanelAreaSearchFind::postBuild()
{
    // Every search line editor commits its text back into the shared
    // search state through the same callback.
    auto hook_line_editor = [this](const char* widget_name, LLLineEditor*& editor)
    {
        editor = getChild<LLLineEditor>(widget_name);
        editor->setCommitCallback(boost::bind(&FSAreaSearch::onCommitLine, mFSAreaSearch));
    };
    hook_line_editor("name_search", mNameLineEditor);
    hook_line_editor("description_search", mDescriptionLineEditor);
    hook_line_editor("owner_search", mOwnerLineEditor);
    hook_line_editor("group_search", mGroupLineEditor);
    hook_line_editor("creator_search", mCreatorLineEditor);
    hook_line_editor("last_owner_search", mLastOwnerLineEditor);

    // Regex toggle feeds the search model; search/clear buttons drive it.
    mCheckboxRegex = getChild<LLCheckBoxCtrl>("regular_expression");
    mCheckboxRegex->setCommitCallback(boost::bind(&FSAreaSearch::onCommitCheckboxRegex, mFSAreaSearch));

    mSearchButton = getChild<LLButton>("search");
    mSearchButton->setClickedCallback(boost::bind(&FSAreaSearch::onButtonClickedSearch, mFSAreaSearch));

    mClearButton = getChild<LLButton>("clear");
    mClearButton->setClickedCallback(boost::bind(&FSPanelAreaSearchFind::onButtonClickedClear, this));

    return LLPanel::postBuild();
}
// virtual
// Nothing to tear down explicitly; child widgets are owned by the panel.
FSPanelAreaSearchFind::~FSPanelAreaSearchFind()
{ }
void FSPanelAreaSearchFind::onButtonClickedClear()
{
mNameLineEditor->clear();
mDescriptionLineEditor->clear();
mOwnerLineEditor->clear();
mGroupLineEditor->clear();
mCreatorLineEditor->clear();
mLastOwnerLineEditor->clear();
mFSAreaSearch->clearSearchText();
}
// handle the "enter" key
BOOL FSPanelAreaSearchFind::handleKeyHere(KEY key, MASK mask)
{
    // Intercept RETURN to run the search; defer all other keys to LLPanel.
    if (KEY_RETURN != key)
    {
        return LLPanel::handleKeyHere(key, mask);
    }
    mFSAreaSearch->onButtonClickedSearch();
    return TRUE;
}
//---------------------------------------------------------------------------
// Filter panel
//---------------------------------------------------------------------------
// Stores a back-pointer to the owning FSAreaSearch so filter changes can be
// pushed into the shared search state.
FSPanelAreaSearchFilter::FSPanelAreaSearchFilter(FSAreaSearch* pointer)
: LLPanel(),
mFSAreaSearch(pointer)
{
}
// Wires up all filter-tab widgets: each checkbox/spinner/combo commits its
// value into the shared FSAreaSearch state via onCommitCheckbox/Spin/Combo.
// The physical/temporary/attachment include-filters start disabled because
// their corresponding "exclude" checkboxes start checked (see
// onCommitCheckbox for how the two interact).
BOOL FSPanelAreaSearchFilter::postBuild()
{
mCheckboxLocked = getChild<LLCheckBoxCtrl>("filter_locked");
mCheckboxLocked->setCommitCallback(boost::bind(&FSPanelAreaSearchFilter::onCommitCheckbox, this));
mCheckboxPhysical = getChild<LLCheckBoxCtrl>("filter_physical");
mCheckboxPhysical->setEnabled(FALSE);
mCheckboxPhysical->setCommitCallback(boost::bind(&FSPanelAreaSearchFilter::onCommitCheckbox, this));
mCheckboxTemporary = getChild<LLCheckBoxCtrl>("filter_temporary");
mCheckboxTemporary->setEnabled(FALSE);
mCheckboxTemporary->setCommitCallback(boost::bind(&FSPanelAreaSearchFilter::onCommitCheckbox, this));
mCheckboxPhantom = getChild<LLCheckBoxCtrl>("filter_phantom");
mCheckboxPhantom->setCommitCallback(boost::bind(&FSPanelAreaSearchFilter::onCommitCheckbox, this));
mCheckboxForSale = getChild<LLCheckBoxCtrl>("filter_for_sale");
mCheckboxForSale->setCommitCallback(boost::bind(&FSPanelAreaSearchFilter::onCommitCheckbox, this));
mCheckboxAttachment = getChild<LLCheckBoxCtrl>("filter_attachment");
mCheckboxAttachment->setEnabled(FALSE);
mCheckboxAttachment->setCommitCallback(boost::bind(&FSPanelAreaSearchFilter::onCommitCheckbox, this));
mSpinForSaleMinValue= getChild<LLSpinCtrl>("min_price");
mSpinForSaleMinValue->setCommitCallback(boost::bind(&FSPanelAreaSearchFilter::onCommitSpin, this));
mSpinForSaleMaxValue= getChild<LLSpinCtrl>("max_price");
mSpinForSaleMaxValue->setCommitCallback(boost::bind(&FSPanelAreaSearchFilter::onCommitSpin, this));
mComboClickAction = getChild<LLComboBox>("click_action");
mComboClickAction->setCommitCallback(boost::bind(&FSPanelAreaSearchFilter::onCommitCombo, this));
// The exclude checkboxes default to checked: attachments, physical,
// temporary and child prims are filtered out of results by default.
mCheckboxExcludeAttachment = getChild<LLCheckBoxCtrl>("exclude_attachment");
mCheckboxExcludeAttachment->set(TRUE);
mCheckboxExcludeAttachment->setCommitCallback(boost::bind(&FSPanelAreaSearchFilter::onCommitCheckbox, this));
mCheckboxExcludePhysics = getChild<LLCheckBoxCtrl>("exclude_physical");
mCheckboxExcludePhysics->set(TRUE);
mCheckboxExcludePhysics->setCommitCallback(boost::bind(&FSPanelAreaSearchFilter::onCommitCheckbox, this));
mCheckboxExcludetemporary = getChild<LLCheckBoxCtrl>("exclude_temporary");
mCheckboxExcludetemporary->set(TRUE);
mCheckboxExcludetemporary->setCommitCallback(boost::bind(&FSPanelAreaSearchFilter::onCommitCheckbox, this));
mCheckboxExcludeChildPrim = getChild<LLCheckBoxCtrl>("exclude_childprim");
mCheckboxExcludeChildPrim->set(TRUE);
mCheckboxExcludeChildPrim->setCommitCallback(boost::bind(&FSPanelAreaSearchFilter::onCommitCheckbox, this));
mCheckboxExcludeNeighborRegions = getChild<LLCheckBoxCtrl>("exclude_neighbor_region");
mCheckboxExcludeNeighborRegions->set(TRUE);
mCheckboxExcludeNeighborRegions->setCommitCallback(boost::bind(&FSPanelAreaSearchFilter::onCommitCheckbox, this));
mButtonApply = getChild<LLButton>("apply");
mButtonApply->setClickedCallback(boost::bind(&FSAreaSearch::onButtonClickedSearch, mFSAreaSearch));
mCheckboxDistance = getChild<LLCheckBoxCtrl>("filter_distance");
mCheckboxDistance->setCommitCallback(boost::bind(&FSPanelAreaSearchFilter::onCommitCheckbox, this));
mSpinDistanceMinValue = getChild<LLSpinCtrl>("min_distance");
mSpinDistanceMinValue->setCommitCallback(boost::bind(&FSPanelAreaSearchFilter::onCommitSpin, this));
mSpinDistanceMaxValue= getChild<LLSpinCtrl>("max_distance");
mSpinDistanceMaxValue->setCommitCallback(boost::bind(&FSPanelAreaSearchFilter::onCommitSpin, this));
mCheckboxMoaP = getChild<LLCheckBoxCtrl>("filter_moap");
mCheckboxMoaP->setCommitCallback(boost::bind(&FSPanelAreaSearchFilter::onCommitCheckbox, this));
mCheckboxPermCopy = getChild<LLCheckBoxCtrl>("filter_perm_copy");
mCheckboxPermCopy->setCommitCallback(boost::bind(&FSPanelAreaSearchFilter::onCommitCheckbox, this));
mCheckboxPermModify = getChild<LLCheckBoxCtrl>("filter_perm_modify");
mCheckboxPermModify->setCommitCallback(boost::bind(&FSPanelAreaSearchFilter::onCommitCheckbox, this));
mCheckboxPermTransfer = getChild<LLCheckBoxCtrl>("filter_perm_transfer");
mCheckboxPermTransfer->setCommitCallback(boost::bind(&FSPanelAreaSearchFilter::onCommitCheckbox, this));
mCheckboxAgentParcelOnly = getChild<LLCheckBoxCtrl>("filter_agent_parcel_only");
mCheckboxAgentParcelOnly->setCommitCallback(boost::bind(&FSPanelAreaSearchFilter::onCommitCheckbox, this));
return LLPanel::postBuild();
}
// virtual
// Nothing to tear down explicitly; child widgets are owned by the panel.
FSPanelAreaSearchFilter::~FSPanelAreaSearchFilter()
{ }
// Pushes all checkbox state into the shared search filters.  The exclude
// checkboxes dominate their matching include-filters: when an exclude box
// is checked, the corresponding include-filter checkbox is forced off and
// disabled, and the filter value pushed afterwards reflects that forced
// state.  The order of calls within each if/else pair matters for this
// reason — do not reorder.
void FSPanelAreaSearchFilter::onCommitCheckbox()
{
mFSAreaSearch->setFilterLocked(mCheckboxLocked->get());
mFSAreaSearch->setFilterPhantom(mCheckboxPhantom->get());
mFSAreaSearch->setFilterForSale(mCheckboxForSale->get());
mFSAreaSearch->setFilterDistance(mCheckboxDistance->get());
mFSAreaSearch->setFilterMoaP(mCheckboxMoaP->get());
// Exclude-physical wins over filter-physical.
if (mCheckboxExcludePhysics->get())
{
mFSAreaSearch->setFilterPhysical(false);
mCheckboxPhysical->set(FALSE);
mCheckboxPhysical->setEnabled(FALSE);
mFSAreaSearch->setExcludePhysics(true);
}
else
{
mCheckboxPhysical->setEnabled(TRUE);
mFSAreaSearch->setExcludePhysics(false);
}
// Re-read the (possibly just forced-off) checkbox for the final value.
mFSAreaSearch->setFilterPhysical(mCheckboxPhysical->get());
// Exclude-temporary wins over filter-temporary.
if (mCheckboxExcludetemporary->get())
{
mFSAreaSearch->setFilterTemporary(false);
mCheckboxTemporary->set(FALSE);
mCheckboxTemporary->setEnabled(FALSE);
mFSAreaSearch->setExcludetemporary(true);
}
else
{
mCheckboxTemporary->setEnabled(TRUE);
mFSAreaSearch->setExcludetemporary(false);
}
mFSAreaSearch->setFilterTemporary(mCheckboxTemporary->get());
// Exclude-attachment wins over filter-attachment.
if (mCheckboxExcludeAttachment->get())
{
mFSAreaSearch->setFilterAttachment(false);
mCheckboxAttachment->set(FALSE);
mCheckboxAttachment->setEnabled(FALSE);
mFSAreaSearch->setExcludeAttachment(true);
}
else
{
mCheckboxAttachment->setEnabled(TRUE);
mFSAreaSearch->setExcludeAttachment(false);
}
mFSAreaSearch->setFilterAttachment(mCheckboxAttachment->get());
mFSAreaSearch->setExcludeChildPrims(mCheckboxExcludeChildPrim->get());
mFSAreaSearch->setExcludeNeighborRegions(mCheckboxExcludeNeighborRegions->get());
mFSAreaSearch->setFilterPermCopy(mCheckboxPermCopy->get());
mFSAreaSearch->setFilterPermModify(mCheckboxPermModify->get());
mFSAreaSearch->setFilterPermTransfer(mCheckboxPermTransfer->get());
mFSAreaSearch->setFilterAgentParcelOnly(mCheckboxAgentParcelOnly->get());
}
// Pushes the four range spinners (sale price min/max, distance min/max)
// into the shared search filter state.
void FSPanelAreaSearchFilter::onCommitSpin()
{
mFSAreaSearch->setFilterForSaleMin(mSpinForSaleMinValue->getValue().asInteger());
mFSAreaSearch->setFilterForSaleMax(mSpinForSaleMaxValue->getValue().asInteger());
mFSAreaSearch->setFilterDistanceMin(mSpinDistanceMinValue->getValue().asInteger());
mFSAreaSearch->setFilterDistanceMax(mSpinDistanceMaxValue->getValue().asInteger());
}
void FSPanelAreaSearchFilter::onCommitCombo()
{
if (mComboClickAction->getCurrentIndex() > 0)
{
mFSAreaSearch->setFilterClickAction(true);
mFSAreaSearch->setFilterClickActionType((U8)mComboClickAction->getCurrentIndex());
}
else
{
mFSAreaSearch->setFilterClickAction(false);
mFSAreaSearch->setFilterClickActionType(0);
}
}
//---------------------------------------------------------------------------
// Options tab
//---------------------------------------------------------------------------
// Registers the column-visibility menu callbacks and keeps a back-pointer
// to the owning FSAreaSearch for delegation to the list panel.
FSPanelAreaSearchOptions::FSPanelAreaSearchOptions(FSAreaSearch* pointer)
: LLPanel(),
mFSAreaSearch(pointer)
{
mCommitCallbackRegistrar.add("AreaSearch.DisplayColumn", boost::bind(&FSPanelAreaSearchOptions::onCommitCheckboxDisplayColumn, this, _2));
mEnableCallbackRegistrar.add("AreaSearch.EnableColumn", boost::bind(&FSPanelAreaSearchOptions::onEnableColumnVisibilityChecked, this, _2));
}
// virtual
// Nothing to tear down explicitly; child widgets are owned by the panel.
FSPanelAreaSearchOptions::~FSPanelAreaSearchOptions()
{ }
// Handles a column-visibility checkbox commit; forwards to the list panel,
// which owns the column configuration, after rejecting empty column names.
void FSPanelAreaSearchOptions::onCommitCheckboxDisplayColumn(const LLSD& userdata)
{
std::string column_name = userdata.asString();
if (column_name.empty())
{
LL_WARNS("FSAreaSearch") << "Missing action text." << LL_ENDL;
return;
}
mFSAreaSearch->getPanelList()->onColumnVisibilityChecked(userdata);
}
// Delegates the checked-state query to the list panel, which owns the
// column-configuration bit mask.
bool FSPanelAreaSearchOptions::onEnableColumnVisibilityChecked(const LLSD& userdata)
{
return mFSAreaSearch->getPanelList()->onEnableColumnVisibilityChecked(userdata);
}
//---------------------------------------------------------------------------
// Advanced tab
//---------------------------------------------------------------------------
// NOTE(review): unlike the other panel constructors, the FSAreaSearch
// pointer is accepted but not stored — presumably kept for signature
// symmetry with the sibling panels; confirm before relying on it.
FSPanelAreaSearchAdvanced::FSPanelAreaSearchAdvanced(FSAreaSearch* pointer)
: LLPanel()
{
}
// Caches the double-click behaviour checkboxes; no callbacks are attached
// here, so their state is evidently read elsewhere on demand.
BOOL FSPanelAreaSearchAdvanced::postBuild()
{
mCheckboxClickTouch = getChild<LLCheckBoxCtrl>("double_click_touch");
mCheckboxClickBuy = getChild<LLCheckBoxCtrl>("double_click_buy");
mCheckboxClickSit = getChild<LLCheckBoxCtrl>("double_click_sit");
return LLPanel::postBuild();
}
// virtual
// Nothing to tear down explicitly; child widgets are owned by the panel.
FSPanelAreaSearchAdvanced::~FSPanelAreaSearchAdvanced()
{ }
|
{"hexsha": "6b70748d4831a518c7fe84b4c224a1edcae68a9d", "size": 70811, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "indra/newview/fsareasearch.cpp", "max_stars_repo_name": "SaladDais/LLUDP-Encryption", "max_stars_repo_head_hexsha": "8a426cd0dd154e1a10903e0e6383f4deb2a6098a", "max_stars_repo_licenses": ["ISC"], "max_stars_count": 1.0, "max_stars_repo_stars_event_min_datetime": "2022-01-29T07:10:03.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-29T07:10:03.000Z", "max_issues_repo_path": "indra/newview/fsareasearch.cpp", "max_issues_repo_name": "SaladDais/LLUDP-Encryption", "max_issues_repo_head_hexsha": "8a426cd0dd154e1a10903e0e6383f4deb2a6098a", "max_issues_repo_licenses": ["ISC"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "indra/newview/fsareasearch.cpp", "max_forks_repo_name": "SaladDais/LLUDP-Encryption", "max_forks_repo_head_hexsha": "8a426cd0dd154e1a10903e0e6383f4deb2a6098a", "max_forks_repo_licenses": ["ISC"], "max_forks_count": 1.0, "max_forks_repo_forks_event_min_datetime": "2021-10-01T22:22:27.000Z", "max_forks_repo_forks_event_max_datetime": "2021-10-01T22:22:27.000Z", "avg_line_length": 30.5746977547, "max_line_length": 261, "alphanum_fraction": 0.7137450396, "num_tokens": 18645}
|
From Coq Require Import List.
From Coq Require Import Morphisms.
From Coq Require Import PArith.
From Coq Require Import Permutation.
From Coq Require Import Psatz.
From Coq Require Import SetoidTactics.
From Coq Require Import Field.
From Coq Require Import ZArith.
From Coq Require Import Znumtheory.
From Bignums Require Import BigZ.
Import List.
Require Import Egcd.
Require Import Euler.
Require Import Extras.
Import ListNotations.
Local Open Scope Z.
(* Axiomatisation of the algebraic structure used by the boardroom voting
   protocol: a field-like carrier [A] with decidable setoid equality, a
   [Z]-indexed power operator and a finite [order].  Exponents are compared
   modulo [order - 1] via [expeq]. *)
Class BoardroomAxioms {A : Type} :=
{
(* Setoid equality on the carrier, with a boolean reflection. *)
elmeq : A -> A -> Prop;
elmeqb : A -> A -> bool;
elmeqb_spec x y : Bool.reflect (elmeq x y) (elmeqb x y);
(* Field-like operations. *)
zero : A;
one : A;
add : A -> A -> A;
mul : A -> A -> A;
opp : A -> A;
inv : A -> A;
pow : A -> Z -> A;
order : Z;
(* Exponent equality: equal residues modulo [order - 1]. *)
expeq e e' := e mod (order - 1) = e' mod (order - 1);
order_ge_2 : order >= 2;
(* All operations respect the setoid equality. *)
elmeq_equiv :> Equivalence elmeq;
add_proper :> Proper (elmeq ==> elmeq ==> elmeq) add;
mul_proper :> Proper (elmeq ==> elmeq ==> elmeq) mul;
opp_proper :> Proper (elmeq ==> elmeq) opp;
inv_proper :> Proper (elmeq ==> elmeq) inv;
pow_base_proper :> Proper (elmeq ==> eq ==> elmeq) pow;
pow_exp_proper a :
~(elmeq a zero) -> Proper (expeq ==> elmeq) (pow a);
(* Field laws. *)
one_neq_zero : ~(elmeq one zero);
add_comm a b : elmeq (add a b) (add b a);
add_assoc a b c : elmeq (add a (add b c)) (add (add a b) c);
mul_comm a b : elmeq (mul a b) (mul b a);
mul_assoc a b c : elmeq (mul a (mul b c)) (mul (mul a b) c);
add_0_l a : elmeq (add zero a) a;
mul_0_l a : elmeq (mul zero a) zero;
mul_1_l a : elmeq (mul one a) a;
opp_inv_l a : elmeq (add (opp a) a) zero;
inv_inv_l a : ~(elmeq a zero) -> elmeq (mul (inv a) a) one;
mul_add a b c : elmeq (mul (add a b) c) (add (mul a c) (mul b c));
(* Exponentiation laws; most require a nonzero base. *)
pow_0_r a :
~(elmeq a zero) ->
elmeq (pow a 0) one;
pow_1_r a : elmeq (pow a 1) a;
pow_opp_1 a :
~(elmeq a zero) ->
elmeq (pow a (-1)) (inv a);
pow_plus a e e' :
~(elmeq a zero) ->
elmeq (pow a (e + e')) (mul (pow a e) (pow a e'));
pow_pow a b e :
~(elmeq a zero) ->
elmeq (pow (pow a b) e)
(pow a (b * e)%Z);
pow_nonzero a e :
~(elmeq a zero) ->
~(elmeq (pow a e) zero);
inv_nonzero a :
~(elmeq a zero) ->
~(elmeq (inv a) zero);
}.
(* Make the carrier type an explicit argument of [BoardroomAxioms]. *)
Arguments BoardroomAxioms : clear implicits.
(* Notation scope used by the module below. *)
Delimit Scope broom_scope with broom.
(* Arithmetic-style notations for the boardroom field operations, all
   living in the [broom] scope. *)
Module BoardroomMathNotations.
Infix "==" := elmeq (at level 70) : broom.
Infix "=?" := elmeqb (at level 70) : broom.
Notation "a !== b" := (~(elmeq a b)) (at level 70) : broom.
Notation "0" := zero : broom.
Notation "1" := one : broom.
Infix "+" := add : broom.
Infix "*" := mul : broom.
Infix "^" := pow : broom.
Notation "a 'exp=' b" := (expeq a b) (at level 70) : broom.
Notation "a 'exp<>' b" := (~(expeq a b)) (at level 70) : broom.
End BoardroomMathNotations.
Import BoardroomMathNotations.
Local Open Scope broom.
(* [expeq] (equality of exponents modulo [order - 1]) is an equivalence
   relation, inherited directly from equality of the [Z] remainders. *)
Global Instance oeq_equivalence {A : Type} (field : BoardroomAxioms A) : Equivalence expeq.
Proof.
unfold expeq.
constructor.
- constructor.
- repeat intro; auto.
- now intros ? ? ? -> ->.
Qed.
(* Packages the BoardroomAxioms laws as a [field_theory] record so the
   [field] tactic can be registered for the carrier (subtraction and
   division are defined via [opp] and [inv]). *)
Definition BoardroomAxioms_field_theory {A : Type} (field : BoardroomAxioms A) :
field_theory
0 1
add mul
(fun x y => x + (opp y)) opp
(fun x y => x * inv y) inv
elmeq.
Proof.
constructor; [constructor| | |].
- apply add_0_l.
- apply add_comm.
- apply add_assoc.
- apply mul_1_l.
- apply mul_comm.
- apply mul_assoc.
- apply mul_add.
- reflexivity.
- intros x. rewrite add_comm. apply opp_inv_l.
- apply one_neq_zero.
- reflexivity.
- apply inv_inv_l.
Qed.
(* A distinguished nonzero element whose powers reach every nonzero element
   of the carrier, with a unique exponent in [0, order - 1). *)
Class Generator {A : Type} (field : BoardroomAxioms A) :=
build_generator {
generator : A;
generator_nonzero : generator !== 0;
generator_generates a : a !== 0 -> exists! e, 0 <= e < order - 1 /\ generator^e == a;
}.
(* An axiomatised discrete logarithm with respect to [generator], together
   with the homomorphism laws it satisfies (up to [expeq]). *)
Class DiscreteLog {A : Type} (field : BoardroomAxioms A) (g : Generator field) :=
build_log {
(* This is computationally intractable, but we still elmequire it
for ease of specification *)
log : A -> Z;
log_proper :> Proper (elmeq ==> expeq) log;
(* [log] is a right inverse of exponentiation on nonzero elements. *)
pow_log a :
a !== 0 ->
generator ^ (log a) == a;
log_1_l : log 1 exp= 0%Z;
(* [log] turns multiplication into exponent addition. *)
log_mul a b :
a !== 0 ->
b !== 0 ->
log (a * b) exp= log a + log b;
log_inv a : log (inv a) exp= -log a;
log_generator : log generator = 1%Z;
}.
(* Derived definitions and lemmas over an arbitrary boardroom field
   equipped with a generator and a discrete logarithm. *)
Section WithBoardroomAxioms.
Context {A : Type}.
Context {field : BoardroomAxioms A}.
Context {gen : Generator field}.
Context {disc_log : DiscreteLog field gen}.
(* Product of all elements of a list; [1] for the empty list. *)
Fixpoint prod (l : list A) : A :=
match l with
| [] => one
| x :: xs => x * prod xs
end.
(* Public key for secret key [sk]: generator raised to [sk]. *)
Definition compute_public_key (sk : Z) : A :=
generator ^ sk.
(* Reconstructed key for voter [n]: the product of the public keys before
   position [n], times the inverse of the product of those after it. *)
Definition reconstructed_key (pks : list A) (n : nat) : A :=
let lprod := prod (firstn n pks) in
let rprod := inv (prod (skipn (S n) pks)) in
lprod * rprod.
Definition compute_public_vote (rk : A) (sk : Z) (sv : bool) : A :=
rk ^ sk * if sv then generator else 1.
Fixpoint bruteforce_tally_aux
(n : nat)
(votes_product : A) : option nat :=
if generator ^ (Z.of_nat n) =? votes_product then
Some n
else
match n with
| 0 => None
| S n => bruteforce_tally_aux n votes_product
end%nat.
Definition bruteforce_tally (votes : list A) : option nat :=
bruteforce_tally_aux (length votes) (prod votes).
Add Field ff : (BoardroomAxioms_field_theory field).
Local Open Scope broom.
Hint Resolve one_neq_zero pow_nonzero generator_nonzero inv_nonzero : core.
(* Z.add respects exponent equality (congruence mod order - 1). *)
Instance plus_expeq_proper : Proper (expeq ==> expeq ==> expeq) Z.add.
Proof.
intros x x' xeq y y' yeq.
unfold "exp=" in *.
assert (order - 1 <> 0)%Z by (pose proof order_ge_2; lia).
now rewrite Z.add_mod, xeq, yeq, <- Z.add_mod.
Qed.
(* Z.mul respects exponent equality. *)
Instance mul_expeq_proper : Proper (expeq ==> expeq ==> expeq) Z.mul.
Proof.
intros x x' xeq y y' yeq.
unfold "exp=" in *.
assert (order - 1 <> 0)%Z by (pose proof order_ge_2; lia).
now rewrite Z.mul_mod, xeq, yeq, <- Z.mul_mod.
Qed.
(* Z.sub respects exponent equality. *)
Instance sub_expeq_proper : Proper (expeq ==> expeq ==> expeq) Z.sub.
Proof.
intros x x' xeq y y' yeq.
unfold "exp=" in *.
assert (order - 1 <> 0)%Z by (pose proof order_ge_2; lia).
now rewrite Zminus_mod, xeq, yeq, <- Zminus_mod.
Qed.
(* Z.opp respects exponent equality, reduced to subtraction 0 - x. *)
Instance opp_expeq_proper : Proper (expeq ==> expeq) Z.opp.
Proof.
intros x x' xeq.
rewrite <- !Z.sub_0_l.
now rewrite xeq.
Qed.
(* log of a power: log (a^e) exp= e * log a, by bidirectional Peano
induction on the integer exponent e. *)
Lemma log_pow a e :
a !== 0 ->
log (a ^ e) exp= e * log a.
Proof.
intros anz.
induction e using Z.peano_ind.
- rewrite pow_0_r; auto.
apply log_1_l.
- replace (Z.succ e) with (e + 1)%Z by lia.
rewrite pow_plus by auto.
rewrite log_mul; auto.
rewrite IHe.
replace ((e + 1) * log a)%Z with (e * log a + log a)%Z by lia.
now rewrite pow_1_r.
- replace (Z.pred e) with (e + (-1))%Z by lia.
rewrite pow_plus by auto.
rewrite log_mul by auto.
rewrite IHe.
rewrite (pow_opp_1 a) by auto.
rewrite log_inv.
now replace ((e + -1) * log a)%Z with (e * log a + - log a)%Z by lia.
Qed.
(* Raising the generator to exp=-equal exponents yields elmeq results. *)
Instance pow_generator_proper : Proper (expeq ==> elmeq) (pow generator) :=
pow_exp_proper _ generator_nonzero.
(* log is injective on nonzero elements: equal logs force equal
elements, via pow_log. *)
Lemma log_both a b :
a !== 0 ->
b !== 0 ->
log a exp= log b ->
a == b.
Proof.
intros an0 bn0 dleq.
assert (generator ^ log a == generator ^ log a) as H by reflexivity.
rewrite dleq in H at 1.
now rewrite !pow_log in H by auto.
Qed.
(* log is a left inverse of exponentiation by the generator. *)
Lemma log_pow_generator e :
log (generator ^ e) exp= e.
Proof.
rewrite log_pow; auto.
rewrite log_generator.
now rewrite Z.mul_1_r.
Qed.
(* No zero divisors: the product of nonzero elements is nonzero.
Derived from Coq's setoid_ring [field_is_integral_domain], after
converting our field_theory to an almost-field via [F2AF]. *)
Lemma int_domain a b :
a !== 0 ->
b !== 0 ->
a * b !== 0.
Proof.
intros an0 bn0.
apply (@field_is_integral_domain
A
0 1
add
mul
(fun a b => a + (opp b))
opp
(fun a b => a * (inv b))
inv); eauto.
- typeclasses eauto.
- constructor; typeclasses eauto.
- apply F2AF.
+ typeclasses eauto.
+ constructor; typeclasses eauto.
+ apply (BoardroomAxioms_field_theory field).
Qed.
Hint Resolve int_domain : core.
(* A list product is nonzero exactly when every element is nonzero.
Forward direction is int_domain; backward direction shows a zero
element would zero the product via mul_0_l. *)
Lemma prod_units (xs : list A) :
All (fun x => x !== 0) xs <-> prod xs !== 0.
Proof.
induction xs as [|x xs IH]; cbn in *; auto.
- split; auto.
- split.
+ intros.
apply int_domain; intuition.
+ intros xprod.
split.
* intros eq; rewrite eq in *; rewrite mul_0_l in xprod.
destruct (xprod ltac:(reflexivity)).
* apply IH.
intros eq; rewrite eq in *; rewrite mul_comm, mul_0_l in xprod.
destruct (xprod ltac:(reflexivity)).
Qed.
Hint Resolve -> prod_units : core.
Hint Resolve <- prod_units : core.
(* Public keys are nonzero (powers of the nonzero generator). *)
Lemma compute_public_key_unit sk :
compute_public_key sk !== 0.
Proof. apply pow_nonzero, generator_nonzero. Qed.
Hint Resolve compute_public_key_unit : core.
(* All public keys in a mapped list are nonzero. *)
Lemma compute_public_keys_units sks :
All (fun x => x !== 0) (map compute_public_key sks).
Proof.
induction sks as [|sk sks IH]; cbn; auto.
Qed.
Hint Resolve compute_public_keys_units : core.
(* The reconstructed key is nonzero whenever all public keys are:
both the firstn product and the inverted skipn product are units. *)
Lemma reconstructed_key_unit pks i :
All (fun a => a !== 0) pks ->
reconstructed_key pks i !== 0.
Proof.
intros all.
unfold reconstructed_key.
apply int_domain.
- apply prod_units.
apply (all_incl (firstn i pks) pks); auto.
apply firstn_incl.
- apply inv_nonzero.
apply prod_units.
apply (all_incl (skipn (S i) pks) pks); auto.
apply skipn_incl.
Qed.
(* Public votes are nonzero whenever the reconstructed key is. *)
Lemma compute_public_vote_unit rk sk sv :
rk !== 0 ->
compute_public_vote rk sk sv !== 0.
Proof.
intros rk_unit.
unfold compute_public_vote.
destruct sv; auto.
Qed.
Hint Resolve
compute_public_key_unit compute_public_keys_units
reconstructed_key_unit compute_public_vote_unit : core.
(* log turns a product of nonzero elements into a sum of logs. *)
Lemma log_prod (l : list A) :
All (fun a => a !== 0) l ->
log (prod l) exp= sumZ log l.
Proof.
intros all.
induction l as [|x xs IH]; cbn in *.
- now rewrite log_1_l.
- specialize (IH (proj2 all)).
destruct all.
rewrite log_mul by auto.
now rewrite IH.
Qed.
(* Prefix products of a unit product are units. *)
Lemma prod_firstn_units n l :
prod l !== 0 ->
prod (firstn n l) !== 0.
Proof.
intros prodl.
apply prod_units.
pose proof (firstn_incl n l).
apply all_incl with l; auto.
Qed.
Hint Resolve prod_firstn_units : core.
(* Suffix products of a unit product are units. *)
Lemma prod_skipn_units n l :
prod l !== 0 ->
prod (skipn n l) !== 0.
Proof.
intros prodl.
apply prod_units.
pose proof (skipn_incl n l).
apply all_incl with l; auto.
Qed.
Hint Resolve prod_skipn_units : core.
Local Open Scope Z.
(* Key cancellation lemma behind the tally: summing
nth i * (sum of entries before i - sum of entries after i)
over all positions yields 0. Each cross product of two entries
appears exactly once positively and once negatively, so after
rewriting both sides as double sums and swapping the order of
summation, the two halves coincide. *)
Lemma sum_lemma l :
sumZ (fun i => nth i l 0 *
(sumZ id (firstn i l) -
sumZ id (skipn (S i) l)))
(seq 0 (length l)) = 0.
Proof.
(* Rewrite each summand as a difference of guarded double sums over
the whole index range. *)
rewrite (sumZ_seq_feq
(fun i => sumZ (fun j => if Nat.ltb j i then nth i l 0 * nth j l 0 else 0)
(seq 0 (length l)) -
sumZ (fun j => if Nat.ltb i j then nth i l 0 * nth j l 0 else 0)
(seq 0 (length l))));
cycle 1.
{
intros i ?.
rewrite Z.mul_sub_distr_l.
rewrite 2!sumZ_mul.
unfold id.
rewrite (sumZ_firstn 0) by (right; lia).
rewrite (sumZ_skipn 0).
apply f_equal2.
- rewrite (sumZ_seq_split i) by lia.
rewrite Z.add_comm.
cbn -[Nat.ltb].
rewrite sumZ_seq_n.
rewrite (sumZ_seq_feq (fun _ => 0)); cycle 1.
{ intros j jlt. destruct (Nat.ltb_spec (j + i) i); auto; lia. }
rewrite sumZ_zero.
cbn -[Nat.ltb].
apply sumZ_seq_feq.
intros j jlt.
destruct (Nat.ltb_spec j i); lia.
- rewrite (sumZ_seq_split (S i)) by lia.
rewrite (sumZ_seq_feq (fun _ => 0)); cycle 1.
{ intros j jlt. destruct (Nat.ltb_spec i j); auto; lia. }
rewrite sumZ_zero.
cbn.
rewrite sumZ_seq_n.
replace (length l - S i)%nat with (length l - i - 1)%nat by lia.
apply sumZ_seq_feq.
intros j jlt.
replace (j + S i)%nat with (S (i + j))%nat by lia.
destruct (Nat.leb_spec i (i + j)); lia.
}
(* Swap summation order in one half; the halves are then equal. *)
rewrite sumZ_sub.
rewrite sumZ_sumZ_swap.
match goal with
| [|- ?a - ?b = 0] => enough (a = b) by lia
end.
apply sumZ_seq_feq.
intros i ilt.
apply sumZ_seq_feq.
intros j jlt.
destruct (i <? j)%nat; lia.
Qed.
Local Open Scope broom.
(* Core homomorphic-tally property: the product of all public votes is
the generator raised to the number of 'yes' votes. The rk^sk parts
contribute sk_i * (sum of earlier sks - sum of later sks), which
cancels to 0 by sum_lemma; proved by comparing logs via log_both. *)
Lemma mul_public_votes
(sks : list Z)
(votes : nat -> bool) :
prod (map (fun (i : nat) =>
compute_public_vote
(reconstructed_key (map compute_public_key sks) i)
(nth i sks 0%Z)
(votes i))
(seq 0 (length sks)))
== generator ^ sumZ (fun i => if votes i then 1 else 0)%Z (seq 0 (length sks)).
Proof.
apply log_both; auto.
- induction (seq 0 (length sks)); cbn; auto.
- rewrite log_pow_generator, log_prod; cycle 1.
{
induction (seq 0 (length sks)); cbn; auto.
}
rewrite sumZ_map.
unfold compute_public_vote.
etransitivity.
{
apply sumZ_seq_feq_rel; try typeclasses eauto.
intros i ilt.
rewrite log_mul at 1 by (destruct (votes i); auto).
setoid_replace (log (if votes i then generator else 1))
with (if votes i then 1%Z else 0%Z) at 1;
cycle 1.
- destruct (votes i).
+ rewrite <- (pow_1_r generator).
apply log_pow_generator.
+ apply log_1_l.
- rewrite log_pow at 1 by auto.
unfold reconstructed_key.
rewrite log_mul at 1 by auto.
rewrite log_prod at 1 by auto.
rewrite log_inv at 1.
rewrite log_prod at 1 by auto.
rewrite 2!(sumZ_map_id log) at 1.
rewrite firstn_map, skipn_map, !map_map.
unfold compute_public_key.
assert (forall l, sumZ id (map (fun x => log (generator ^ x)) l) exp= sumZ id l).
{ clear.
intros l.
induction l; cbn; auto.
- reflexivity.
- unfold id at 1 3.
rewrite log_pow_generator.
now rewrite IHl.
}
rewrite 2!H at 1.
rewrite Z.add_opp_r.
reflexivity.
}
rewrite sumZ_add.
rewrite sum_lemma.
reflexivity.
Qed.
(* prod is invariant under permutation (commutativity/associativity). *)
Global Instance prod_perm_proper :
Proper (@Permutation A ==> elmeq) prod.
Proof.
intros l l' permeq.
induction permeq.
- reflexivity.
- cbn.
now rewrite IHpermeq.
- cbn.
rewrite !mul_assoc.
now rewrite (mul_comm y).
- now rewrite IHpermeq1, IHpermeq2.
Qed.
(* bruteforce_tally_aux only depends on the product up to elmeq. *)
Global Instance bruteforce_tally_aux_proper :
Proper (eq ==> elmeq ==> eq) bruteforce_tally_aux.
Proof.
intros n ? <- p p' prodeq.
induction n as [|n IH].
- cbn.
destruct (elmeqb_spec (generator^0) p),
(elmeqb_spec (generator^0) p'); auto.
+ rewrite prodeq in e; contradiction.
+ rewrite <- prodeq in e; contradiction.
- cbn.
destruct (elmeqb_spec (generator^Z.pos (Pos.of_succ_nat n)) p),
(elmeqb_spec (generator^Z.pos (Pos.of_succ_nat n)) p'); auto.
+ rewrite prodeq in e; contradiction.
+ rewrite <- prodeq in e; contradiction.
Qed.
(* The tally is invariant under permutation of the votes. *)
Global Instance bruteforce_tally_proper :
Proper (@Permutation A ==> eq) bruteforce_tally.
Proof.
unfold bruteforce_tally.
now intros ? ? <-.
Qed.
(* The boolean equality test respects elmeq on both arguments. *)
Global Instance elmeqb_elmeq_proper :
Proper (elmeq ==> elmeq ==> eq) elmeqb.
Proof.
intros x x' xeq y y' yeq.
destruct (elmeqb_spec x y), (elmeqb_spec x' y'); auto.
- contradiction n.
now rewrite <- xeq, <- yeq.
- contradiction n.
now rewrite xeq, yeq.
Qed.
(* Reflexivity of the boolean equality test. *)
Lemma elmeqb_refl a :
a =? a = true.
Proof.
destruct (elmeqb_spec a a); [easy|].
contradiction n.
reflexivity.
Qed.
(* The brute-force search finds the exact exponent as long as the search
bound stays below order - 1, where uniqueness of discrete logs
(generator_generates) rules out spurious matches. *)
Lemma bruteforce_tally_aux_correct result max :
Z.of_nat max < order - 1 ->
(result <= max)%nat ->
bruteforce_tally_aux max (generator ^ Z.of_nat result) = Some result.
Proof.
intros max_lt result_le.
induction max as [|max IH].
- replace result with 0%nat by lia.
cbn.
now rewrite elmeqb_refl.
- destruct (Nat.eq_dec result (S max)) as [->|?].
+ cbn.
now rewrite elmeqb_refl.
+ cbn -[Z.of_nat].
destruct (elmeqb_spec (generator ^ Z.of_nat (S max))
(generator ^ Z.of_nat result)) as [eq|?]; auto.
* pose proof (generator_generates (generator ^ Z.of_nat result) ltac:(auto)).
destruct H as [e [sat unique]].
unshelve epose proof (unique (Z.of_nat (S max)) _) as smax.
{ split; auto; lia. }
unshelve epose proof (unique (Z.of_nat result) _) as res.
{ split; [lia|reflexivity]. }
rewrite <- (Z2Nat.id e) in smax, res by lia.
apply Nat2Z.inj in smax.
apply Nat2Z.inj in res.
congruence.
* apply IH; lia.
Qed.
(* Counting 'yes' votes in Z agrees with counting them in nat. *)
Lemma sumZ_sumnat_votes (svs : nat -> bool) l :
sumZ (fun i => if svs i then 1%Z else 0%Z) l =
Z.of_nat (sumnat (fun i => if svs i then 1%nat else 0%nat) l).
Proof.
induction l as [|x xs IH]; auto.
cbn.
rewrite IH, Nat2Z.inj_add.
destruct (svs x); lia.
Qed.
(* Main correctness theorem: given ballots bs whose indices form a
permutation of 0..|bs|-1, whose public keys and public votes are
computed honestly, the brute-force tally of the public votes counts
exactly the 'yes' secret votes. The proof reindexes everything by
position via [geti] and reduces to mul_public_votes and
bruteforce_tally_aux_correct. *)
Lemma bruteforce_tally_correct
{B}
(bs : list B)
(index : B -> nat)
(sks : B -> Z)
(pks : list A)
(svs : B -> bool)
(pvs : B -> A) :
Z.of_nat (length bs) < order - 1 ->
Permutation (map index bs) (seq 0 (length bs)) ->
length pks = length bs ->
(forall b, In b bs -> nth_error pks (index b) = Some (compute_public_key (sks b))) ->
(forall b, In b bs -> pvs b = compute_public_vote
(reconstructed_key pks (index b))
(sks b)
(svs b)) ->
bruteforce_tally (map pvs bs) =
Some (sumnat (fun b => if svs b then 1 else 0)%nat bs).
Proof.
intros countlt perm len_pks pks_sks pvs_svs.
(* geti i: the (unique) ballot whose index is i, if any. *)
set (geti i := find (fun b => index b =? i)%nat bs).
(* Secret keys and secret votes reindexed by position. *)
set (sks_list := map (fun i => match geti i with
| Some x => sks x
| None => 0%Z
end)
(seq 0 (length bs))).
set (svsi i := match geti i with
| Some x => svs x
| None => false
end).
pose proof (mul_public_votes sks_list svsi).
(* geti really inverts index, because the indices are distinct. *)
assert (geti_i: forall b i,
In b bs ->
index b = i ->
geti i = Some b).
{
clear -perm.
intros b i isin index_eq.
pose proof (seq_NoDup (length bs) 0).
rewrite <- perm in H.
clear perm.
subst geti.
cbn.
induction bs as [|b' bs IH]; cbn in *; try easy.
destruct isin as [<-|?].
- now rewrite index_eq, Nat.eqb_refl.
- inversion H; subst.
destruct (Nat.eqb_spec (index b') (index b)).
+ rewrite e in H.
apply (in_map index) in H0.
rewrite <- e in H0.
contradiction.
+ inversion H; apply IH; auto.
}
(* Mapping over bs equals mapping the reindexed function over
positions, up to permutation. *)
assert (map_on_bs:
forall {C} (f : B -> C) (def : C),
Permutation (map f bs)
(map (fun i => match geti i with
| Some b => f b
| None => def
end) (seq 0 (length bs)))).
{
intros C f def.
rewrite <- perm.
rewrite map_map.
match goal with
| [|- Permutation ?l ?l'] => enough (l = l') as eq by (now rewrite eq)
end.
apply map_ext_in.
intros b inbbs.
now rewrite geti_i with (b := b) by auto.
}
(* The honestly computed public votes, reindexed by position, are a
permutation of the submitted public votes. *)
assert (Permutation
(map (fun i : nat =>
compute_public_vote
(reconstructed_key
(map compute_public_key sks_list) i)
(nth i sks_list 0%Z) (svsi i))
(seq 0 (length sks_list)))
(map pvs bs))%nat.
{
rewrite (map_on_bs _ pvs 0).
unfold sks_list.
rewrite map_length, seq_length.
match goal with
| [|- Permutation ?l ?l'] => enough (l = l') as eq by (now rewrite eq)
end.
apply map_ext_in.
intros i iin.
assert (i < length bs)%nat by (apply in_seq in iin; lia).
rewrite map_nth_alt with (d2 := 0%nat) by (now rewrite seq_length).
rewrite seq_nth.
cbn.
rewrite <- perm in iin.
apply in_map_iff in iin.
destruct iin as [b [indexb inbbs]].
unfold svsi.
rewrite geti_i with (b := b) by auto.
rewrite pvs_svs by auto.
subst i.
rewrite map_map.
f_equal.
- f_equal.
clear -perm len_pks pks_sks geti_i.
apply list_eq_nth.
{ now rewrite map_length, seq_length. }
intros i a a' ntha ntha'.
assert (isin: In i (seq 0 (length bs))).
{
apply in_seq.
split; try lia.
rewrite <- len_pks.
cbn; apply nth_error_Some.
congruence.
}
assert (i < length bs)%nat by (apply in_seq in isin; lia).
rewrite <- perm in isin.
apply in_map_iff in isin.
destruct isin as [b [indexb inbbs]].
subst i.
rewrite pks_sks in ntha' by auto.
rewrite map_nth_error with (d := index b) in ntha by (now rewrite nth_error_seq_in).
rewrite geti_i with (b := b) in ntha by auto.
congruence.
- auto.
}
rewrite <- H0.
unfold bruteforce_tally.
rewrite map_length, seq_length.
rewrite mul_public_votes.
rewrite <- (sumnat_map svs (fun sv => if sv then 1%nat else 0%nat)).
(* Reduce to: reindexed secret votes are a permutation of the
original ones, so the counts agree. *)
enough (Permutation (map svs bs) (map svsi (seq 0 (length sks_list)))).
{
rewrite H1.
rewrite sumnat_map.
rewrite sumZ_sumnat_votes.
unfold sks_list.
rewrite map_length, seq_length.
rewrite bruteforce_tally_aux_correct; auto.
rewrite <- Nat.mul_1_l.
rewrite <- (seq_length (length bs) 0) at 2.
apply sumnat_max.
intros i iin.
destruct (svsi i); lia.
}
unfold sks_list.
rewrite map_length, !seq_length.
rewrite <- perm.
pose proof (seq_NoDup (length bs) 0).
rewrite <- perm in H1.
clear -H1.
induction bs as [|b bs IH]; auto.
subst geti svsi; cbn in *.
rewrite Nat.eqb_refl.
inversion H1; subst.
rewrite (map_ext_in _ (fun i => match find (fun b => (index b =? i)%nat) bs with
| Some x => svs x
| None => false
end)); cycle 1.
{
intros a ain.
destruct (Nat.eqb_spec (index b) a); auto.
subst a; contradiction.
}
apply Permutation_cons; auto.
Qed.
End WithBoardroomAxioms.
Module Zp.
Local Open Scope Z.
(* Modular exponentiation by repeated squaring along the bits of x,
with accumulator r. Its value is r * a^x mod m (mod_pow_pos_aux_spec
below). *)
Fixpoint mod_pow_pos_aux (a : Z) (x : positive) (m : Z) (r : Z) : Z :=
match x with
| x~0%positive => mod_pow_pos_aux (a * a mod m) x m r
| x~1%positive => mod_pow_pos_aux (a * a mod m) x m (r * a mod m)
| _ => r * a mod m
end.
(* a^x mod m for a positive exponent. *)
Definition mod_pow_pos (a : Z) (x : positive) (m : Z) : Z :=
mod_pow_pos_aux a x m 1.
(* Modular inverse, taken from the first Bezout coefficient of the
extended Euclidean algorithm. *)
Definition mod_inv (a : Z) (p : Z) : Z :=
fst (egcd a p) mod p.
(* a^x mod p for any integer exponent; a negative exponent is handled
by inverting the positive power. *)
Definition mod_pow (a x p : Z) : Z :=
match x with
| Z0 => a ^ 0 mod p
| Zpos x => mod_pow_pos a x p
| Zneg x => mod_inv (mod_pow_pos a x p) p
end.
(* Reducing the base mod m before Z.pow_pos does not change the result
mod m. The helper H factors Pos.iter's accumulator out so the
induction on x goes through; the m = 0 case is handled separately
(everything collapses via Zmod_0_r). *)
Lemma Z_pow_pos_mod_idemp a x m :
Z.pow_pos (a mod m) x mod m = Z.pow_pos a x mod m.
Proof.
destruct (m =? 0) eqn:mzero.
{
apply Z.eqb_eq in mzero.
rewrite mzero.
now rewrite 2!Zmod_0_r.
}
apply Z.eqb_neq in mzero.
unfold Z.pow_pos.
assert (H: forall start l x, Pos.iter (Z.mul l) start x = start * Pos.iter (Z.mul l) 1 x).
{
clear.
intros start l x.
revert start.
induction x; intros start.
- cbn.
rewrite 2!IHx.
rewrite (IHx (Pos.iter (Z.mul l) 1 x)).
lia.
- cbn.
rewrite 2!IHx.
rewrite (IHx (Pos.iter (Z.mul l) 1 x)).
lia.
- cbn.
lia.
}
induction x.
- cbn.
rewrite (H _ (a mod m)).
rewrite (H _ a).
rewrite Z.mul_assoc.
assert (H2: forall a b c,
((a mod m) * b * c) mod m = ((a mod m) * (b mod m) * (c mod m)) mod m).
{
clear.
intros.
rewrite <- Z.mul_assoc.
rewrite Zmult_mod_idemp_l.
rewrite <- Z.mul_assoc.
rewrite Zmult_mod_idemp_l.
rewrite 2!Z.mul_assoc.
rewrite (Z.mul_comm _ (c mod m)).
rewrite Zmult_mod_idemp_l.
rewrite Z.mul_assoc.
rewrite (Z.mul_comm _ (b mod m)).
rewrite Zmult_mod_idemp_l.
replace (b * (c * a)) with (a * b * c) by lia; auto.
}
rewrite H2.
rewrite IHx.
rewrite <- H2.
rewrite <- Z.mul_assoc.
now rewrite Zmult_mod_idemp_l.
- cbn.
rewrite H.
rewrite Z.mul_mod by auto.
rewrite IHx.
rewrite <- Z.mul_mod by auto.
now rewrite <- H.
- cbn.
rewrite 2!Z.mul_1_r.
now apply Z.mod_mod.
Qed.
(* With modulus 0, every power collapses to 0 (x mod 0 = 0 here). *)
Lemma mod_pow_pos_aux_0_r a x r :
mod_pow_pos_aux a x 0 r = 0.
Proof.
revert a r.
induction x; intros a r; cbn.
+ now (repeat rewrite ?IHx, ?Zmod_0_r).
+ now (repeat rewrite ?IHx, ?Zmod_0_r).
+ now rewrite !Zmod_0_r.
Qed.
Lemma mod_pow_pos_0_r a x :
mod_pow_pos a x 0 = 0.
Proof. apply mod_pow_pos_aux_0_r. Qed.
Lemma mod_pow_0_r a x :
mod_pow a x 0 = 0.
Proof.
destruct x; auto; cbn.
- apply mod_pow_pos_0_r.
- now rewrite mod_pow_pos_0_r.
Qed.
(* Correctness of the square-and-multiply loop: the accumulator-based
computation equals r * a^x mod p. *)
Lemma mod_pow_pos_aux_spec a x p r :
mod_pow_pos_aux a x p r = r * Z.pow_pos a x mod p.
Proof.
destruct (Z.eq_dec p 0) as [->|nonzero].
- now rewrite mod_pow_pos_aux_0_r, Zmod_0_r.
- revert a r.
induction x; intros a r.
+ cbn -[Z.pow_pos].
specialize (IHx ((a * a) mod p) ((r * a) mod p)).
rewrite IHx.
rewrite <- Z.mul_mod_idemp_r by auto.
rewrite Z_pow_pos_mod_idemp.
rewrite <- Z.mul_mod by auto.
replace (x~1)%positive with (x*2+1)%positive by lia.
rewrite Zpower_pos_is_exp.
cbn.
rewrite 2!Z.pow_pos_fold.
rewrite Z.pow_mul_l.
rewrite <- Z.pow_add_r by (auto with zarith).
rewrite Zred_factor1.
cbn.
f_equal; lia.
+ cbn -[Z.pow_pos].
rewrite IHx.
rewrite <- Zmult_mod_idemp_r.
rewrite Z_pow_pos_mod_idemp.
rewrite Zmult_mod_idemp_r.
replace (x~0)%positive with (x*2)%positive by lia.
rewrite 2!Z.pow_pos_fold.
rewrite Z.pow_mul_l.
rewrite <- Z.pow_add_r by (auto with zarith).
now rewrite Zred_factor1.
+ cbn.
f_equal; lia.
Qed.
(* mod_pow_pos computes exactly Z.pow_pos reduced mod p. *)
Lemma mod_pow_pos_spec a x p :
mod_pow_pos a x p = Z.pow_pos a x mod p.
Proof.
pose proof (mod_pow_pos_aux_spec a x p 1).
now rewrite Z.mul_1_l in H.
Qed.
(* Reducing the base mod p first does not change a^x mod p. *)
Lemma Z_pow_mod_idemp a x p :
(a mod p)^x mod p = a^x mod p.
Proof.
destruct x; auto.
cbn.
apply Z_pow_pos_mod_idemp.
Qed.
(* Pre-reducing the base mod p leaves the loop's result unchanged. *)
Lemma mod_pow_pos_aux_mod_idemp a x p r :
mod_pow_pos_aux (a mod p) x p r = mod_pow_pos_aux a x p r.
Proof.
destruct (Z.eq_dec p 0) as [->|?].
- now rewrite !mod_pow_pos_aux_0_r.
- revert a r.
induction x; intros a r; cbn in *.
+ rewrite <- Z.mul_mod by auto.
now rewrite Z.mul_mod_idemp_r by auto.
+ now rewrite <- Z.mul_mod by auto.
+ now rewrite Z.mul_mod_idemp_r by auto.
Qed.
Lemma mod_pow_pos_mod_idemp a x p :
mod_pow_pos (a mod p) x p = mod_pow_pos a x p.
Proof. apply mod_pow_pos_aux_mod_idemp. Qed.
Lemma mod_pow_mod_idemp a e p :
mod_pow (a mod p) e p = mod_pow a e p.
Proof.
unfold mod_pow.
destruct e.
- now rewrite Z_pow_mod_idemp.
- now rewrite mod_pow_pos_mod_idemp.
- now rewrite mod_pow_pos_mod_idemp.
Qed.
(* The results of the modular operations are already reduced mod p. *)
Lemma mod_pow_pos_aux_mod a x p r :
mod_pow_pos_aux a x p r mod p = mod_pow_pos_aux a x p r.
Proof.
destruct (Z.eq_dec p 0) as [->|?].
- now rewrite mod_pow_pos_aux_0_r.
- rewrite mod_pow_pos_aux_spec.
now rewrite Z.mod_mod by auto.
Qed.
Lemma mod_pow_pos_mod a x p :
mod_pow_pos a x p mod p = mod_pow_pos a x p.
Proof. apply mod_pow_pos_aux_mod. Qed.
Lemma mod_inv_mod a p :
mod_inv a p mod p = mod_inv a p.
Proof.
unfold mod_inv.
now rewrite Zmod_mod.
Qed.
Lemma mod_pow_mod a x p :
mod_pow a x p mod p = mod_pow a x p.
Proof.
destruct (Z.eq_dec p 0) as [->|?].
- now rewrite mod_pow_0_r.
- destruct x; cbn.
+ now rewrite Z.mod_mod by auto.
+ now rewrite mod_pow_pos_mod.
+ now rewrite mod_inv_mod.
Qed.
(* mod_inv is a genuine inverse mod a prime p: a * a^-1 = 1 (mod p),
by the Bezout identity from egcd and coprimality of a and p. *)
Lemma mul_mod_inv a p :
prime p ->
a mod p <> 0 ->
a * mod_inv a p mod p = 1.
Proof.
intros isprime ap0.
pose proof (prime_ge_2 _ isprime).
unfold mod_inv.
rewrite Z.mul_mod_idemp_r by lia.
rewrite <- (Z.mod_1_l p) by lia.
apply mul_fst_egcd.
apply rel_prime_sym, prime_rel_prime; [easy|].
intros eq; contradiction ap0.
apply Z.mod_divide; [lia|easy].
Qed.
(* Cancellation: multiplying both sides by a unit c preserves and
reflects congruence mod a prime p. *)
Lemma mod_mul_both_l a b c p :
prime p ->
c mod p <> 0 ->
(c * a mod p = c * b mod p <-> a mod p = b mod p).
Proof.
intros isprime cp0.
pose proof (prime_ge_2 _ isprime).
split.
- intros eq.
rewrite <- (Z.mul_1_l a).
rewrite <- (Z.mul_1_l b).
rewrite <- (mul_mod_inv c _ isprime cp0).
rewrite !Z.mul_mod_idemp_l by lia.
rewrite (Z.mul_comm c).
rewrite <- 2!Z.mul_assoc.
rewrite <- (Z.mul_mod_idemp_r _ (c * a)) by lia.
rewrite <- (Z.mul_mod_idemp_r _ (c * b)) by lia.
now rewrite eq.
- intros eq.
now rewrite <- Z.mul_mod_idemp_r, eq, Z.mul_mod_idemp_r by lia.
Qed.
(* Z mod p has no zero divisors (p prime): prime_mult. *)
Lemma mul_mod_nonzero a b p :
prime p ->
a mod p <> 0 ->
b mod p <> 0 ->
a * b mod p <> 0.
Proof.
intros isprime ap0 bp0.
intros abp0.
pose proof (prime_ge_2 _ isprime).
pose proof (proj1 (Z.mod_divide _ p ltac:(lia)) abp0) as pdiv.
pose proof (prime_mult _ isprime _ _ pdiv) as onediv.
destruct onediv as [div|div]; apply Z.mod_divide in div; lia.
Qed.
Hint Resolve mul_mod_nonzero : core.
(* Reducing mod p again preserves nonzeroness. *)
Lemma mod_mod_nonzero a p :
a mod p <> 0 ->
(a mod p) mod p <> 0.
Proof.
intros ap0.
destruct (Z.eq_dec p 0) as [->|?].
- cbn in ap0.
rewrite Zmod_0_r in ap0.
congruence.
- rewrite Z.mod_mod by auto.
auto.
Qed.
Hint Resolve mod_mod_nonzero : core.
(* The modular operations preserve nonzeroness mod a prime p; each of
these is registered as a core hint so later proofs can use [auto]. *)
Lemma mod_pow_pos_aux_nonzero a x p r :
prime p ->
a mod p <> 0 ->
r mod p <> 0 ->
mod_pow_pos_aux a x p r <> 0.
Proof.
intros prime.
pose proof (prime_ge_2 _ prime).
revert a r.
induction x; intros a r ap0 rp0; cbn; auto.
Qed.
Hint Resolve mod_pow_pos_aux_nonzero : core.
Lemma mod_pow_pos_nonzero a x p :
prime p ->
a mod p <> 0 ->
mod_pow_pos a x p <> 0.
Proof.
intros isprime ap0.
apply mod_pow_pos_aux_nonzero; auto.
pose proof (prime_ge_2 _ isprime).
now rewrite Z.mod_1_l by lia.
Qed.
Hint Resolve mod_pow_pos_nonzero : core.
(* An inverse of a unit is a unit: otherwise a * a^-1 = 1 would give
0 = 1 mod p. *)
Lemma mod_inv_nonzero a p :
prime p ->
a mod p <> 0 ->
mod_inv a p <> 0.
Proof.
intros isprime ap0 iszero.
pose proof (prime_ge_2 _ isprime).
rewrite <- (Z.mod_0_l p) in iszero by lia.
rewrite <- mod_inv_mod in iszero.
apply (mod_mul_both_l _ _ a p isprime ap0) in iszero.
rewrite mul_mod_inv in iszero by auto.
rewrite Z.mul_0_r, Z.mod_0_l in iszero; easy.
Qed.
Hint Resolve mod_inv_nonzero : core.
Lemma mod_pow_nonzero a x p :
prime p ->
a mod p <> 0 ->
mod_pow a x p <> 0.
Proof.
intros isprime ap0.
pose proof (prime_ge_2 _ isprime).
destruct x; cbn; auto.
- rewrite Z.mod_1_l by lia; discriminate.
- apply mod_inv_nonzero; auto.
rewrite mod_pow_pos_mod; auto.
Qed.
Hint Resolve mod_pow_nonzero : core.
(* Variants phrased with an outer reduction, for use by auto. *)
Lemma mod_pow_pos_mod_nonzero a x p :
mod_pow_pos a x p <> 0 ->
mod_pow_pos a x p mod p <> 0.
Proof. rewrite mod_pow_pos_mod; auto. Qed.
Lemma mod_inv_mod_nonzero a p :
mod_inv a p <> 0 ->
mod_inv a p mod p <> 0.
Proof. rewrite mod_inv_mod; auto. Qed.
Lemma mod_pow_mod_nonzero a x p :
mod_pow a x p <> 0 ->
mod_pow a x p mod p <> 0.
Proof. rewrite mod_pow_mod; auto. Qed.
Lemma one_nonzero p :
prime p ->
1 mod p <> 0.
Proof.
intros isprime.
pose proof (prime_ge_2 _ isprime).
now rewrite Z.mod_1_l by lia.
Qed.
Hint Resolve mod_pow_pos_mod_nonzero mod_inv_mod_nonzero mod_pow_mod_nonzero one_nonzero : core.
(* Reducing the argument mod p first does not change the inverse; the
a mod p = 0 case falls back to egcd on a multiple of p. *)
Lemma mod_inv_mod_idemp a p :
prime p ->
mod_inv (a mod p) p = mod_inv a p.
Proof.
intros isprime.
pose proof (prime_ge_2 _ isprime).
destruct (Z.eqb_spec (a mod p) 0) as [ap0|ap0].
{
rewrite ap0.
apply Zmod_divide in ap0; [|lia].
unfold mod_inv.
now rewrite (egcd_divides a p) by (auto; lia).
}
rewrite <- mod_inv_mod, <- (mod_inv_mod a).
apply mod_mul_both_l with a; auto.
rewrite <- Z.mul_mod_idemp_l by lia.
now rewrite !mul_mod_inv by auto.
Qed.
(* Fermat's little theorem transported to mod_pow_pos / mod_pow:
a^(p-1) = 1 (mod p) for a not divisible by the prime p. *)
Lemma mod_pow_pos_fermat a p :
prime p ->
a mod p <> 0 ->
mod_pow_pos a (Z.to_pos (p - 1)) p = 1.
Proof.
intros isprime ap.
pose proof (prime_ge_2 _ isprime).
rewrite mod_pow_pos_spec.
rewrite Z.pow_pos_fold.
rewrite Z2Pos.id by lia.
now apply fermat.
Qed.
Lemma mod_pow_fermat a p :
prime p ->
a mod p <> 0 ->
mod_pow a (p - 1) p = 1.
Proof.
intros isprime ap.
pose proof (prime_ge_2 _ isprime).
pose proof (mod_pow_pos_fermat _ _ isprime ap).
destruct p; try lia.
destruct (Z.pos p - 1) eqn:?; try lia.
assumption.
Qed.
(* (a^x)^x' = a^(x*x') mod p, for positive exponents. *)
Lemma mod_pow_pos_exp_mul a x x' p :
mod_pow_pos a (x * x') p = mod_pow_pos (mod_pow_pos a x p) x' p.
Proof.
destruct (Z.eq_dec p 0) as [->|?].
- now rewrite !mod_pow_pos_0_r.
- rewrite !mod_pow_pos_spec by auto.
rewrite !Z.pow_pos_fold.
rewrite Pos2Z.inj_mul.
rewrite Z_pow_mod_idemp.
now rewrite <- Z.pow_mul_r by lia.
Qed.
(* Powers of 1 reduce to the accumulator (resp. to 1 mod p). *)
Lemma mod_pow_pos_aux_1_l x p r :
mod_pow_pos_aux 1 x p r = r mod p.
Proof.
destruct (Z.eq_dec p 0) as [->|?].
- now rewrite mod_pow_pos_aux_0_r, Zmod_0_r.
- revert r.
induction x; intros r; cbn.
+ rewrite mod_pow_pos_aux_mod_idemp.
rewrite Z.mul_1_r.
rewrite IHx.
apply Z.mod_mod; auto.
+ rewrite mod_pow_pos_aux_mod_idemp.
apply IHx.
+ now rewrite Z.mul_1_r.
Qed.
Lemma mod_pow_pos_1_l x p :
mod_pow_pos 1 x p = 1 mod p.
Proof. apply mod_pow_pos_aux_1_l. Qed.
(* The inverse of 1 is 1 (mod a prime p). *)
Lemma mod_inv_1_l p :
prime p ->
mod_inv 1 p = 1 mod p.
Proof.
intros isprime.
pose proof (prime_ge_2 _ isprime).
rewrite <- mod_inv_mod.
apply mod_mul_both_l with 1; auto.
rewrite mul_mod_inv by auto.
cbn.
now rewrite Z.mod_1_l by lia.
Qed.
Lemma mod_pow_1_l x p :
prime p ->
mod_pow 1 x p = 1 mod p.
Proof.
intros isprime.
destruct x; auto; cbn.
- apply mod_pow_pos_1_l.
- rewrite mod_pow_pos_1_l, mod_inv_mod_idemp by auto.
now apply mod_inv_1_l.
Qed.
(* a^1 = a mod p. *)
Lemma mod_pow_1_r a p :
mod_pow a 1 p = a mod p.
Proof.
cbn -[Z.mul].
now rewrite Z.mul_1_l.
Qed.
(* The inverse of a product is the (reversed) product of inverses. *)
Lemma mod_inv_mul a b p :
prime p ->
a mod p <> 0 ->
b mod p <> 0 ->
mod_inv (a * b) p = mod_inv b p * mod_inv a p mod p.
Proof.
intros isprime ap0 bp0.
pose proof (prime_ge_2 _ isprime).
rewrite <- mod_inv_mod.
apply mod_mul_both_l with (a * b); auto.
rewrite mul_mod_inv by auto.
rewrite <- Z.mul_assoc, (Z.mul_assoc b).
rewrite <- Z.mul_mod_idemp_r by lia.
rewrite <- (Z.mul_mod_idemp_l (b * _)) by lia.
rewrite Z.mul_mod_idemp_r by lia.
rewrite mul_mod_inv by auto.
rewrite Z.mul_1_l.
now rewrite mul_mod_inv by auto.
Qed.
(* Peeling one factor off a positive power: a * a^x = a^(succ x). *)
Lemma mod_pow_pos_succ_r a x p :
a * mod_pow_pos a x p mod p = mod_pow_pos a (Pos.succ x) p.
Proof.
destruct (Z.eq_dec p 0) as [->|?].
- rewrite !mod_pow_pos_0_r.
now rewrite Zmod_0_r.
- rewrite !mod_pow_pos_spec, !Z.pow_pos_fold.
cbn.
rewrite Z.mul_mod_idemp_r by lia.
rewrite Z.pow_pos_fold.
rewrite <- Z.pow_succ_r by lia.
cbn.
now rewrite <- Pos.add_1_r.
Qed.
(* a^(succ x) = a * a^x mod p, for any integer exponent: the negative
case needs cancellation and mod_inv_mul to move across the inverse. *)
Lemma mod_pow_succ a x p :
prime p ->
a mod p <> 0 ->
mod_pow a (Z.succ x) p = a * mod_pow a x p mod p.
Proof.
intros isprime ap0.
pose proof (prime_ge_2 _ isprime).
destruct x.
- cbn.
rewrite Z.mod_1_l by lia.
rewrite Z.mul_1_r.
destruct a; auto.
- cbn -[Pos.add].
rewrite mod_pow_pos_succ_r.
now replace (p0 + 1)%positive with (Pos.succ p0) by lia.
- cbn.
destruct (Pos.eq_dec p0 1) as [->|?].
+ cbn -[Z.mul].
rewrite Z.mul_1_l.
rewrite mod_inv_mod_idemp by auto.
rewrite mul_mod_inv by auto.
now rewrite Z.mod_1_l by lia.
+ rewrite Z.pos_sub_lt by lia.
cbn.
rewrite <- mod_inv_mod.
apply mod_mul_both_l with (mod_inv a p); auto.
rewrite Z.mul_assoc.
rewrite <- (Z.mul_mod_idemp_l (mod_inv a p * a)) by lia.
rewrite (Z.mul_comm _ a).
rewrite mul_mod_inv by auto.
rewrite <- mod_inv_mul by auto.
rewrite Z.mul_comm.
rewrite <- mod_inv_mod_idemp by auto.
rewrite mod_pow_pos_succ_r.
replace (Pos.succ (p0 - 1)) with p0 by lia.
now rewrite Z.mul_1_l, mod_inv_mod.
Qed.
(* Dual: a^(pred x) = a^-1 * a^x mod p, by cancelling with a. *)
Lemma mod_pow_pred a x p :
prime p ->
a mod p <> 0 ->
mod_pow a (Z.pred x) p = mod_inv a p * mod_pow a x p mod p.
Proof.
intros isprime ap0.
pose proof (prime_ge_2 _ isprime).
rewrite <- mod_pow_mod.
apply mod_mul_both_l with a; auto.
rewrite <- mod_pow_succ by auto.
replace (Z.succ (Z.pred x)) with x by lia.
rewrite Z.mul_assoc.
rewrite <- Z.mul_mod_idemp_l by lia.
rewrite mul_mod_inv by auto.
now rewrite Z.mul_1_l, mod_pow_mod.
Qed.
(* Exponent addition: a^(x + x') = a^x * a^x' mod p, by Peano
induction on x via mod_pow_succ/mod_pow_pred. *)
Lemma mod_pow_exp_plus a x x' p :
prime p ->
a mod p <> 0 ->
mod_pow a (x + x') p = mod_pow a x p * mod_pow a x' p mod p.
Proof.
intros isprime ap0.
pose proof (prime_ge_2 _ isprime).
revert x'.
induction x using Z.peano_ind; intros x'.
- cbn.
rewrite Z.mod_1_l by lia.
rewrite Z.mul_1_l.
now rewrite mod_pow_mod by lia.
- replace (Z.succ x + x') with (Z.succ (x + x')) by lia.
rewrite mod_pow_succ by auto.
rewrite IHx.
rewrite Z.mul_mod_idemp_r by lia.
rewrite Z.mul_assoc.
rewrite <- Z.mul_mod_idemp_l by lia.
now rewrite <- mod_pow_succ by auto.
- replace (Z.pred x + x') with (x + Z.pred x') by lia.
rewrite IHx.
rewrite !mod_pow_pred by auto.
rewrite Z.mul_mod_idemp_l, Z.mul_mod_idemp_r by lia.
apply f_equal2; lia.
Qed.
(* Inversion is an involution (up to reduction mod p). *)
Lemma mod_inv_involutive a p :
prime p ->
a mod p <> 0 ->
mod_inv (mod_inv a p) p = a mod p.
Proof.
intros isprime ap0.
rewrite <- mod_inv_mod.
apply mod_mul_both_l with (mod_inv a p); auto.
rewrite mul_mod_inv by auto.
now rewrite Z.mul_comm, mul_mod_inv by auto.
Qed.
(* Powers distribute over products of bases: (a*a')^x = a^x * a'^x. *)
Lemma mod_pow_pos_distr_exp a a' x p :
p <> 0 ->
mod_pow_pos (a * a') x p =
(mod_pow_pos a x p * mod_pow_pos a' x p) mod p.
Proof.
intros p0.
rewrite !mod_pow_pos_spec, !Z.pow_pos_fold.
rewrite Z.pow_mul_l.
now rewrite Z.mul_mod by auto.
Qed.
(* Inversion commutes with positive powers; p = 2 is special-cased
because there a mod 2 = 1 and everything is trivially 1. *)
Lemma mod_inv_mod_pow_pos_comm a x p :
prime p ->
a mod p <> 0 ->
mod_inv (mod_pow_pos a x p) p = mod_pow_pos (mod_inv a p) x p.
Proof.
intros isprime ap0.
pose proof (prime_ge_2 _ isprime).
destruct (Z.eqb_spec p 2) as [->|?].
{
rewrite <- mod_pow_pos_mod_idemp, <- (mod_inv_mod_idemp a) by auto.
pose proof (Z.mod_pos_bound a 2 ltac:(lia)).
replace (a mod 2) with 1 by lia.
now rewrite !mod_pow_pos_1_l, mod_inv_1_l by auto.
}
rewrite <- mod_inv_mod, <- (mod_pow_pos_mod (mod_inv a p)).
apply mod_mul_both_l with (mod_pow_pos a x p); auto.
rewrite mul_mod_inv by auto.
rewrite <- mod_pow_pos_distr_exp by lia.
rewrite <- mod_pow_pos_mod_idemp.
rewrite mul_mod_inv by auto.
rewrite mod_pow_pos_1_l.
now rewrite Z.mod_1_l by lia.
Qed.
(* Exponent multiplication: a^(x*x') = (a^x)^x', over all sign cases. *)
Lemma mod_pow_exp_mul a x x' p :
prime p ->
a mod p <> 0 ->
mod_pow a (x * x') p = mod_pow (mod_pow a x p) x' p.
Proof.
intros isprime ap0.
pose proof (prime_ge_2 _ isprime).
destruct x, x'; cbn;
repeat (
try rewrite Z.mod_1_l by lia;
try rewrite mod_pow_pos_1_l;
try rewrite mod_inv_1_l);
auto.
- apply mod_pow_pos_exp_mul.
- now rewrite mod_pow_pos_exp_mul.
- rewrite !mod_inv_mod_pow_pos_comm by auto.
now rewrite mod_pow_pos_exp_mul.
- rewrite mod_inv_mod_pow_pos_comm by auto.
rewrite mod_inv_involutive by auto.
rewrite mod_pow_pos_mod_idemp.
now rewrite <- mod_pow_pos_exp_mul.
Qed.
(* Negated exponents invert the power: a^(-x) = (a^x)^-1 mod p. *)
Lemma mod_pow_exp_opp a x p :
prime p ->
a mod p <> 0 ->
mod_pow a (-x) p = mod_inv (mod_pow a x p) p.
Proof.
intros isprime ap0.
pose proof (prime_ge_2 _ isprime).
destruct x; auto.
- cbn.
rewrite mod_inv_mod_idemp by auto.
now rewrite mod_inv_1_l.
- cbn.
rewrite mod_inv_involutive by auto.
now rewrite mod_pow_pos_mod.
Qed.
(* Exponents only matter mod p - 1 (consequence of Fermat). *)
Lemma mod_pow_exp_mod a x p :
prime p ->
a mod p <> 0 ->
mod_pow a (x mod (p - 1)) p = mod_pow a x p.
Proof.
intros isprime ap.
pose proof (prime_ge_2 _ isprime).
rewrite (Z.div_mod x (p - 1)) at 2 by lia.
rewrite mod_pow_exp_plus by auto.
rewrite mod_pow_exp_mul by auto.
rewrite mod_pow_fermat by auto.
rewrite mod_pow_1_l by auto.
rewrite !Z.mod_1_l by lia.
now rewrite Z.mul_1_l, mod_pow_mod.
Qed.
(* The integers modulo a prime p satisfy the BoardroomAxioms
interface: elements are compared after reduction mod p, and all
operations are the modular ones defined above. The obligations are
discharged with the lemmas from this module. *)
Definition boardroom_axioms (p : Z) :
prime p -> BoardroomAxioms Z.
Proof.
intros isprime.
pose proof (prime_ge_2 _ isprime).
refine
{|
elmeq a b := a mod p = b mod p;
elmeqb a b := a mod p =? b mod p;
zero := 0;
one := 1;
add a a' := (a + a') mod p;
mul a a' := (a * a') mod p;
opp a := p - a;
inv a := mod_inv a p;
pow a e := mod_pow a e p;
order := p;
|}.
- intros x y; apply Z.eqb_spec.
- lia.
- constructor; auto.
now intros a a' a'' -> ->.
- intros a a' aeq b b' beq.
now rewrite Z.add_mod, aeq, beq, <- Z.add_mod by lia.
- intros a a' aeq b b' beq.
now rewrite Z.mul_mod, aeq, beq, <- Z.mul_mod by lia.
- intros a a' aeq.
now rewrite Zminus_mod, aeq, <- Zminus_mod.
- intros a a' aeq.
cbn.
now rewrite <- mod_inv_mod_idemp, aeq, mod_inv_mod_idemp by auto.
- intros a a' aeq e ? <-.
now rewrite <- mod_pow_mod_idemp, aeq, mod_pow_mod_idemp.
- intros a anp e e' eeq.
rewrite <- (mod_pow_exp_mod _ e), <- (mod_pow_exp_mod _ e') by auto.
now rewrite eeq.
- now rewrite Z.mod_1_l, Z.mod_0_l by lia.
- intros a b.
now rewrite Z.add_comm.
- intros a b c.
rewrite !Z.mod_mod by lia.
rewrite Z.add_mod_idemp_l, Z.add_mod_idemp_r by lia.
apply f_equal2; lia.
- intros a b.
now rewrite Z.mul_comm.
- intros a b c.
repeat (try rewrite Z.mul_mod_idemp_l; try rewrite Z.mul_mod_idemp_r); try lia.
now rewrite Z.mul_assoc.
- intros a.
now rewrite Z.mod_mod by lia.
- intros a.
now rewrite Z.mod_mod by lia.
- intros a.
rewrite Z.mod_mod by lia.
now rewrite Z.mul_1_l.
- intros a.
rewrite Z.mod_mod by lia.
replace (p - a + a) with p by lia.
rewrite Z.mod_same, Z.mod_0_l; lia.
- intros a anp.
now rewrite Z.mul_comm, mul_mod_inv by auto.
- intros a b c.
repeat (try rewrite Z.mul_mod_idemp_l;
try rewrite Z.mul_mod_idemp_r;
try rewrite Z.add_mod_idemp_l;
try rewrite Z.add_mod_idemp_r;
try rewrite Z.mod_mod); try lia.
apply f_equal2; lia.
- intros a anp.
cbn.
now rewrite Z.mod_1_l at 1 by lia.
- intros a.
now rewrite mod_pow_mod, mod_pow_1_r.
- intros a ap0.
rewrite (mod_pow_exp_opp _ 1) by auto.
rewrite mod_pow_1_r.
now rewrite mod_inv_mod_idemp.
- intros a e e' ap0.
now rewrite mod_pow_exp_plus by auto.
- intros a b e anz.
now rewrite mod_pow_exp_mul.
- auto.
- auto.
Defined.
End Zp.
Module BigZp.
Local Open Scope bigZ.
(* Efficient mirror of Zp.mod_pow_pos_aux over bigZ; BigZ.square is
used for the squaring step. Related to the Z version by
spec_mod_pow_pos_aux below. *)
Fixpoint mod_pow_pos_aux (a : bigZ) (x : positive) (m : bigZ) (r : bigZ) : bigZ :=
match x with
| x~0%positive => mod_pow_pos_aux (BigZ.square a mod m) x m r
| x~1%positive => mod_pow_pos_aux (BigZ.square a mod m) x m (r * a mod m)
| _ => r * a mod m
end.
(* a^x mod m over bigZ, positive exponent. *)
Definition mod_pow_pos (a : bigZ) (x : positive) (m : bigZ) : bigZ :=
mod_pow_pos_aux a x m 1.
(* Extended Euclidean algorithm with an explicit fuel parameter n;
returns the pair of Bezout coefficients tracked in (a1, b1). *)
Fixpoint egcd_aux
(n : nat)
(r0 a0 b0 r1 a1 b1 : bigZ) {struct n} : bigZ * bigZ :=
match n with
| 0%nat => (0, 0)
| S n => let (q, r) := BigZ.div_eucl r0 r1 in
if r =? 0 then
(a1, b1)
else
egcd_aux n r1 a1 b1 r (a0 - q*a1) (b0 - q*b1)
end.
(* Bezout coefficients for (m, n); the fuel log2|m| + log2|n| + 1
bounds the number of Euclidean steps. Signs and argument order are
normalized so egcd_aux runs on |m| >= |n| > 0. *)
Definition egcd (m n : bigZ) : bigZ * bigZ :=
if m =? 0 then
(0, BigZ.sgn n)
else
if n =? 0 then
(BigZ.sgn m, 0)
else
let num_steps :=
S (Z.to_nat (BigZ.to_Z (BigZ.log2 (BigZ.abs m) + BigZ.log2 (BigZ.abs n)))) in
if BigZ.abs m <? BigZ.abs n then
let (x, y) := egcd_aux num_steps (BigZ.abs n) 1 0 (BigZ.abs m) 0 1 in
(BigZ.sgn m * y, BigZ.sgn n * x)
else
let (x, y) := egcd_aux num_steps (BigZ.abs m) 1 0 (BigZ.abs n) 0 1 in
(BigZ.sgn m * x, BigZ.sgn n * y).
(* Modular inverse over bigZ, as in Zp.mod_inv. *)
Definition mod_inv (a : bigZ) (p : bigZ) : bigZ :=
fst (egcd a p) mod p.
(* a^x mod p over bigZ for arbitrary integer exponents. *)
Definition mod_pow (a : bigZ) (x : Z) (p : bigZ) : bigZ :=
match x with
| Z0 => a ^ 0 mod p
| Zpos x => mod_pow_pos a x p
| Zneg x => mod_inv (mod_pow_pos a x p) p
end.
Hint Rewrite BigZ.square_spec BigZ.spec_pow_pos : zsimpl.
Hint Rewrite BigN.spec_of_pos : nsimpl.
Lemma spec_mod_pow_pos_aux a x p r :
[mod_pow_pos_aux a x p r] = Zp.mod_pow_pos_aux [a] x [p] [r].
Proof.
revert a p r.
induction x; intros a p r; cbn in *.
- rewrite IHx.
now autorewrite with zsimpl.
- rewrite IHx.
now autorewrite with zsimpl.
- now autorewrite with zsimpl.
Qed.
Hint Rewrite spec_mod_pow_pos_aux : zsimpl.
Lemma spec_mod_pow_pos a x p :
[mod_pow_pos a x p] = Zp.mod_pow_pos [a] x [p].
Proof. apply spec_mod_pow_pos_aux. Qed.
Hint Rewrite spec_mod_pow_pos : zsimpl.
Lemma spec_egcd_aux n r0 a0 b0 r1 a1 b1 :
let (x, y) := egcd_aux n r0 a0 b0 r1 a1 b1 in
([x], [y]) = Egcd.egcd_aux n [r0] [a0] [b0] [r1] [a1] [b1].
Proof.
revert r0 a0 b0 r1 a1 b1.
induction n as [|n IH]; [easy|]; intros r0 a0 b0 r1 a1 b1.
cbn.
pose proof (BigZ.spec_div_eucl r0 r1) as H.
destruct (BigZ.div_eucl r0 r1) as [q r].
destruct (Z.div_eucl [r0] [r1]) as [q' r'].
inversion H; subst.
rewrite BigZ.spec_eqb.
cbn.
destruct ([r] =? 0)%Z; [easy|].
rewrite <- !BigZ.spec_mul, <- !BigZ.spec_sub.
apply IH.
Qed.
Hint Rewrite BigZ.spec_abs : zsimpl.
Lemma spec_egcd a b :
let (x, y) := egcd a b in
([x], [y]) = Egcd.egcd [a] [b].
Proof.
unfold egcd, Egcd.egcd.
autorewrite with zsimpl.
change [0] with 0%Z.
destruct (_ =? _)%Z; [now autorewrite with zsimpl|].
destruct (_ =? _)%Z; [now autorewrite with zsimpl|].
destruct (_ <? _)%Z.
all: rewrite <- !BigZ.spec_abs.
all: change 1%Z with [1].
all: change 0%Z with [0].
all:
repeat
match goal with
| [|- context[egcd_aux ?n ?r0 ?a0 ?b0 ?r1 ?a1 ?b1]] =>
pose proof (spec_egcd_aux n r0 a0 b0 r1 a1 b1);
destruct (egcd_aux n r0 a0 b0 r1 a1 b1),
(Egcd.egcd_aux n [r0] [a0] [b0] [r1] [a1] [b1])
end.
all: inversion H.
all: now autorewrite with zsimpl.
Qed.
Lemma spec_mod_inv a p :
[mod_inv a p] = Zp.mod_inv [a] [p].
Proof.
unfold mod_inv, Zp.mod_inv.
pose proof (spec_egcd a p).
destruct (egcd _ _), (Egcd.egcd _ _).
inversion H.
now autorewrite with zsimpl.
Qed.
Hint Rewrite spec_mod_inv : zsimpl.
Lemma spec_mod_pow a x p :
[mod_pow a x p] = Zp.mod_pow [a] x [p].
Proof.
unfold mod_pow, Zp.mod_pow.
now destruct x; autorewrite with zsimpl.
Qed.
Hint Rewrite spec_mod_pow : zsimpl.
Hint Rewrite BigZ.spec_modulo : zsimpl.
Local Open Scope Z.
Definition boardroom_axioms (p : Z) :
prime p -> BoardroomAxioms Z.
Proof.
intros isprime.
pose proof (prime_ge_2 _ isprime).
refine
{| elmeq a b := a mod p = b mod p;
elmeqb a b := a mod p =? b mod p;
zero := 0;
one := 1;
add a a' := (a + a') mod p;
mul a a' := (a * a') mod p;
opp a := p - a;
inv a := [mod_inv (BigZ.of_Z a) (BigZ.of_Z p)];
pow a e := [mod_pow (BigZ.of_Z a) e (BigZ.of_Z p)];
order := p;
|}.
- intros x y; apply Z.eqb_spec.
- lia.
- constructor; auto.
now intros a a' a'' -> ->.
- intros a a' aeq b b' beq.
autorewrite with zsimpl in *.
now rewrite Z.add_mod, aeq, beq, <- Z.add_mod by lia.
- intros a a' aeq b b' beq.
autorewrite with zsimpl in *.
now rewrite Z.mul_mod, aeq, beq, <- Z.mul_mod by lia.
- intros a a' aeq.
autorewrite with zsimpl in *.
now rewrite Zminus_mod, aeq, <- Zminus_mod.
- intros a a' aeq.
autorewrite with zsimpl in *.
now rewrite <- Zp.mod_inv_mod_idemp, aeq, Zp.mod_inv_mod_idemp.
- intros a a' aeq e ? <-.
autorewrite with zsimpl in *.
now rewrite <- Zp.mod_pow_mod_idemp, aeq, Zp.mod_pow_mod_idemp.
- intros a anp e e' eeq.
autorewrite with zsimpl in *.
rewrite <- (Zp.mod_pow_exp_mod _ e), <- (Zp.mod_pow_exp_mod _ e') by auto.
now rewrite eeq.
- autorewrite with zsimpl in *.
now rewrite Z.mod_1_l, Z.mod_0_l by lia.
- intros a b.
autorewrite with zsimpl in *.
now rewrite Z.add_comm.
- intros a b c.
autorewrite with zsimpl in *.
rewrite !Z.mod_mod by lia.
rewrite Z.add_mod_idemp_l, Z.add_mod_idemp_r by lia.
apply f_equal2; lia.
- intros a b.
autorewrite with zsimpl in *.
now rewrite Z.mul_comm.
- intros a b c.
autorewrite with zsimpl in *.
repeat (try rewrite Z.mul_mod_idemp_l; try rewrite Z.mul_mod_idemp_r); try lia.
now rewrite Z.mul_assoc.
- intros a.
autorewrite with zsimpl in *.
now rewrite Z.mod_mod by lia.
- intros a.
autorewrite with zsimpl in *.
now rewrite Z.mod_mod by lia.
- intros a.
autorewrite with zsimpl in *.
rewrite Z.mod_mod by lia.
now rewrite Z.mul_1_l.
- intros a.
autorewrite with zsimpl in *.
rewrite Z.mod_mod by lia.
replace (p - a + a)%Z with p by lia.
rewrite Z.mod_same, Z.mod_0_l; lia.
- intros a anp.
autorewrite with zsimpl in *.
now rewrite Z.mul_comm, Zp.mul_mod_inv by auto.
- intros a b c.
autorewrite with zsimpl in *.
repeat (try rewrite Z.mul_mod_idemp_l;
try rewrite Z.mul_mod_idemp_r;
try rewrite Z.add_mod_idemp_l;
try rewrite Z.add_mod_idemp_r;
try rewrite Z.mod_mod); try lia.
apply f_equal2; lia.
- intros a anp.
autorewrite with zsimpl in *.
cbn.
now rewrite Z.mod_1_l at 1 by lia.
- intros a.
autorewrite with zsimpl in *.
now rewrite Zp.mod_pow_mod, Zp.mod_pow_1_r.
- intros a ap0.
autorewrite with zsimpl in *.
rewrite (Zp.mod_pow_exp_opp _ 1) by auto.
rewrite Zp.mod_pow_1_r.
now rewrite Zp.mod_inv_mod_idemp.
- intros a e e' ap0.
autorewrite with zsimpl in *.
now rewrite Zp.mod_pow_exp_plus by auto.
- intros a b e ap0.
autorewrite with zsimpl in *.
now rewrite Zp.mod_pow_exp_mul.
- intros a e ap0.
autorewrite with zsimpl in *.
auto.
- intros a ap0.
autorewrite with zsimpl in *.
auto.
Defined.
End BigZp.
|
{"author": "malthelange", "repo": "CLVM", "sha": "e80aef02c3112b5b62db79bc2b233020367b0bde", "save_path": "github-repos/coq/malthelange-CLVM", "path": "github-repos/coq/malthelange-CLVM/CLVM-e80aef02c3112b5b62db79bc2b233020367b0bde/execution/theories/Examples/BoardroomMath.v"}
|
"""These are statistical tests for the Infrequent sampling results."""
import numpy as np
from scipy.optimize import curve_fit
from scipy.stats import ks_2samp
from scipy import stats
import pandas as pd
def perform_ks_analysis(dataframe):
    """
    Perform the KS test and determine statistics.

    Fits the analytical exponential CDF (``analyticalCDF``) to the empirical
    CDF of the effective times, draws synthetic samples from the fitted
    distribution, and runs a two-sample Kolmogorov-Smirnov test between the
    real data and the synthetic samples.

    Parameters:
    -----------
    dataframe : pandas dataframe
        Contains the effective times of rare events in a 'Teff' column.

    Returns:
    ----------
    statistics : string
        Newline-separated summary of the fit and KS-test statistics.
    """
    data = dataframe['Teff']
    # renamed from `min`/`max` to avoid shadowing the builtins
    t_min = np.min(data)
    t_max = np.max(data)
    num_bins = 10 * np.size(data)
    time = np.logspace(np.log10(t_min), np.log10(t_max), num=num_bins)
    mu = np.mean(data)
    time_centers = np.r_[0.5 * (time[:-1] + time[1:])]
    hist, bins2 = np.histogram(data, bins=time, density=False)
    cdf = np.cumsum(hist) * 1.0 / data.size
    # Fit the CDF; the sample mean is the initial guess for tau.
    taufit, pcov = curve_fit(analyticalCDF, time_centers, cdf, mu)
    # Synthetic samples drawn per empirical data point.  Must be an int:
    # the original `points = 1e5` made `np.size(data) * points` a float,
    # which np.random.gamma rejects as a `size` argument (TypeError).
    points = 100000
    randdata = np.random.gamma(1, taufit, np.size(data) * points)
    # perform the KS test to see if data points from MetaD are statistically
    # the same as the data points from the analytical fit
    stat, p = ks_2samp(data, randdata)
    statistics = ("mu:" + str(np.mean(data)) + "\n" + "mu_sem:" +
                  str(stats.sem(data)) + "\n" + "sigma:" +
                  str(np.std(data, ddof=1)) + "\n" + "t_m:" +
                  str(np.median(data)) + "\n" + "tau:" + str(taufit) + "\n" +
                  "mu_sigma_ratio:" + str(np.mean(data)/np.std(data, ddof=1)) +
                  "\n" + "log2mu_median_ratio:" +
                  str(np.log(2)*np.mean(data)/np.median(data)) + "\n" +
                  "tau_mu_ratio:" + str(taufit/np.mean(data)) + "\n" +
                  "p-value:" + str(p) + "\n" + "ks-stat:" + str(stat) + "\n" +
                  "events recorded:"+str(np.size(data)))
    return statistics
# random sampling on data set
def sampling(dataframe, num_iters, sampsize):
    """
    Perform one bootstrapping draw for error analysis.

    Draws a bootstrap sample of the effective times, fits the analytical
    exponential CDF to it, and KS-tests the sample against synthetic draws
    from the fitted distribution at significance level alpha = 0.05.

    Parameters:
    -----------
    dataframe : pandas dataframe
        Contains the effective times of rare events in a 'Teff' column.
    num_iters : int
        Unused; kept for backward compatibility (a previous version looped
        `num_iters` times).
    sampsize : int
        Size of the bootstrapped sample.

    Returns:
    ----------
    means : float
        Mean effective time of the sample if accepted, otherwise 0.0.
    pvals : float
        KS-test p-value of the sample if accepted, otherwise 0.0.
    reject : str
        'No' if the sample passed the KS test (p > alpha), 'Yes' otherwise.
    """
    data = dataframe['Teff']
    means = 0.0
    pvals = 0.0
    # Synthetic samples per data point for the p-value; must be an int
    # (the original `1e4` produced a float `size` for np.random.gamma,
    # which raises a TypeError).
    points = 10000
    alpha = 0.05
    smalldata = np.random.choice(data, sampsize, replace=True)
    # hist / CDF fit / etc  (renamed from `min`/`max` to avoid shadowing)
    t_min = np.min(smalldata)
    t_max = np.max(smalldata)
    num_bins = 10 * np.size(smalldata)
    time = np.logspace(np.log10(t_min), np.log10(t_max), num=num_bins)
    mu = np.mean(smalldata)
    time_centers = np.r_[0.5 * (time[:-1] + time[1:])]
    hist, bins2 = np.histogram(smalldata, bins=time, density=False)
    cdf = np.cumsum(hist) * 1.0 / smalldata.size
    taufit, pcov = curve_fit(analyticalCDF, time_centers, cdf, mu)
    # analysis
    randdata = np.random.gamma(1, taufit, np.size(smalldata) * points)
    stat, p = ks_2samp(smalldata, randdata)
    if p > alpha:
        means = mu
        pvals = p
        reject = 'No'
    else:
        # Was `if p < alpha:` — when p == alpha exactly, neither branch ran
        # and `reject` was unbound, raising NameError on return.
        reject = 'Yes'
    return means, pvals, reject
def analyticalCDF(times, tau):
    """Analytical CDF of an exponential distribution with timescale tau."""
    decay = np.exp(-times / tau)
    return 1 - decay
# lets make some plots
# fig = plt.figure(figsize=(6, 6))
# fig.subplots_adjust(left=None, bottom=None, right=None, top=None,
# wspace=None,
# hspace=0.2)
#
# axes = fig.add_subplot(111)
# axes.plot(bins2[1:bins], cdf, label='$CDF$')
# axes.set_xscale('log')
# axes.plot(time_centers, analyticalCDF(time_centers, taufit),
# label='$analytical\ CDF$')
# first_legend = plt.legend(loc=0)
# axes.set_xlabel('$log\ time\ (ns)$')
# axes.set_ylabel('$P_{n\geq1}$')
# plt.show()
|
{"hexsha": "f127d3153c50ab6d07dcc90115dd9cf1a0dcad06", "size": 4747, "ext": "py", "lang": "Python", "max_stars_repo_path": "LimPy/statistical_functions.py", "max_stars_repo_name": "UWPRG/LimPy", "max_stars_repo_head_hexsha": "2a2979306ec4264de31d5ce1d2f8a59bd6eb7e9c", "max_stars_repo_licenses": ["BSD-2-Clause"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2017-11-08T00:53:03.000Z", "max_stars_repo_stars_event_max_datetime": "2017-11-08T00:53:03.000Z", "max_issues_repo_path": "LimPy/statistical_functions.py", "max_issues_repo_name": "UWPRG/LimPy", "max_issues_repo_head_hexsha": "2a2979306ec4264de31d5ce1d2f8a59bd6eb7e9c", "max_issues_repo_licenses": ["BSD-2-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "LimPy/statistical_functions.py", "max_forks_repo_name": "UWPRG/LimPy", "max_forks_repo_head_hexsha": "2a2979306ec4264de31d5ce1d2f8a59bd6eb7e9c", "max_forks_repo_licenses": ["BSD-2-Clause"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2019-01-09T06:59:10.000Z", "max_forks_repo_forks_event_max_datetime": "2019-01-18T14:58:50.000Z", "avg_line_length": 32.7379310345, "max_line_length": 79, "alphanum_fraction": 0.5772066568, "include": true, "reason": "import numpy,from scipy", "num_tokens": 1306}
|
# ~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~
# MIT License
#
# Copyright (c) 2022 Nathan Juraj Michlo
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~
import json
import logging
from datetime import datetime
from pathlib import Path
from pprint import pprint
from typing import Optional
from typing import Union
import hydra.utils
import imageio
import numpy as np
import psutil
import torch
from disent.dataset.sampling import GroundTruthRandomWalkSampler
from disent.util.visualize.vis_util import make_image_grid
from omegaconf import DictConfig
from omegaconf import OmegaConf
from pytorch_lightning import Trainer
from pytorch_lightning.callbacks import ModelCheckpoint
from torch.utils.data import DataLoader
from disent.dataset import DisentIterDataset
from disent.dataset.data import GroundTruthData
from disent.dataset.sampling import BaseDisentSampler
from disent.dataset.sampling import GroundTruthDistSampler
from disent.dataset.sampling import GroundTruthPairOrigSampler
from disent.dataset.sampling import GroundTruthPairSampler
from disent.dataset.sampling import GroundTruthTripleSampler
from disent.dataset.sampling import RandomSampler
from disent.dataset.transform import ToImgTensorF32
from disent.dataset.util.stats import compute_data_mean_std
from disent.frameworks.ae import Ae
from disent.frameworks.vae import AdaVae
from disent.frameworks.vae import BetaVae
from disent.frameworks.vae import TripletVae
from disent.frameworks.vae import Vae
from disent.metrics import metric_mig
from disent.model import AutoEncoder
from disent.model.ae import DecoderConv64
from disent.model.ae import EncoderConv64
from disent.nn.weights import init_model_weights
from disent.util.lightning.callbacks import LoggerProgressCallback
from disent.util.lightning.callbacks import VaeLatentCycleLoggingCallback
from disent.util.lightning.callbacks._callback_vis_latents import get_vis_min_max
from disent.util.visualize.plot import plt_imshow
from disent.util.visualize.vis_img import torch_to_images
from experiment.util.path_utils import make_current_experiment_dir
from research.code.dataset.data import XYSingleSquareData
from research.code.frameworks.vae import AdaTripletVae
from research.code.metrics import metric_factored_components
log = logging.getLogger(__name__)
# ========================================================================= #
# Train A Single VAE #
# ========================================================================= #
def train(
    save_dir: Union[str, Path],
    data: GroundTruthData,
    sampler: BaseDisentSampler,
    framework: Union[Ae, Vae],
    train_steps: int = 5000,
    batch_size: int = 64,
    num_workers: int = psutil.cpu_count(logical=False),
    save_top_k: int = 5,
    save_every_n_steps: int = 2500,
    profile: bool = False,
    data_mean=None,
    data_std=None,
):
    """Train a single AE/VAE framework on `data`, visualise it, compute metrics.

    Side effects: creates `save_dir` (fails if it already exists), writes model
    checkpoints, `latent_cycle.png`/`latent_cycle.mp4`, `data_samples.png`, and
    `metrics.json` into it, and shows two matplotlib figures.

    Returns:
        (save_dir, metrics): the directory used and the computed metric dict.
    """
    vis_min, vis_max = get_vis_min_max(recon_mean=data_mean, recon_std=data_std)
    # normalise the paths -- mkdir with exist_ok=False so a re-run never
    # silently overwrites a previous experiment's outputs
    save_dir = Path(save_dir)
    save_dir.mkdir(exist_ok=False, parents=True)
    # make the dataset (inputs normalised with the dataset's mean/std if given)
    dataset = DisentIterDataset(data, sampler=sampler, transform=ToImgTensorF32(size=64, mean=data_mean, std=data_std))
    dataloader = DataLoader(dataset=dataset, batch_size=batch_size, num_workers=num_workers, pin_memory=torch.cuda.is_available())
    # make the trainer
    trainer = Trainer(
        logger=False,
        callbacks=[
            ModelCheckpoint(dirpath=save_dir, monitor='loss', verbose=True, save_top_k=save_top_k, every_n_train_steps=save_every_n_steps),
            LoggerProgressCallback(interval=5, log_level=logging.INFO),
            # RichModelSummary(max_depth=2),
            # RichProgressBar(refresh_rate_per_second=3),
            # LoggerProgressCallback(),
            # VaeMetricLoggingCallback(),
            # VaeLatentCycleLoggingCallback(),
            # VaeGtDistsLoggingCallback(),
        ],
        enable_progress_bar=False,
        enable_model_summary=False,
        max_steps=train_steps,
        max_epochs=train_steps,  # NOTE(review): epochs capped at the same value as steps -- presumably just a large ceiling so max_steps dominates; confirm
        gpus=1 if torch.cuda.is_available() else 0,
        profiler='simple' if profile else None,
    )
    # train the framework
    trainer.fit(framework, dataloader)
    # visualise model -- generate latent-traversal stills/animation/grid image
    stills, animation, image = VaeLatentCycleLoggingCallback.generate_visualisations(dataset, framework, recon_mean=data_mean, recon_std=data_std, mode='minmax_interval_cycle', num_stats_samples=256)
    # visualise model -- plot image, save image, save vid
    plt_imshow(image, show=True)
    imageio.imsave(save_dir.joinpath('latent_cycle.png'), image)
    with imageio.get_writer(save_dir.joinpath('latent_cycle.mp4'), fps=4) as writer:
        for frame in animation:
            writer.append_data(frame)
    # visualise data -- sample the same number of observations as the stills grid
    data_batch = dataset.dataset_sample_batch(stills.shape[0]*stills.shape[1], mode='input', seed=7777, replace=True)
    data_batch = torch_to_images(data_batch, in_min=vis_min, in_max=vis_max, always_rgb=True).numpy()
    data_image = make_image_grid(data_batch, num_cols=stills.shape[1], pad=4)
    # visualise data -- plot image, save image
    plt_imshow(data_image, show=True)
    imageio.imsave(save_dir.joinpath('data_samples.png'), data_image)
    # compute metrics over the trained encoder
    get_repr = lambda x: framework.encode(x.to(framework.device))
    metrics = {
        # **metric_dci.compute_fast(dataset, get_repr),
        **metric_mig.compute_fast(dataset, get_repr),
        **metric_factored_components.compute_fast(dataset, get_repr),
    }
    # print and save the metrics
    pprint(metrics)
    with open(save_dir.joinpath('metrics.json'), 'w') as fp:
        json.dump(metrics, fp, indent=2, sort_keys=True)
    # done!
    return save_dir, metrics
# ========================================================================= #
# Train Multiple VAEs -- The actual experiment! #
# ========================================================================= #
def _load_ada_schedules(max_steps: int):
    """Load and instantiate the `adavae_up_all` hyper-parameter schedules,
    with the schedule length interpolated from `max_steps`."""
    schedule_yaml = Path(__file__).parent.parent.parent / 'config' / 'schedule' / 'adavae_up_all.yaml'
    with open(schedule_yaml, 'r') as fp:
        loaded = OmegaConf.load(fp)
    conf = DictConfig({
        'trainer': {'max_steps': max_steps},
        'schedule': loaded,
    })
    return hydra.utils.instantiate(conf.schedule.schedule_items)
def run_experiments(
    lr: float = 1e-4,
    z_size: int = 9,
    exp_dir: Optional[Union[Path, str]] = None,
    train_steps: int = 10_000,
    batch_size: int = 64,
    num_workers: int = psutil.cpu_count(logical=False),
    compute_stats: bool = False,
    profile: bool = False,
    ada_ratio: float = 1.25
):
    """Run the full grid of (dataset x framework) training runs.

    For every dataset/framework pair: builds the model, trains it via
    `train(...)`, then dumps the whole dataset's observations, factors, and
    encodings (plus metrics) to `rl_data.npz` in the run directory.

    Args:
        lr: optimizer learning rate used by every framework config.
        z_size: number of latent units of each model.
        exp_dir: root output dir; auto-generated from the experiment name if None.
        train_steps: training steps per run.
        batch_size / num_workers: dataloader settings.
        compute_stats: if True, also print each dataset's mean/std first.
        profile: enable the pytorch-lightning 'simple' profiler.
        ada_ratio: multiplier applied to train_steps for the Ada schedules, so
            the schedules do not fully anneal within the training run.
    """
    # PERMUTATIONS:
    # each entry: (name, data class, data kwargs, channel mean, channel std)
    datasets = [
        ('xy8', XYSingleSquareData, dict(square_size=8, grid_spacing=8, image_size=64), [0.015625], [0.12403473458920848]),
        # ('xy4', XYSingleSquareData, dict(square_size=8, grid_spacing=4, image_size=64), [0.015625], [0.12403473458920848]),
        # ('xy2', XYSingleSquareData, dict(square_size=8, grid_spacing=2, image_size=64), [0.015625], [0.12403473458920848]),
        # ('xy1', XYSingleSquareData, dict(square_size=8, grid_spacing=1, image_size=64), [0.015625], [0.12403473458920848]),
    ]
    # triplet sampler variants: A* = scaled manhattan distance (with optional swap
    # chance), B/C = factor-range based, D* = random walks with varying negative
    # distance caps
    triplet_sampler_maker_A = lambda: GroundTruthDistSampler(num_samples=3, triplet_sample_mode='manhattan_scaled', triplet_swap_chance=0.0)
    triplet_sampler_maker_A1 = lambda: GroundTruthDistSampler(num_samples=3, triplet_sample_mode='manhattan_scaled', triplet_swap_chance=0.1)  # actually works quite well if ada_ratio is lower, eg 1.25 instead of 1.5, but might hurt recons? check?
    triplet_sampler_maker_A2 = lambda: GroundTruthDistSampler(num_samples=3, triplet_sample_mode='manhattan_scaled', triplet_swap_chance=0.2)  # actually works quite well if ada_ratio is lower, eg 1.25 instead of 1.5, but might hurt recons? check?
    triplet_sampler_maker_B = lambda: GroundTruthTripleSampler(p_k_range=1, n_k_range=(0, -1), n_k_sample_mode='bounded_below', n_k_is_shared=True, p_radius_range=1, n_radius_range=(0, -1), n_radius_sample_mode='bounded_below')  # this one is really bad
    triplet_sampler_maker_C = lambda: GroundTruthTripleSampler(p_k_range=(0, -1), n_k_range=(0, -1), n_k_sample_mode='bounded_below', n_k_is_shared=True, p_radius_range=(0, -1), n_radius_range=(0, -1), n_radius_sample_mode='bounded_below')  # pretty much the same as the manhat above, except more strict... actually less real because its bounded below. Real episodes when sampled by time will usually be further away, but not necessarily bounded below like this.
    triplet_sampler_maker_D64 = lambda: GroundTruthRandomWalkSampler(num_samples=3, p_dist_max=8, n_dist_max=64)
    triplet_sampler_maker_D32 = lambda: GroundTruthRandomWalkSampler(num_samples=3, p_dist_max=8, n_dist_max=32)
    triplet_sampler_maker_D16 = lambda: GroundTruthRandomWalkSampler(num_samples=3, p_dist_max=8, n_dist_max=16)
    triplet_sampler_maker_D8 = lambda: GroundTruthRandomWalkSampler(num_samples=3, p_dist_max=8, n_dist_max=8)
    # each entry: (name, framework class, cfg maker, sampler maker, schedules maker)
    # -- makers are lambdas so each run gets fresh, independent instances
    frameworks = [
        ('betavae', BetaVae, lambda: BetaVae.cfg(optimizer='adam', optimizer_kwargs=dict(lr=lr), beta=0.0001), lambda: RandomSampler(num_samples=1), lambda: {}),
        ('adavae', AdaVae, lambda: AdaVae.cfg(optimizer='adam', optimizer_kwargs=dict(lr=lr), beta=0.0001), lambda: GroundTruthPairSampler(p_k_range=1, p_radius_range=(1, -1)), lambda: {}),
        ('adavae_os', AdaVae, lambda: AdaVae.cfg(optimizer='adam', optimizer_kwargs=dict(lr=lr), beta=0.0001), lambda: GroundTruthPairOrigSampler(p_k=1), lambda: {}),
        ('triplet_soft_A', TripletVae, lambda: TripletVae.cfg(optimizer='adam', optimizer_kwargs=dict(lr=lr), beta=0.001, triplet_loss='triplet_soft', triplet_scale=1, triplet_margin_max=10, triplet_p=1), triplet_sampler_maker_A, lambda: {}),
        ('adatvae_soft_A', AdaTripletVae, lambda: AdaTripletVae.cfg(optimizer='adam', optimizer_kwargs=dict(lr=lr), beta=0.001, triplet_loss='triplet_soft', triplet_scale=1, triplet_margin_max=10, triplet_p=1), triplet_sampler_maker_A, lambda: _load_ada_schedules(max_steps=int(train_steps * ada_ratio))),
        ('triplet_soft_A1', TripletVae, lambda: TripletVae.cfg(optimizer='adam', optimizer_kwargs=dict(lr=lr), beta=0.001, triplet_loss='triplet_soft', triplet_scale=1, triplet_margin_max=10, triplet_p=1), triplet_sampler_maker_A1, lambda: {}),
        ('triplet_soft_A2', TripletVae, lambda: TripletVae.cfg(optimizer='adam', optimizer_kwargs=dict(lr=lr), beta=0.001, triplet_loss='triplet_soft', triplet_scale=1, triplet_margin_max=10, triplet_p=1), triplet_sampler_maker_A2, lambda: {}),
        ('triplet_soft_B', TripletVae, lambda: TripletVae.cfg(optimizer='adam', optimizer_kwargs=dict(lr=lr), beta=0.001, triplet_loss='triplet_soft', triplet_scale=1, triplet_margin_max=10, triplet_p=1), triplet_sampler_maker_B, lambda: {}),
        ('triplet_soft_C', TripletVae, lambda: TripletVae.cfg(optimizer='adam', optimizer_kwargs=dict(lr=lr), beta=0.001, triplet_loss='triplet_soft', triplet_scale=1, triplet_margin_max=10, triplet_p=1), triplet_sampler_maker_C, lambda: {}),
        # ('triplet_sigmoid10', TripletVae, lambda: TripletVae.cfg(optimizer='adam', optimizer_kwargs=dict(lr=lr), beta=0.001, triplet_loss='triplet_sigmoid', triplet_scale=1, triplet_margin_max=10, triplet_p=1), triplet_sampler_maker, lambda: {}),
        # ('triplet_sigmoid1', TripletVae, lambda: TripletVae.cfg(optimizer='adam', optimizer_kwargs=dict(lr=lr), beta=0.001, triplet_loss='triplet_sigmoid', triplet_scale=1, triplet_margin_max=1, triplet_p=1), triplet_sampler_maker, lambda: {}),
        # ('triplet10', TripletVae, lambda: TripletVae.cfg(optimizer='adam', optimizer_kwargs=dict(lr=lr), beta=0.001, triplet_loss='triplet', triplet_scale=1, triplet_margin_max=10, triplet_p=1), triplet_sampler_maker, lambda: {}),
        # ('triplet1', TripletVae, lambda: TripletVae.cfg(optimizer='adam', optimizer_kwargs=dict(lr=lr), beta=0.001, triplet_loss='triplet', triplet_scale=1, triplet_margin_max=1, triplet_p=1), triplet_sampler_maker, lambda: {}),
        ('adatvae_soft_A1', AdaTripletVae, lambda: AdaTripletVae.cfg(optimizer='adam', optimizer_kwargs=dict(lr=lr), beta=0.001, triplet_loss='triplet_soft', triplet_scale=1, triplet_margin_max=10, triplet_p=1), triplet_sampler_maker_A1, lambda: _load_ada_schedules(max_steps=int(train_steps * ada_ratio))),
        ('adatvae_soft_A2', AdaTripletVae, lambda: AdaTripletVae.cfg(optimizer='adam', optimizer_kwargs=dict(lr=lr), beta=0.001, triplet_loss='triplet_soft', triplet_scale=1, triplet_margin_max=10, triplet_p=1), triplet_sampler_maker_A2, lambda: _load_ada_schedules(max_steps=int(train_steps * ada_ratio))),
        ('adatvae_soft_B', AdaTripletVae, lambda: AdaTripletVae.cfg(optimizer='adam', optimizer_kwargs=dict(lr=lr), beta=0.001, triplet_loss='triplet_soft', triplet_scale=1, triplet_margin_max=10, triplet_p=1), triplet_sampler_maker_B, lambda: _load_ada_schedules(max_steps=int(train_steps * ada_ratio))),
        ('adatvae_soft_C', AdaTripletVae, lambda: AdaTripletVae.cfg(optimizer='adam', optimizer_kwargs=dict(lr=lr), beta=0.001, triplet_loss='triplet_soft', triplet_scale=1, triplet_margin_max=10, triplet_p=1), triplet_sampler_maker_C, lambda: _load_ada_schedules(max_steps=int(train_steps * ada_ratio))),
        # ('adatvae_soft', AdaTripletVae, lambda: AdaTripletVae.cfg(optimizer='adam', optimizer_kwargs=dict(lr=lr), beta=0.001, triplet_loss='triplet_soft', triplet_scale=1, triplet_margin_max=10, triplet_p=1), triplet_sampler_maker, lambda: _load_ada_schedules(max_steps=int(train_steps*ADA_RATIO))),
        # ('adatvae_sig10', AdaTripletVae, lambda: AdaTripletVae.cfg(optimizer='adam', optimizer_kwargs=dict(lr=lr), beta=0.001, triplet_loss='triplet_sigmoid', triplet_scale=1, triplet_margin_max=10, triplet_p=1), triplet_sampler_maker, lambda: _load_ada_schedules(max_steps=int(train_steps*ADA_RATIO))),
        # ('adatvae_sig1', AdaTripletVae, lambda: AdaTripletVae.cfg(optimizer='adam', optimizer_kwargs=dict(lr=lr), beta=0.001, triplet_loss='triplet_sigmoid', triplet_scale=1, triplet_margin_max=1, triplet_p=1), triplet_sampler_maker, lambda: _load_ada_schedules(max_steps=int(train_steps*ADA_RATIO))),
        # ('adatvae_trip10', AdaTripletVae, lambda: AdaTripletVae.cfg(optimizer='adam', optimizer_kwargs=dict(lr=lr), beta=0.001, triplet_loss='triplet', triplet_scale=1, triplet_margin_max=10, triplet_p=1), triplet_sampler_maker, lambda: _load_ada_schedules(max_steps=int(train_steps*ADA_RATIO))),
        # ('adatvae_trip1', AdaTripletVae, lambda: AdaTripletVae.cfg(optimizer='adam', optimizer_kwargs=dict(lr=lr), beta=0.001, triplet_loss='triplet', triplet_scale=1, triplet_margin_max=1, triplet_p=1), triplet_sampler_maker, lambda: _load_ada_schedules(max_steps=int(train_steps*ADA_RATIO))),
        ('triplet_soft_D64', TripletVae, lambda: TripletVae.cfg(optimizer='adam', optimizer_kwargs=dict(lr=lr), beta=0.001, triplet_loss='triplet_soft', triplet_scale=1, triplet_margin_max=10, triplet_p=1), triplet_sampler_maker_D64, lambda: {}),
        ('triplet_soft_D32', TripletVae, lambda: TripletVae.cfg(optimizer='adam', optimizer_kwargs=dict(lr=lr), beta=0.001, triplet_loss='triplet_soft', triplet_scale=1, triplet_margin_max=10, triplet_p=1), triplet_sampler_maker_D32, lambda: {}),
        ('triplet_soft_D16', TripletVae, lambda: TripletVae.cfg(optimizer='adam', optimizer_kwargs=dict(lr=lr), beta=0.001, triplet_loss='triplet_soft', triplet_scale=1, triplet_margin_max=10, triplet_p=1), triplet_sampler_maker_D16, lambda: {}),
        ('triplet_soft_D8', TripletVae, lambda: TripletVae.cfg(optimizer='adam', optimizer_kwargs=dict(lr=lr), beta=0.001, triplet_loss='triplet_soft', triplet_scale=1, triplet_margin_max=10, triplet_p=1), triplet_sampler_maker_D8, lambda: {}),
        ('adatvae_soft_D64', AdaTripletVae, lambda: AdaTripletVae.cfg(optimizer='adam', optimizer_kwargs=dict(lr=lr), beta=0.001, triplet_loss='triplet_soft', triplet_scale=1, triplet_margin_max=10, triplet_p=1), triplet_sampler_maker_D64, lambda: _load_ada_schedules(max_steps=int(train_steps * ada_ratio))),
        ('adatvae_soft_D32', AdaTripletVae, lambda: AdaTripletVae.cfg(optimizer='adam', optimizer_kwargs=dict(lr=lr), beta=0.001, triplet_loss='triplet_soft', triplet_scale=1, triplet_margin_max=10, triplet_p=1), triplet_sampler_maker_D32, lambda: _load_ada_schedules(max_steps=int(train_steps * ada_ratio))),
        ('adatvae_soft_D16', AdaTripletVae, lambda: AdaTripletVae.cfg(optimizer='adam', optimizer_kwargs=dict(lr=lr), beta=0.001, triplet_loss='triplet_soft', triplet_scale=1, triplet_margin_max=10, triplet_p=1), triplet_sampler_maker_D16, lambda: _load_ada_schedules(max_steps=int(train_steps * ada_ratio))),
        ('adatvae_soft_D8', AdaTripletVae, lambda: AdaTripletVae.cfg(optimizer='adam', optimizer_kwargs=dict(lr=lr), beta=0.001, triplet_loss='triplet_soft', triplet_scale=1, triplet_margin_max=10, triplet_p=1), triplet_sampler_maker_D8, lambda: _load_ada_schedules(max_steps=int(train_steps * ada_ratio))),
    ]
    # GET NAME:
    # NOTE(review): the name hard-codes 'xy8' even though `datasets` could list
    # other datasets -- confirm this is intentional for the current sweep
    exp_name = f'xy8_{ada_ratio}_{train_steps}'
    # NORMALIZE:
    if exp_dir is None:
        exp_dir = make_current_experiment_dir(str(Path(__file__).parent.joinpath('exp')), name=exp_name)
    exp_dir = Path(exp_dir)
    # COMPUTE DATASET STATS:
    if compute_stats:
        for i, (data_name, data_cls, data_kwargs, data_mean, data_std) in enumerate(datasets):
            data = data_cls(transform=ToImgTensorF32(), **data_kwargs)
            mean, std = compute_data_mean_std(data, progress=True, num_workers=num_workers, batch_size=batch_size)
            print(f'{data.__class__.__name__} - {data_name}:{len(data)} - {data_kwargs}:\n    mean: {mean.tolist()}\n    std: {std.tolist()}')
    # TRAIN DIFFERENT FRAMEWORKS ON DATASETS
    for i, (data_name, data_cls, data_kwargs, data_mean, data_std) in enumerate(datasets):
        # make the dataset
        data: GroundTruthData = data_cls(**data_kwargs)
        # train the framework
        for j, (framework_name, framework_cls, framework_cfg_maker, sampler_maker, schedules_maker) in enumerate(frameworks):
            start_time = datetime.today().strftime('%Y-%m-%d--%H-%M-%S')
            # make the data & framework (VAEs need a doubled z output for mean+logvar)
            framework: Union[Ae, Vae] = framework_cls(
                model=AutoEncoder(
                    encoder=EncoderConv64(x_shape=data.x_shape, z_size=z_size, z_multiplier=2 if issubclass(framework_cls, Vae) else 1),
                    decoder=DecoderConv64(x_shape=data.x_shape, z_size=z_size),
                ),
                cfg=framework_cfg_maker()
            )
            sampler = sampler_maker()
            print('=' * 100)
            print(f'[{i}x{j}] Dataset: name={data_name}, size={len(data)}, kwargs={data_kwargs}, mean={data_mean}, std={data_std}')
            print(f'[{i}x{j}] Framework: name={framework_name} ({framework.__class__.__name__}), sampler={sampler.__class__.__name__}, cfg={framework.cfg.to_dict()}')
            print('=' * 100)
            # register the schedules
            for k, schedule in schedules_maker().items():
                framework.register_schedule(k, schedule)
            # initialize weights
            init_model_weights(framework, mode='xavier_normal', log_level=logging.DEBUG)
            # train the framework
            run_name = f'{i}x{j}_{data_name}_{framework_name}'
            save_dir = exp_dir.joinpath(run_name)
            save_dir, metrics = train(
                save_dir=save_dir,
                data=data,
                sampler=sampler,
                framework=framework,
                train_steps=train_steps,
                batch_size=batch_size,
                num_workers=num_workers,
                profile=profile,
                save_every_n_steps=train_steps,
            )
            # generate data for rl -- encode the *entire* dataset with the trained model
            with torch.no_grad():
                dataset: GroundTruthData = data_cls(**data_kwargs, transform=ToImgTensorF32(size=64, mean=data_mean, std=data_std))
                dat = {
                    # experiment info
                    'exp_name': save_dir.parent.name,
                    'run_name': save_dir.name,
                    'start_time': start_time,
                    # dataset data
                    'factor_names': data.factor_names,
                    'factor_sizes': data.factor_sizes,
                    # sizes
                    'num_factors': data.num_factors,
                    'num_obs': len(data),
                    'num_latents': z_size,
                    # image data
                    'obs_indices': np.array([i for i in range(len(data))]),
                    'obs_factors': np.array([data.idx_to_pos(i) for i in range(len(data))]),
                    'obs': np.array([data[i] for i in range(len(data))]),
                    # representations
                    'obs_encodings': np.array([framework.encode(dataset[i][None, ...].to(framework.device))[0].cpu().numpy() for i in range(len(data))]),
                    # results
                    'metrics': metrics,
                    # descriptions
                    '_desc_': {
                        # experiment info
                        'exp_name': f'The name of the group of experiments',
                        'run_name': f'The name of this individual run that is part of the experiment',
                        'start_time': f'The starting time of this individual run',
                        # dataset data
                        'factor_names': f'The names of the ground truth factors in the dataset, eg. ["x", "y"]',
                        'factor_sizes': f'The sizes of the ground truth factors in the dataset, eg. [8, 8]',
                        # sizes
                        'num_factors': f'The number of different ground_truth factors in the dataset, eg. 2',
                        'num_obs': f'The number of elements in the dataset, equal to the product of all the factor sizes, eg. 8x8 = 64',
                        'num_latents': f'The number of latent units of the model or rather the number of encoder outputs, eg. 9',
                        # image data
                        'obs_indices': f'The index of each observation in the dataset, eg. [0, 1, ..., 62, 63]. '
                                       f'The shape is (num_obs,)',
                        'obs_factors': f'The ground truth factor of each observation in the dataset, eg. [[0, 0], [0, 1], ..., [7, 6], [7, 7]]. '
                                       f'The shape is (num_obs, num_factors)',
                        'obs': f'The raw observations from the dataset, eg. [<img0>, ..., <img63>]. '
                               f'The shape is (num_obs, H, W, C)',
                        'obs_encodings': f'The low dimensional encodings of each observations, eg. [<enc1>, ..., <enc63>]. '
                                         f'The shape is (num_obs, num_latents)',
                        # results
                        'metrics': f'Dict[str, float] of various scores from different disentanglement metrics computed over the model and the data.',
                    }
                }
            # save the data for rl
            # | rl_dat_path = save_dir.joinpath('rl_data.json')
            # | with open(rl_dat_path, 'w') as fp:
            # |     json.dump({k: (v.tolist() if isinstance(v, np.ndarray) else v) for k, v in dat.items()}, fp)
            # | print(f'[SAVED DATA]: {rl_dat_path}')
            rl_npz_path = save_dir.joinpath('rl_data.npz')
            np.savez_compressed(rl_npz_path, data=dat)
            print(f'[SAVED DATA]: {rl_npz_path}')
# ========================================================================= #
# RUN! #
# ========================================================================= #
if __name__ == '__main__':
    # make sure we can see the output!
    logging.basicConfig(level=logging.INFO)
    # run everything!
    # -- alternative configurations kept for reference:
    # run_experiments(train_steps=10000, ada_ratio=1.5) # ada not always strong enough? or is that a metric error
    run_experiments(train_steps=10000, ada_ratio=1.25)  # seems best?
    # run_experiments(train_steps=5000, ada_ratio=1.5)
    # run_experiments(train_steps=5000, ada_ratio=1.25)
# ========================================================================= #
# END #
# ========================================================================= #
|
{"hexsha": "eb079949b0e969ef600853562d4788e8c3a93d0f", "size": 26147, "ext": "py", "lang": "Python", "max_stars_repo_path": "research/part04_application_to_rl/e01_learn_xy_representations/train_vae.py", "max_stars_repo_name": "nmichlo/msc-research", "max_stars_repo_head_hexsha": "625e57eca77bbfbc4728ccebdb0733e1613bd258", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2022-03-31T21:20:30.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-31T21:20:30.000Z", "max_issues_repo_path": "research/part04_application_to_rl/e01_learn_xy_representations/train_vae.py", "max_issues_repo_name": "nmichlo/msc-research", "max_issues_repo_head_hexsha": "625e57eca77bbfbc4728ccebdb0733e1613bd258", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "research/part04_application_to_rl/e01_learn_xy_representations/train_vae.py", "max_forks_repo_name": "nmichlo/msc-research", "max_forks_repo_head_hexsha": "625e57eca77bbfbc4728ccebdb0733e1613bd258", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 66.7015306122, "max_line_length": 464, "alphanum_fraction": 0.6635178032, "include": true, "reason": "import numpy", "num_tokens": 6458}
|
/*****************************************************************************
*
* This file is part of Mapnik (c++ mapping toolkit)
*
* Copyright (C) 2011 Artem Pavlenko
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*
*****************************************************************************/
#ifndef MAPNIK_DATASOURCE_CACHE_HPP
#define MAPNIK_DATASOURCE_CACHE_HPP
// mapnik
#include <mapnik/utils.hpp>
#include <mapnik/params.hpp>
#include <mapnik/datasource.hpp>
#include <mapnik/noncopyable.hpp>
// boost
#include <boost/shared_ptr.hpp>
// stl
#include <map>
namespace mapnik {
class PluginInfo;
// Process-wide registry of datasource plugins. Implemented as a static
// singleton (CreateStatic policy) and made non-copyable.
class MAPNIK_DECL datasource_cache
    : public singleton<datasource_cache, CreateStatic>,
      private mapnik::noncopyable
{
    // The singleton creation policy needs access to the private ctor/dtor.
    friend class CreateStatic<datasource_cache>;
public:
    // Names of all currently registered plugins.
    std::vector<std::string> plugin_names();
    // Plugin search directories rendered as a single string (formatting is
    // defined in the implementation file).
    std::string plugin_directories();
    // Register datasources found at the given path (presumably scans a
    // directory -- see the .cpp for details).
    void register_datasources(std::string const& path);
    // Register a single datasource plugin; returns true on success.
    bool register_datasource(std::string const& path);
    // Create a datasource instance from the given parameter set.
    boost::shared_ptr<datasource> create(parameters const& params);
private:
    datasource_cache();
    ~datasource_cache();
    std::map<std::string,boost::shared_ptr<PluginInfo> > plugins_;  // registered plugins keyed by name
    bool registered_;                                               // registration state flag
    std::vector<std::string> plugin_directories_;                   // directories added so far
};
}
#endif // MAPNIK_DATASOURCE_CACHE_HPP
|
{"hexsha": "19b6db245ef0e23ac0407e987d1b688afbf7de34", "size": 2060, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "external/include/mapnik/datasource_cache.hpp", "max_stars_repo_name": "Wujingli/OpenWebGlobeDataProcessing", "max_stars_repo_head_hexsha": "932eaa00c81fc0571122bc618ade010fa255735e", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "external/include/mapnik/datasource_cache.hpp", "max_issues_repo_name": "Wujingli/OpenWebGlobeDataProcessing", "max_issues_repo_head_hexsha": "932eaa00c81fc0571122bc618ade010fa255735e", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "external/include/mapnik/datasource_cache.hpp", "max_forks_repo_name": "Wujingli/OpenWebGlobeDataProcessing", "max_forks_repo_head_hexsha": "932eaa00c81fc0571122bc618ade010fa255735e", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2.0, "max_forks_repo_forks_event_min_datetime": "2019-06-08T15:59:26.000Z", "max_forks_repo_forks_event_max_datetime": "2021-02-06T08:13:01.000Z", "avg_line_length": 32.6984126984, "max_line_length": 80, "alphanum_fraction": 0.6650485437, "num_tokens": 446}
|
print('Loading...')
import numpy as np
import numpy as num
from statistics import mean
import matplotlib.pyplot as plt
import aubio
import pyaudio
import wave
import os
def wavelength_to_rgb(wavelength, gamma=0.8):
    '''Map a light wavelength to an approximate RGB triple.

    The wavelength must be given in nanometers; the visible range covered is
    380 nm through 750 nm (789 THz through 400 THz), and anything outside it
    maps to black. Intensities are attenuated near both ends of the spectrum.
    Based on code by Dan Bruton:
    http://www.physics.sfasu.edu/astro/color/spectra.html

    Returns a list ``[R, G, B]`` of ints in 0..255.
    '''
    w = float(wavelength)
    r = g = b = 0.0
    if 380 <= w <= 440:
        # violet->blue, fading in from the UV edge
        att = 0.3 + 0.7 * (w - 380) / (440 - 380)
        r = ((-(w - 440) / (440 - 380)) * att) ** gamma
        b = (1.0 * att) ** gamma
    elif 440 <= w <= 490:
        # blue->cyan
        g = ((w - 440) / (490 - 440)) ** gamma
        b = 1.0
    elif 490 <= w <= 510:
        # cyan->green
        g = 1.0
        b = (-(w - 510) / (510 - 490)) ** gamma
    elif 510 <= w <= 580:
        # green->yellow
        r = ((w - 510) / (580 - 510)) ** gamma
        g = 1.0
    elif 580 <= w <= 645:
        # yellow->red
        r = 1.0
        g = (-(w - 645) / (645 - 580)) ** gamma
    elif 645 <= w <= 750:
        # red, fading out toward the IR edge
        att = 0.3 + 0.7 * (750 - w) / (750 - 645)
        r = (1.0 * att) ** gamma
    return [int(r * 255), int(g * 255), int(b * 255)]
# PyAudio object for microphone capture.
p = pyaudio.PyAudio()

# Open a mono 44.1 kHz float32 input stream.
stream = p.open(format=pyaudio.paFloat32,
                channels=1, rate=44100, input=True,
                frames_per_buffer=1024)

# Aubio pitch detection: 2048-sample window, 1024-sample hop, 44.1 kHz.
pDetection = aubio.pitch("default", 2048,
                         2048 // 2, 44100)
# Report pitch in Hertz and ignore frames quieter than -60 dB.
pDetection.set_unit("Hz")
pDetection.set_silence(-60)

# Interactive matplotlib figure, redrawn every frame.
plt.ion()
fig = plt.gcf()
fig.show()
fig.canvas.draw()

iteration = 0
pitch_list = []
plt.title('Plotting of the current input audio frequency')

while True:
    iteration += 1
    if iteration > 50:
        # Restart the x-axis every 50 frames to keep the plot readable.
        iteration = 0
        plt.title('Color representation of the current input audio frequency')
        plt.clf()
    data = stream.read(1024, exception_on_overflow=False)
    # FIX: np.fromstring is deprecated (and removed for binary input in newer
    # NumPy); np.frombuffer is the supported zero-copy equivalent.
    samples = num.frombuffer(data, dtype=aubio.float_type)
    pitch = pDetection(samples)[0]
    # Compute the energy (volume) of the
    # current frame.
    volume = num.sum(samples**2)/len(samples)
    # Format the volume output so that at most
    # it has six decimal numbers.
    volume = "{:.6f}".format(volume)
    pitch_list.append(float(pitch))
    # Map the pitch onto a visible-light wavelength (inverted scale) so low
    # frequencies render red and high frequencies blue/violet.
    wavelength = 700-((pitch*255/1000)+450)+450
    rgb = wavelength_to_rgb(wavelength)
    print('RGB: ' + str(rgb))
    color = np.array(rgb)
    if float(pitch) < 2000:
        plt.plot(iteration, float(pitch), c=color/255.0, linestyle='-', marker='o')
    os.system('cls' if os.name == 'nt' else 'clear')
    print('Frequency: ' + str(pitch) + 'Hz')
    print('Volume: ' + str(volume))
    plt.xlabel('Average Frequency: ' + str(round(mean(pitch_list))) + 'Hz | Volume: ' + str(volume) + ' | Current Frequency: ' + str(round(float(pitch))) + 'Hz')
    fig.canvas.draw()
    plt.pause(0.1)
|
{"hexsha": "d816b2e9a2f34bb891acee10098413128598c3f4", "size": 3288, "ext": "py", "lang": "Python", "max_stars_repo_path": "pitch_plotting.py", "max_stars_repo_name": "Animenosekai/AudioVisualization.py", "max_stars_repo_head_hexsha": "6742388a564883532188c9b720a491b561134bae", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "pitch_plotting.py", "max_issues_repo_name": "Animenosekai/AudioVisualization.py", "max_issues_repo_head_hexsha": "6742388a564883532188c9b720a491b561134bae", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "pitch_plotting.py", "max_forks_repo_name": "Animenosekai/AudioVisualization.py", "max_forks_repo_head_hexsha": "6742388a564883532188c9b720a491b561134bae", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 24.9090909091, "max_line_length": 158, "alphanum_fraction": 0.6082725061, "include": true, "reason": "import numpy", "num_tokens": 996}
|
#include <stan/math/prim/scal.hpp>
#include <boost/math/special_functions/digamma.hpp>
#include <gtest/gtest.h>
#include <cmath>
#include <limits>
// digamma should agree with Boost's reference implementation at ordinary
// positive and negative non-integer arguments.
TEST(MathFunctions, digamma) {
  EXPECT_FLOAT_EQ(boost::math::digamma(0.5), stan::math::digamma(0.5));
  EXPECT_FLOAT_EQ(boost::math::digamma(-1.5), stan::math::digamma(-1.5));
}
// Edge cases: NaN input propagates, a negative integer argument yields NaN,
// and a very large argument still produces a normal (finite) value.
TEST(MathFunctions, digamma_nan) {
  double nan = std::numeric_limits<double>::quiet_NaN();
  EXPECT_TRUE(std::isnan(stan::math::digamma(nan)));
  EXPECT_TRUE(std::isnan(stan::math::digamma(-1)));
  EXPECT_TRUE(std::isnormal(stan::math::digamma(1.0E50)));
}
|
{"hexsha": "20acf1aba327c39bc09d49c6e384f279a4326482", "size": 589, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "test/unit/math/prim/scal/fun/digamma_test.cpp", "max_stars_repo_name": "christophernhill/math", "max_stars_repo_head_hexsha": "dc41aba296d592c7099be15eed6ba136d0f140b3", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "test/unit/math/prim/scal/fun/digamma_test.cpp", "max_issues_repo_name": "christophernhill/math", "max_issues_repo_head_hexsha": "dc41aba296d592c7099be15eed6ba136d0f140b3", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "test/unit/math/prim/scal/fun/digamma_test.cpp", "max_forks_repo_name": "christophernhill/math", "max_forks_repo_head_hexsha": "dc41aba296d592c7099be15eed6ba136d0f140b3", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 28.0476190476, "max_line_length": 73, "alphanum_fraction": 0.7113752122, "num_tokens": 174}
|
from PIL import Image
import numpy as np
from GrayScale import GrayScale
from matplotlib import pyplot as plt
def GaussianBlur(img, window_size=3, sigma=0.5):
    """
    Performs a Blurring operation on the input grayscale image using the
    Normalized Gaussian Kernel.

    Border pixels closer than window_size // 2 to an edge are left unchanged
    (the convolution is only applied where the window fits entirely).

    Input: numpy array of grayscale image
           Gaussian Kernel window size (default = 3)
           Standard Deviation of the Gaussian Kernel (default = 0.5)
    Output: numpy array of blurred image
    """
    center = window_size // 2
    # Build the Gaussian kernel in one vectorized expression instead of a
    # double Python loop, then normalize it so the weights sum to 1.
    # (The previous version also shadowed the builtin `sum` here.)
    ax = np.arange(window_size) - center
    xx, yy = np.meshgrid(ax, ax, indexing='ij')
    kernel = np.exp(-(xx ** 2 + yy ** 2) / (sigma ** 2 * 2))
    kernel = kernel / kernel.sum()
    img = GrayScale(img)
    img_conv = img.copy()
    # Convolve only the interior so windows never extend past the edge.
    for i in range(center, np.shape(img)[0] - center):
        for j in range(center, np.shape(img)[1] - center):
            img_conv[i, j] = np.sum(
                kernel * img[i - center:i + center + 1, j - center:j + center + 1])
    return img_conv
if __name__ == "__main__":
    # Demo: load a grayscale test image, blur it with a 5x5 Gaussian kernel
    # (sigma = 2), and display the result with pixel-dimension axis labels.
    img = np.array(Image.open(
        'Images/SRA Khopdi Baba Grayscale.jpg'), np.float64)
    img_conv = GaussianBlur(img, 5, 2)
    plt.imshow(img_conv, cmap='gray', interpolation='nearest')
    plt.axis('on')
    plt.xlabel('{} pixels'.format(np.shape(img_conv)[1]))
    plt.ylabel('{} pixels'.format(np.shape(img_conv)[0]))
    plt.title("Gaussian Filtered Image")
    plt.show()
|
{"hexsha": "b149d60c22972ba766b62feafd256d6c6439bedc", "size": 1536, "ext": "py", "lang": "Python", "max_stars_repo_path": "GaussianBlur.py", "max_stars_repo_name": "saurabh1002/Computer_Vision_Python", "max_stars_repo_head_hexsha": "83feee97082bf09ddbd3d79ff546d9d545c3c8ed", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-04-21T15:25:48.000Z", "max_stars_repo_stars_event_max_datetime": "2020-04-21T15:25:48.000Z", "max_issues_repo_path": "GaussianBlur.py", "max_issues_repo_name": "saurabh1002/Computer_Vision_Python", "max_issues_repo_head_hexsha": "83feee97082bf09ddbd3d79ff546d9d545c3c8ed", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "GaussianBlur.py", "max_forks_repo_name": "saurabh1002/Computer_Vision_Python", "max_forks_repo_head_hexsha": "83feee97082bf09ddbd3d79ff546d9d545c3c8ed", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.3469387755, "max_line_length": 100, "alphanum_fraction": 0.6087239583, "include": true, "reason": "import numpy", "num_tokens": 411}
|
import sys
import numpy as np
from MCEq.misc import theta_rad
import mceq_config as config
class EarthGeometry(object):
    r"""A model of the Earth's geometry, approximating it by a sphere.

    Given a detector at height ``h_obs`` and an atmosphere extending up to
    ``h_atm`` above a sphere of radius ``r_E``, this class computes slant
    path lengths through the atmosphere and the relations between path
    position, height and zenith angle. Running this module as a script
    produces illustrative plots of these quantities.

    Attributes:
      h_obs (float): observation level height [cm]
      h_atm (float): top of the atmosphere [cm]
      r_E (float): radius Earth [cm]
      r_top (float): radius at top of the atmosphere [cm]
      r_obs (float): radius at observation level [cm]
    """

    def __init__(self):
        # Configuration values are in meters; store everything in cm.
        self.h_obs = config.h_obs * 1e2  # cm
        self.h_atm = config.h_atm * 1e2  # cm
        self.r_E = config.r_E * 1e2  # cm
        self.r_top = self.r_E + self.h_atm
        self.r_obs = self.r_E + self.h_obs

    def _A_1(self, theta):
        r"""Segment length :math:`A1(\theta)` in cm."""
        return np.cos(theta) * self.r_obs

    def _A_2(self, theta):
        r"""Segment length :math:`A2(\theta)` in cm."""
        return np.sin(theta) * self.r_obs

    def l(self, theta):
        r"""Path length in [cm] for a given zenith angle :math:`\theta` [rad]."""
        a1 = self._A_1(theta)
        a2 = self._A_2(theta)
        return np.sqrt(self.r_top**2 - a2**2) - a1

    def cos_th_star(self, theta):
        r"""Zenith angle cosine :math:`\cos(\theta^*)` at the atmospheric
        border, as a function of the zenith angle at the detector."""
        return (self._A_1(theta) + self.l(theta)) / self.r_top

    def h(self, dl, theta):
        r"""Height above surface at distance :math:`dl` counted from the
        beginning of path :math:`l(\theta)` in cm."""
        remaining = self._A_1(theta) + self.l(theta) - dl
        return np.sqrt(self._A_2(theta)**2 + remaining**2) - self.r_E

    def delta_l(self, h, theta):
        r"""Distance :math:`dl` covered along path :math:`l(\theta)` as a
        function of current height. Inverse to :func:`h`."""
        return (self._A_1(theta) + self.l(theta)
                - np.sqrt((h + self.r_E)**2 - self._A_2(theta)**2))
def chirkin_cos_theta_star(costheta):
    r""":math:`\cos(\theta^*)` parameterization.

    Returns the equivalent zenith angle for very inclined showers, based on
    a CORSIKA study by `D. Chirkin, hep-ph/0407078v1, 2004
    <http://arxiv.org/abs/hep-ph/0407078v1>`_.

    Args:
        costheta (float): :math:`\cos(\theta)` in [rad]

    Returns:
        float: :math:`\cos(\theta*)` in [rad]
    """
    # Fit parameters from the Chirkin parameterization.
    p1, p2, p3, p4, p5 = 0.102573, -0.068287, 0.958633, 0.0407253, 0.817285
    x = costheta
    numerator = x**2 + p1**2 + p2 * x**p3 + p4 * x**p5
    denominator = 1 + p1**2 + p2 + p4
    return np.sqrt(numerator / denominator)
if __name__ == "__main__":
    # Demo: reproduce the documentation plots for the default Earth geometry
    # (path length, theta* at the top of the atmosphere, and the
    # height <-> path-length relations).
    import matplotlib.pyplot as plt
    earth = EarthGeometry()
    theta_list = np.linspace(0, 90, 500)
    h_vec = np.linspace(0, earth.h_atm, 500)
    th_list_rad = theta_rad(theta_list)
    # Plot 1: slant path length vs. zenith angle at the detector.
    fig = plt.figure(figsize=(5, 4))
    fig.set_tight_layout(dict(rect=[0.00, 0.00, 1, 1]))
    plt.plot(theta_list, earth.l(th_list_rad) / 1e5, lw=2)
    plt.xlabel(r'zenith $\theta$ at detector')
    plt.ylabel(r'path length $l(\theta)$ in km')
    ax = plt.gca()
    ax.spines['right'].set_visible(False)
    ax.spines['top'].set_visible(False)
    ax.xaxis.set_ticks_position('bottom')
    ax.yaxis.set_ticks_position('left')
    # Plot 2: equivalent zenith angle at the top of the atmosphere.
    fig = plt.figure(figsize=(5, 4))
    fig.set_tight_layout(dict(rect=[0.00, 0.00, 1, 1]))
    plt.plot(theta_list,
             np.arccos(earth.cos_th_star(th_list_rad)) / np.pi * 180.,
             lw=2)
    plt.xlabel(r'zenith $\theta$ at detector')
    plt.ylabel(r'$\theta^*$ at top of the atm.')
    plt.ylim([0, 90])
    ax = plt.gca()
    ax.spines['right'].set_visible(False)
    ax.spines['top'].set_visible(False)
    ax.xaxis.set_ticks_position('bottom')
    ax.yaxis.set_ticks_position('left')
    # Plot 3: path length covered as a function of height (85 deg zenith).
    fig = plt.figure(figsize=(5, 4))
    fig.set_tight_layout(dict(rect=[0.00, 0.00, 1, 1]))
    plt.plot(h_vec / 1e5, earth.delta_l(h_vec, theta_rad(85.)) / 1e5, lw=2)
    plt.ylabel(r'Path length $\Delta l(h)$ in km')
    plt.xlabel(r'atm. height $h_{atm}$ in km')
    ax = plt.gca()
    ax.spines['right'].set_visible(False)
    ax.spines['top'].set_visible(False)
    ax.xaxis.set_ticks_position('bottom')
    ax.yaxis.set_ticks_position('left')
    # Plot 4: height along the path for several zenith angles.
    fig = plt.figure(figsize=(5, 4))
    fig.set_tight_layout(dict(rect=[0.00, 0.00, 1, 1]))
    for theta in [30., 60., 70., 80., 85., 90.]:
        theta_path = theta_rad(theta)
        delta_l_vec = np.linspace(0, earth.l(theta_path), 1000)
        plt.plot(delta_l_vec / 1e5,
                 earth.h(delta_l_vec, theta_path) / 1e5,
                 label=r'${0}^o$'.format(theta),
                 lw=2)
    plt.legend()
    plt.xlabel(r'path length $\Delta l$ [km]')
    plt.ylabel(r'atm. height $h_{atm}(\Delta l, \theta)$ [km]')
    ax = plt.gca()
    ax.spines['right'].set_visible(False)
    ax.spines['top'].set_visible(False)
    ax.xaxis.set_ticks_position('bottom')
    ax.yaxis.set_ticks_position('left')
    plt.show()
|
{"hexsha": "9f8f7e55a5b978802a081784dabd7196e7c53257", "size": 7994, "ext": "py", "lang": "Python", "max_stars_repo_path": "MCEq/geometry/geometry.py", "max_stars_repo_name": "joheinze/MCEq", "max_stars_repo_head_hexsha": "7f10aae90d1714997216edcf82a099628d1ff3c6", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 20, "max_stars_repo_stars_event_min_datetime": "2015-03-24T15:53:12.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-29T00:23:42.000Z", "max_issues_repo_path": "MCEq/geometry/geometry.py", "max_issues_repo_name": "joheinze/MCEq", "max_issues_repo_head_hexsha": "7f10aae90d1714997216edcf82a099628d1ff3c6", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 25, "max_issues_repo_issues_event_min_datetime": "2017-04-26T07:41:31.000Z", "max_issues_repo_issues_event_max_datetime": "2022-01-04T05:49:44.000Z", "max_forks_repo_path": "MCEq/geometry/geometry.py", "max_forks_repo_name": "joheinze/MCEq", "max_forks_repo_head_hexsha": "7f10aae90d1714997216edcf82a099628d1ff3c6", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 34, "max_forks_repo_forks_event_min_datetime": "2015-03-03T02:56:34.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-11T10:48:48.000Z", "avg_line_length": 34.6060606061, "max_line_length": 82, "alphanum_fraction": 0.5850637978, "include": true, "reason": "import numpy", "num_tokens": 2360}
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import cv2
import time
import argparse
import torch
import torchvision
import numpy as np
from torch.utils.data import DataLoader
from dataset.datasets import WLFWDatasets
from models.pfld import PFLDbackbone, AuxiliaryNet, PFLDLoss
def validate(wlfw_val_dataloader, plfd_backbone, auxiliarynet):
    """Evaluate landmark predictions over the validation dataloader.

    Prints two aggregate errors over all batches:
      * NME: batch-mean L2 distance between predicted and ground-truth
        landmark vectors.
      * ION: mean per-sample landmark error normalized by the distance
        between landmarks 60 and 72 (inter-ocular distance per the
        original code's comments -- confirm against the annotation scheme).

    Args:
        wlfw_val_dataloader: yields (img, landmark_gt, attribute_gt,
            euler_angle_gt) batches.
        plfd_backbone: trained landmark network.
        auxiliarynet: auxiliary network; set to eval mode but otherwise
            unused here (kept for interface compatibility).
    """
    plfd_backbone.eval()
    auxiliarynet.eval()
    # Move the networks to the GPU once, instead of on every batch
    # as the previous version did inside the loop.
    plfd_backbone = plfd_backbone.cuda()
    auxiliarynet = auxiliarynet.cuda()
    with torch.no_grad():
        losses, losses_ION = [], []
        for idx, (img, landmark_gt, attribute_gt, euler_angle_gt) in enumerate(wlfw_val_dataloader):
            img.requires_grad = False
            img = img.cuda(non_blocking=True)
            # attribute_gt / euler_angle_gt are transferred but not used below.
            attribute_gt.requires_grad = False
            attribute_gt = attribute_gt.cuda(non_blocking=True)
            landmark_gt.requires_grad = False
            landmark_gt = landmark_gt.cuda(non_blocking=True)
            euler_angle_gt.requires_grad = False
            euler_angle_gt = euler_angle_gt.cuda(non_blocking=True)
            _, landmarks = plfd_backbone(img)
            # Batch-mean L2 distance between flat landmark vectors.
            loss = torch.mean(torch.sqrt(torch.sum((landmark_gt - landmarks)**2, axis=1)))
            landmarks = landmarks.cpu().numpy()
            landmarks = landmarks.reshape(landmarks.shape[0], -1, 2)
            landmark_gt = landmark_gt.reshape(landmark_gt.shape[0], -1, 2).cpu().numpy()
            # Per-sample sum of per-landmark Euclidean errors.
            error_diff = np.sum(np.sqrt(np.sum((landmark_gt - landmarks) ** 2, axis=2)), axis=1)
            interocular_distance = np.sqrt(np.sum((landmarks[:, 60, :] - landmarks[:, 72, :]) ** 2, axis=1))
            # interpupil_distance = np.sqrt(np.sum((landmarks[:, 60, :] - landmarks[:, 72, :]) ** 2, axis=1))
            error_norm = np.mean(error_diff / interocular_distance)
            losses.append(loss.cpu().numpy())
            losses_ION.append(error_norm)
        print("NME", np.mean(losses))
        print("ION", np.mean(losses_ION))
def main(args):
    """Load a trained checkpoint and run validation over the test dataset.

    Args:
        args: parsed command-line arguments; uses ``model_path``,
            ``test_dataset``, ``batch_size`` and ``workers``.
    """
    checkpoint = torch.load(args.model_path)
    plfd_backbone = PFLDbackbone().cuda()
    auxiliarynet = AuxiliaryNet().cuda()
    # Restore both networks from the single checkpoint dict.
    plfd_backbone.load_state_dict(checkpoint['plfd_backbone'])
    auxiliarynet.load_state_dict(checkpoint['auxiliarynet'])
    transform = torchvision.transforms.Compose([torchvision.transforms.ToTensor()])
    # No shuffling: evaluation order is deterministic.
    wlfw_val_dataloader = DataLoader(WLFWDatasets(args.test_dataset, transform), \
                                     batch_size=args.batch_size, shuffle=False, \
                                     num_workers=args.workers)
    validate(wlfw_val_dataloader, plfd_backbone, auxiliarynet)
def _str2bool(v):
    """Parse a command-line boolean; accepts yes/no, true/false, t/f, y/n, 1/0."""
    if isinstance(v, bool):
        return v
    if v.lower() in ('yes', 'true', 't', 'y', '1'):
        return True
    if v.lower() in ('no', 'false', 'f', 'n', '0'):
        return False
    raise argparse.ArgumentTypeError('Boolean value expected.')


def parse_args():
    """Build and parse the command-line arguments for testing.

    Returns:
        argparse.Namespace with use_gpu, gpu_ids, workers, batch_size,
        model_path and test_dataset.
    """
    parser = argparse.ArgumentParser(description='Testing')
    # env
    # FIX: type=bool treats any non-empty string (even "False") as True;
    # use an explicit boolean parser instead.
    parser.add_argument('--use_gpu', type=_str2bool, default=False)
    # FIX: type=list splits the argument string into individual characters;
    # parse a comma-separated list of ints instead (e.g. "--gpu_ids 0,1").
    parser.add_argument('--gpu_ids',
                        type=lambda s: [int(i) for i in s.split(',')],
                        default=[0, 1])
    parser.add_argument('--workers', type=int, default=8)
    parser.add_argument('--batch_size', type=int, default=8)
    parser.add_argument('--model_path', type=str, default="./checkpoint/checkpoint.pth.tar")
    parser.add_argument('--test_dataset', type=str, default='./data/test_data/list.txt')
    args = parser.parse_args()
    return args
if __name__ == "__main__":
    # Entry point: parse CLI arguments, then run checkpoint validation.
    args = parse_args()
    main(args)
|
{"hexsha": "de13d3bcd16b424a616b6216dcae76159904bb66", "size": 3889, "ext": "py", "lang": "Python", "max_stars_repo_path": "test.py", "max_stars_repo_name": "Ontheway361/pfld-pytorch", "max_stars_repo_head_hexsha": "7c967623b930eba312d092519d804a103332a38a", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 5, "max_stars_repo_stars_event_min_datetime": "2019-11-22T11:06:43.000Z", "max_stars_repo_stars_event_max_datetime": "2021-01-28T09:40:00.000Z", "max_issues_repo_path": "test.py", "max_issues_repo_name": "Ontheway361/pfld-pytorch", "max_issues_repo_head_hexsha": "7c967623b930eba312d092519d804a103332a38a", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "test.py", "max_forks_repo_name": "Ontheway361/pfld-pytorch", "max_forks_repo_head_hexsha": "7c967623b930eba312d092519d804a103332a38a", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-12-20T11:43:47.000Z", "max_forks_repo_forks_event_max_datetime": "2020-12-20T11:43:47.000Z", "avg_line_length": 35.6788990826, "max_line_length": 109, "alphanum_fraction": 0.6261249679, "include": true, "reason": "import numpy", "num_tokens": 936}
|
theory ITree_Iteration
imports ITree_Divergence ITree_Deadlock
begin
subsection \<open> Iteration \<close>
text \<open> For now we support only basic tail-recursive iteration. \<close>
(* Tail-recursive iteration: while the condition b holds in state s, run the
   body P and insert a silent step (\<tau>) between iterations; once b s is
   false, terminate with Ret s. *)
corec iterate :: "('s \<Rightarrow> bool) \<Rightarrow> ('e, 's) htree \<Rightarrow> ('e, 's) htree" where
"iterate b P s = (if (b s) then (P s \<bind> (\<tau> \<circ> (iterate b P))) else Ret s)"

(* An unconditional loop, and iteration of a state-independent process. *)
abbreviation "loop \<equiv> iterate (\<lambda> s. True)"

abbreviation "iter P \<equiv> loop (\<lambda> _. P) ()"
(* If the loop condition is false, iteration terminates immediately. *)
lemma iterate_cond_false [simp]:
  "\<not> (b s) \<Longrightarrow> iterate b P s = Ret s"
  by (simp add: iterate.code)

(* A non-terminating body makes the whole iteration non-terminating. *)
lemma iterate_body_nonterminates:
  assumes "nonterminating (P s)" "b s"
  shows "nonterminating (iterate b P s)"
  by (simp add: assms iterate.code)

(* An unconditional loop unfolds to one body run followed by the loop. *)
lemma loop_unfold: "loop P = P ;; (\<tau> \<circ> loop P)"
  by (simp add: seq_itree_def kleisli_comp_def fun_eq_iff iterate.code)

(* Looping the identity process diverges from every state. *)
lemma loop_Ret: "loop Ret = (\<lambda> s. diverge)"
  by (metis Sil_nfp_stabilises bind_Ret comp_apply diverges_then_diverge iterate.code)

(* Destruction rules: what each constructor shape of an iteration implies. *)
lemma iterate_Ret_dest:
  "Ret x = iterate b P s \<Longrightarrow> (\<not> (b s) \<and> x = s)"
  apply (cases "P s")
  apply (metis bind_Ret comp_apply iterate.code itree.distinct(1) itree.sel(1))
  apply (metis bind_itree.disc_iff(1) iterate.code itree.disc(2) itree.discI(1) itree.inject(1))
  apply (metis bind_Vis iterate.code itree.distinct(3) itree.inject(1))
  done

(* Elimination forms of iterate_Ret_dest (both orientations). *)
lemma iterate_RetE:
  assumes "iterate b P s = Ret x" "\<lbrakk> \<not> (b s); x = s \<rbrakk> \<Longrightarrow> Q"
  shows Q
  by (metis assms iterate_Ret_dest)

lemma iterate_RetE':
  assumes "Ret x = iterate b P s" "\<lbrakk> \<not> (b s); x = s \<rbrakk> \<Longrightarrow> Q"
  shows Q
  by (metis assms iterate_Ret_dest)

(* A silent step arises either from the body returning (and the loop
   restarting) or from a silent step of the body itself. *)
lemma iterate_Sil_dest:
  "\<tau> P' = iterate b P s \<Longrightarrow> (b s \<and> ((\<exists> s'. P s = Ret s' \<and> P' = iterate b P s') \<or> (\<exists> P''. P s = \<tau> P'' \<and> P' = (P'' \<bind> \<tau> \<circ> iterate b P))))"
  apply (cases "P s")
  apply (simp_all)
  apply (metis bind_Ret comp_apply iterate.code itree.distinct(1) itree.sel(2))
  apply (metis bind_Sil iterate.code itree.distinct(1) itree.inject(2))
  apply (metis bind_Vis iterate.code itree.distinct(1) itree.distinct(5))
  done

lemma iterate_SilE [elim, consumes 1, case_names initial continue]:
  assumes "\<tau> P = iterate b Q s"
    "\<And> P'. \<lbrakk> b s; Q s = \<tau> P'; P = (P' \<bind> \<tau> \<circ> iterate b Q) \<rbrakk> \<Longrightarrow> R"
    "\<And> s'. \<lbrakk> b s; Q s = Ret s'; P = iterate b Q s' \<rbrakk> \<Longrightarrow> R"
  shows R
  by (metis assms iterate_Sil_dest)

(* A visible choice of an iteration is exactly a visible choice of the body
   with the continuation extended by the loop. *)
lemma iterate_Vis_dest:
  "Vis F = iterate b Q s \<Longrightarrow> b s \<and> (\<exists> G. Q s = Vis G \<and> F = (map_pfun (\<lambda> x. bind_itree x (\<tau> \<circ> iterate b Q)) G))"
  apply (cases "Q s")
  apply (simp_all)
  apply (metis bind_Ret comp_apply iterate.code itree.simps(7) itree.simps(9))
  apply (metis bind_Sil iterate.code itree.distinct(3) itree.distinct(5))
  apply (metis bind_Vis iterate.code itree.inject(3) itree.simps(7))
  done

lemma iterate_VisE:
  assumes "Vis F = iterate b Q s"
    "\<And> G. \<lbrakk> b s; Q s = Vis G; F = (map_pfun (\<lambda> x. bind_itree x (\<tau> \<circ> iterate b Q)) G) \<rbrakk> \<Longrightarrow> R"
  shows R
  by (metis assms(1) assms(2) iterate_Vis_dest)

lemma iterate_VisE'[consumes 1, case_names body]:
  assumes "iterate b Q s = Vis F"
    "\<And> G. \<lbrakk> b s; Q s = Vis G; F = (map_pfun (\<lambda> x. bind_itree x (\<tau> \<circ> iterate b Q)) G) \<rbrakk> \<Longrightarrow> R"
  shows R
  by (metis assms(1) assms(2) iterate_Vis_dest)

(* A terminating trace of the body lifts to a trace of the iteration. *)
lemma iterate_trace_to:
  assumes "P s \<midarrow>es \<leadsto> Ret s'" "b s"
  shows "iterate b P s \<midarrow>es\<leadsto> iterate b P s'"
proof -
  have "(P s \<bind> \<tau> \<circ> iterate b P) \<midarrow>es\<leadsto> (Ret s' \<bind> \<tau> \<circ> iterate b P)"
    by (meson assms(1) trace_to_bind_left)
  thus ?thesis
    by (auto simp add: iterate.code assms)
qed

(* If one body execution falsifies the condition, the iteration terminates. *)
lemma iterate_term_once:
  assumes "P s \<midarrow>es \<leadsto> Ret s'" "b s" "\<not> b s'"
  shows "iterate b P s \<midarrow>es\<leadsto> Ret s'"
  by (metis assms(1) assms(2) assms(3) iterate.code iterate_trace_to)
subsection \<open> Power \<close>

(* Overload the iterated-composition operator (compow, P ^^ n) for
   homogeneous interaction trees: n sequential copies of P. *)
overloading
  itreepow \<equiv> "compow :: nat \<Rightarrow> ('e, 's) htree \<Rightarrow> ('e, 's) htree"
begin

fun itreepow :: "nat \<Rightarrow> ('e, 's) htree \<Rightarrow> ('e, 's) htree" where
"itreepow 0 P = Ret" |
"itreepow (Suc n) P = P ;; itreepow n P"

end
subsection \<open> Chains \<close>

(* A chain records, for each terminated run of the body, the event trace
   produced and the state reached. *)
type_synonym ('e, 's) chain = "('e list \<times> 's) list"

(* s \<turnstile> P \<midarrow>chn\<leadsto>\<^sup>* s' holds when repeatedly executing P to
   termination, starting from s, yields the chain chn of (trace, state)
   pairs and finishes in state s'. *)
inductive itree_chain :: "'s \<Rightarrow> ('e, 's) htree \<Rightarrow> ('e list \<times> 's) list \<Rightarrow> 's \<Rightarrow> bool" ("_ \<turnstile> _ \<midarrow>_\<leadsto>\<^sup>* _" [55, 0, 0, 55] 55) where
chain_Nil [intro]: "s \<turnstile> P \<midarrow>[]\<leadsto>\<^sup>* s" |
chain_step [intro]: "\<lbrakk> P(s) \<midarrow>tr\<leadsto> \<checkmark> s\<^sub>0; s\<^sub>0 \<turnstile> P \<midarrow>chn\<leadsto>\<^sup>* s\<^sub>1 \<rbrakk> \<Longrightarrow> s \<turnstile> P \<midarrow>(tr, s\<^sub>0) # chn\<leadsto>\<^sup>* s\<^sub>1"

inductive_cases
  chain_stepE [elim]: "s \<turnstile> P \<midarrow>(tr, s\<^sub>0) # chn\<leadsto>\<^sup>* s\<^sub>1"

(* The final state of a non-empty chain is its last recorded state. *)
lemma chain_last: "\<lbrakk> s \<turnstile> P \<midarrow>chn\<leadsto>\<^sup>* s'; chn \<noteq> [] \<rbrakk> \<Longrightarrow> snd (last chn) = s'"
  by (induct rule: itree_chain.induct, auto)
     (metis itree_chain.simps list.discI)

(* Chains compose and decompose along list append. *)
lemma chain_appendI: "\<lbrakk> s \<turnstile> P \<midarrow>tr\<^sub>1\<leadsto>\<^sup>* s\<^sub>0; s\<^sub>0 \<turnstile> P \<midarrow>tr\<^sub>2\<leadsto>\<^sup>* s' \<rbrakk> \<Longrightarrow> s \<turnstile> P \<midarrow>tr\<^sub>1 @ tr\<^sub>2\<leadsto>\<^sup>* s'"
  by (induct rule: itree_chain.induct, auto simp add: chain_step)

lemma chain_appendD: "s \<turnstile> P \<midarrow>tr\<^sub>1 @ tr\<^sub>2\<leadsto>\<^sup>* s' \<Longrightarrow> \<exists> s\<^sub>0. s \<turnstile> P \<midarrow>tr\<^sub>1\<leadsto>\<^sup>* s\<^sub>0 \<and> s\<^sub>0 \<turnstile> P \<midarrow>tr\<^sub>2\<leadsto>\<^sup>* s'"
  apply (induct tr\<^sub>1 arbitrary: s s')
  apply (simp)
  using chain_Nil apply fastforce
  apply (simp)
  apply (case_tac a)
  apply (meson chain_step chain_stepE)
  done

lemma chain_append_iff: "s \<turnstile> P \<midarrow>tr\<^sub>1 @ tr\<^sub>2\<leadsto>\<^sup>* s' \<longleftrightarrow> (\<exists> s\<^sub>0. s \<turnstile> P \<midarrow>tr\<^sub>1\<leadsto>\<^sup>* s\<^sub>0 \<and> s\<^sub>0 \<turnstile> P \<midarrow>tr\<^sub>2\<leadsto>\<^sup>* s')"
  by (meson chain_appendD chain_appendI)

(* The set of states a chain passes through. *)
definition chain_states :: "('e, 's) chain \<Rightarrow> 's set" where
"chain_states chn = set (map snd chn)"

lemma chain_states_Nil [simp]: "chain_states [] = {}" by (simp add: chain_states_def)

lemma chain_states_Cons [simp]: "chain_states ((tr, s) # chn) = insert s (chain_states chn)"
  by (auto simp add: chain_states_def)

(* The flattened event trace of a chain. *)
definition chain_trace :: "('e, 's) chain \<Rightarrow> 'e list" where
"chain_trace chn = concat (map fst chn)"

lemma chain_trace_Nil [simp]: "chain_trace [] = []" by (simp add: chain_trace_def)

lemma chain_trace_Cons [simp]: "chain_trace ((tr, s) # chn) = tr @ chain_trace chn"
  by (simp add: chain_trace_def)

(* Each step of a chain is a terminating trace of the body: first step,
   then every consecutive pair of steps. *)
lemma chain_first_step: "\<lbrakk> s \<turnstile> P \<midarrow>chn\<leadsto>\<^sup>* s'; chn \<noteq> [] \<rbrakk> \<Longrightarrow> P s \<midarrow>fst (hd chn)\<leadsto> \<checkmark> (snd (hd chn))"
  by (metis chain_stepE list.collapse prod.collapse)

lemma chain_steps: "\<lbrakk> s \<turnstile> P \<midarrow>chn\<leadsto>\<^sup>* s'; length chn > 1; i < length chn - 1 \<rbrakk> \<Longrightarrow> P (snd (chn ! i)) \<midarrow>fst (chn ! Suc i)\<leadsto> \<checkmark> (snd (chn ! Suc i))"
proof (induct arbitrary: i rule: itree_chain.induct)
  case (chain_Nil s P)
  then show ?case by simp
next
  case (chain_step P s tr s\<^sub>0 chn s\<^sub>1)
  then show ?case
  proof (cases "i = 0")
    case True
    with chain_step show ?thesis
      by (simp, metis chain_first_step hd_conv_nth)
  next
    case False
    with chain_step gr0_conv_Suc show ?thesis
      by fastforce
  qed
qed
lemma chain_stated_indexed: "(\<forall>s\<in>chain_states chn. B s) \<longleftrightarrow> (\<forall> i<length chn. B (snd (chn ! i)))"
by (auto simp add: chain_states_def, metis in_set_conv_nth snd_eqD)
fun itree_term_chain ::
"_ \<times> 's \<Rightarrow> ('e, 's) htree \<Rightarrow> 'e list \<Rightarrow> 's \<Rightarrow> bool" ("_ \<turnstile> _ \<midarrow>_\<leadsto>\<^sub>\<checkmark> _" [55, 0, 0, 55] 55)
where "(b, s) \<turnstile> P \<midarrow>tr\<leadsto>\<^sub>\<checkmark> s' \<longleftrightarrow> (\<exists> chn s\<^sub>0 tr\<^sub>0. b s \<and> s \<turnstile> P \<midarrow>chn\<leadsto>\<^sup>* s\<^sub>0 \<and> (\<forall>s\<in>chain_states chn. b s) \<and> P s\<^sub>0 \<midarrow>tr\<^sub>0\<leadsto> \<checkmark> s' \<and> tr = chain_trace chn @ tr\<^sub>0)"
declare itree_term_chain.simps [simp del]
lemma term_chain_step:
assumes "b s" "P(s) \<midarrow>tr\<^sub>0\<leadsto> \<checkmark> s\<^sub>0" "(b, s\<^sub>0) \<turnstile> P \<midarrow>tr\<^sub>1\<leadsto>\<^sub>\<checkmark> s'"
shows "(b, s) \<turnstile> P \<midarrow>tr\<^sub>0 @ tr\<^sub>1\<leadsto>\<^sub>\<checkmark> s'"
proof -
obtain chn s\<^sub>1 tr\<^sub>2 where chn: "b s\<^sub>0" "s\<^sub>0 \<turnstile> P \<midarrow>chn\<leadsto>\<^sup>* s\<^sub>1" "\<forall>s\<in>chain_states chn. b s" "P s\<^sub>1 \<midarrow>tr\<^sub>2\<leadsto> \<checkmark> s'" "tr\<^sub>1 = chain_trace chn @ tr\<^sub>2"
by (metis assms(3) itree_term_chain.simps)
have chn': "s \<turnstile> P \<midarrow>(tr\<^sub>0, s\<^sub>0) # chn\<leadsto>\<^sup>* s\<^sub>1"
by (simp add: assms(2) chain_step chn(2))
show ?thesis
apply (simp add: itree_term_chain.simps assms)
apply (rule_tac x="(tr\<^sub>0, s\<^sub>0) # chn" in exI)
apply (rule_tac x="s\<^sub>1" in exI)
apply (simp_all add: chn chn')
done
qed
lemma iterate_transition_chain:
assumes "s \<turnstile> P \<midarrow>chn\<leadsto>\<^sup>* s'" "b s" "\<forall> s\<^sub>0\<in>chain_states chn. b s\<^sub>0"
shows "iterate b P s \<midarrow>chain_trace chn\<leadsto> iterate b P s'"
using assms
proof (induct s P chn s' rule: itree_chain.induct)
case (chain_Nil s P)
then show ?case by auto
next
case (chain_step P s tr s\<^sub>0 chn s\<^sub>1)
then show ?case
by simp
(meson iterate_trace_to trace_to_trans)
qed
lemma final_state_in_chain: "\<lbrakk> s \<turnstile> P \<midarrow>chn\<leadsto>\<^sup>* s'; chn \<noteq> [] \<rbrakk> \<Longrightarrow> s' \<in> chain_states chn"
by (drule chain_last, simp, auto simp add: chain_states_def)
lemma iterate_chain_terminates:
assumes "b s" "(b, s) \<turnstile> P \<midarrow>tr\<leadsto>\<^sub>\<checkmark> s'" "\<not> b s'"
shows "iterate b P s \<midarrow>tr\<leadsto> \<checkmark> s'"
proof -
obtain chn s\<^sub>0 tr\<^sub>0 where P: "s \<turnstile> P \<midarrow>chn\<leadsto>\<^sup>* s\<^sub>0" "\<forall>s\<in>chain_states chn. b s" "P s\<^sub>0 \<midarrow>tr\<^sub>0\<leadsto> \<checkmark> s'" "tr = chain_trace chn @ tr\<^sub>0"
using assms
by (simp add: itree_term_chain.simps, auto)
have 1: "iterate b P s \<midarrow>chain_trace chn\<leadsto> iterate b P s\<^sub>0"
by (simp add: P(1) P(2) assms(1) iterate_transition_chain)
have 2: "iterate b P s\<^sub>0 \<midarrow>tr\<^sub>0\<leadsto> \<checkmark> s'"
proof -
have "b s\<^sub>0"
by (metis P(1) P(2) assms(1) final_state_in_chain itree_chain.cases list.discI)
thus ?thesis
by (simp add: P(3) assms(3) iterate_term_once)
qed
show ?thesis
using "1" "2" P(4) trace_to_trans by blast
qed
lemmas disj_cases[consumes 1, case_names disj1 disj2] = disjE
lemma bind_extra_tauE:
assumes
"(P \<bind> \<tau> \<circ> \<checkmark>) \<midarrow>tr\<leadsto> P'"
"\<And>P\<^sub>0. \<lbrakk> P \<midarrow>tr\<leadsto> P\<^sub>0; P' = P\<^sub>0 \<bind> \<tau> \<circ> \<checkmark> \<rbrakk> \<Longrightarrow> thesis"
"\<And>x. \<lbrakk> P \<midarrow>tr\<leadsto> Ret x; P' = Ret x \<rbrakk> \<Longrightarrow> thesis"
shows thesis
using assms
by (auto elim!: trace_to_bindE)
(metis Ret_trns bind_Ret comp_apply self_append_conv trace_to_SilE)
text \<open> The next theorem states is a general law for extracting chains from prefixed iterations.
We adopt the prefixed pattern (@{term "Q \<bind> iterate b B"} so that the inductive proof goes through.
Whenever @{term "(Q \<bind> iterate b B) \<midarrow>tr\<leadsto> R"} there are two possibilities. (1) The prefix @{term Q}
performs the transition, and @{term "iterate b B"} is the continuation. (2) The prefix @{term Q}
terminates in a state @{term "s"}, having done a prefix of the trace, and then there is a chain of
iterations of the loop body. Finally, it is possible that the body makes partial progress, leading
to another continuation. The overall trace is consists of (a) the trace contributed by @{term Q};
(b) the trace contributed in the chain; and (c) the trace contributed by partial execution of
they body @{term B}.
\<close>
theorem prefixed_iterate_chain:
fixes B :: "('e, 's) htree"
assumes "(Q \<bind> iterate b B) \<midarrow>tr\<leadsto> R"
shows "(\<exists> Q'. Q \<midarrow>tr\<leadsto> Q' \<and> R = Q' \<bind> iterate b B)
\<or> (\<exists> s chn s\<^sub>0 tr\<^sub>0 tr\<^sub>1 P' n.
Q \<midarrow>tr\<^sub>0\<leadsto> \<checkmark> s \<and> b s \<and> s \<turnstile> B \<midarrow>chn\<leadsto>\<^sup>* s\<^sub>0 \<and> (\<forall> s\<in>chain_states chn. b s) \<and> B s\<^sub>0 \<midarrow>tr\<^sub>1\<leadsto> P'
\<and> tr = tr\<^sub>0 @ chain_trace chn @ tr\<^sub>1
\<and> R = P' \<bind> Sils n \<circ> iterate b B \<and> n \<le> 1)"
using assms
\<comment> \<open> We begin the proof by induction on the transition relation. This leads to three top-level
cases corresponding to the three possible ways a transition is constructed in the inductive predicate. \<close>
proof (induct "Q \<bind> iterate b B" tr R arbitrary: Q rule: trace_to.induct)
\<comment> \<open> If the transition is empty, and so @{term "R = Q \<bind> iterate b B"}, then the proof is trivial. \<close>
case trace_to_Nil
then show ?case
by blast
next
text \<open> If the transition is a @{term "\<tau>"} then we need to further determine whether it originates
in @{term Q} or in the loop @{term "iterate b B"}. \<close>
case (trace_to_Sil P tr P')
have "\<tau> P = Q \<bind> iterate b B"
by (simp add: trace_to_Sil.hyps(3))
thus ?case
\<comment> \<open> We split on these two possibilities below. \<close>
proof (cases rule: bind_SilE')
case (initial Q\<^sub>0)
note Q_def = this(1) and P_def = this(2)
from trace_to_Sil.hyps(2)[of Q\<^sub>0, OF initial(2)] show ?thesis
\<comment> \<open> If it originates in @{term Q}, we need to further split the inductive hypotheses. Either
the remainder of @{term Q} (@{term Q\<^sub>0}) can reach @{term R}, or else the loop body has
executed, and so there is a chain. \<close>
proof (cases rule: disj_cases)
case disj1
then show ?thesis
using trace_to_Sil P_def Q_def by blast
next
case disj2
then obtain s s\<^sub>0 ::"'s" and chn::"('e,'s) chain" and tr\<^sub>0 tr\<^sub>1 :: "'e list" and B' :: "('e, 's) itree" and n :: nat
where steps: "Q\<^sub>0 \<midarrow>tr\<^sub>0\<leadsto> \<checkmark> s" "b s" "s \<turnstile> B \<midarrow>chn\<leadsto>\<^sup>* s\<^sub>0" "\<forall> s\<in>chain_states chn. b s" "B s\<^sub>0 \<midarrow>tr\<^sub>1\<leadsto> B'"
"tr = tr\<^sub>0 @ chain_trace chn @ tr\<^sub>1" "P' = B' \<bind> Sils n \<circ> iterate b B" "n \<le> 1"
by blast
show ?thesis
apply (simp add: Q_def P_def)
apply (rule_tac disjI2)
apply (rule_tac x="s" in exI)
apply (rule_tac x="chn" in exI)
apply (rule_tac x="s\<^sub>0" in exI)
apply (rule_tac x="tr\<^sub>0" in exI)
apply (simp add: steps)
using steps(5) steps(8) apply auto
done
qed
next
case (continue s)
note defs = this
with defs(2) show ?thesis
\<comment> \<open> If the @{term \<tau>} originates in the loop, then again we need two cases: (1) it originates
in the first execution of body, or (2) some further iterations. \<close>
proof (cases rule: iterate_SilE)
case (continue s')
with trace_to_Sil(2)[of "Ret s' :: ('e, 's) itree", simplified, OF continue(3)] show ?thesis
proof (cases rule: disj_cases)
case disj1
with continue show ?thesis
apply (simp add: defs)
apply (rule_tac disjI2)
apply (rule_tac x="[]" in exI)
apply (rule_tac x="s" in exI)
apply (auto)
apply (metis Sils.simps(1) bot_nat_0.extremum)
done
next
case disj2
then obtain chn s\<^sub>0 tr\<^sub>1 P'' n where P'': "b s'" "s' \<turnstile> B \<midarrow>chn\<leadsto>\<^sup>* s\<^sub>0" "\<forall> s\<in>chain_states chn. b s" "B s\<^sub>0 \<midarrow>tr\<^sub>1\<leadsto> P''" "tr = chain_trace chn @ tr\<^sub>1" "P' = P'' \<bind> Sils n \<circ> iterate b B" "n \<le> 1"
by auto
with continue show ?thesis
apply (simp add: defs)
apply (rule_tac disjI2)
apply (rule_tac x="([], s') # chn" in exI)
apply (rule_tac x="s\<^sub>0" in exI)
apply auto
done
qed
next
\<comment> \<open> The prefix terminated in state @{term s}, and body @{term B} made partial progress. \<close>
case (initial P\<^sub>0)
hence P: "P = P\<^sub>0 \<bind> \<tau> \<circ> \<checkmark> \<bind> iterate b B"
by (simp add: bind_itree_assoc[THEN sym] comp_def)
from trace_to_Sil(2)[of "P\<^sub>0 \<bind> Sil \<circ> Ret", OF P] initial(1)
show ?thesis
proof (cases rule: disj_cases)
case disj1
with initial(1,2) show ?thesis
apply (simp add: defs)
apply (rule disjI2)
apply (rule_tac x="[]" in exI)
apply (rule_tac x="s" in exI)
apply (auto)
apply (erule bind_extra_tauE)
apply (simp)
apply (rule_tac x="P\<^sub>0'" in exI)
apply (auto simp add: bind_itree_assoc[THEN sym] comp_def)
apply (rule_tac x="1" in exI)
apply simp
apply (metis Sils.simps(1) bind_Ret bot_nat_0.extremum trace_to.trace_to_Sil)
done
next
case disj2
then obtain s\<^sub>0 chn s\<^sub>1 tr\<^sub>0 tr\<^sub>1 P'' n
where P\<^sub>0: "(P\<^sub>0 \<bind> \<tau> \<circ> \<checkmark>) \<midarrow>tr\<^sub>0\<leadsto> \<checkmark> s\<^sub>0" "b s\<^sub>0" "s\<^sub>0 \<turnstile> B \<midarrow>chn\<leadsto>\<^sup>* s\<^sub>1" "\<forall> s\<in>chain_states chn. b s"
"B s\<^sub>1 \<midarrow>tr\<^sub>1\<leadsto> P''" "tr = tr\<^sub>0 @ chain_trace chn @ tr\<^sub>1" "P' = P'' \<bind> Sils n \<circ> iterate b B" "n \<le> 1"
by auto
then show ?thesis
apply (simp add: defs)
apply (rule_tac disjI2)
apply auto
using initial(1) apply fastforce
apply (rule_tac x="(tr\<^sub>0, s\<^sub>0) # chn" in exI)
apply (rule_tac x="s\<^sub>1" in exI)
apply auto
apply (simp add: chain_step initial(2) trace_to_post_Sil_iff)
done
qed
qed
qed
next
case (trace_to_Vis e F tr P')
hence "Vis F = Q \<bind> iterate b B" by simp
thus ?case
proof (cases rule: bind_VisE')
case (initial F')
have F: "F(e)\<^sub>p = F'(e)\<^sub>p \<bind> iterate b B"
using initial(2) trace_to_Vis.hyps(1) by auto
from trace_to_Vis(3)[of "F'(e)\<^sub>p", OF F] show ?thesis
proof (cases rule: disj_cases)
case disj1
then show ?thesis
using initial(1) initial(2) trace_to_Vis.hyps(1) by auto
next
case disj2
then show ?thesis
by (metis append_Cons initial(1) initial(2) pdom_map_pfun trace_to.trace_to_Vis trace_to_Vis.hyps(1))
qed
next
case (continue s)
from continue(2) show ?thesis
proof (cases rule: iterate_VisE')
case (body G)
hence "F(e)\<^sub>p = G(e)\<^sub>p \<bind> \<tau> \<circ> iterate b B"
using trace_to_Vis.hyps(1) by auto
hence F: "F(e)\<^sub>p = G(e)\<^sub>p \<bind> \<tau> \<circ> \<checkmark> \<bind> iterate b B"
by (simp add: bind_itree_assoc[THEN sym] comp_def)
from trace_to_Vis(3)[of "G(e)\<^sub>p \<bind> Sil \<circ> Ret", OF F] show ?thesis
proof (cases rule: disj_cases)
case disj1
then obtain Q' where "(G(e)\<^sub>p \<bind> \<tau> \<circ> \<checkmark>) \<midarrow>tr\<leadsto> Q'" "P' = Q' \<bind> iterate b B"
by auto
with trace_to_Vis(1) continue(1) body show ?thesis
apply (simp)
apply (rule_tac x="[]" in exI)
apply (rule_tac x="s" in exI)
apply auto
apply (erule bind_extra_tauE)
apply (rule_tac x="P\<^sub>0" in exI)
apply auto
apply (rule_tac x="1" in exI)
apply (simp add: bind_itree_assoc[THEN sym] comp_def)
apply (metis Sils.simps(1) bind_Ret bot_nat_0.extremum comp_eq_dest_lhs)
done
next
case disj2
then obtain s\<^sub>0 chn s\<^sub>1 tr\<^sub>0 tr\<^sub>1 P'' n
where G: "(G(e)\<^sub>p \<bind> \<tau> \<circ> \<checkmark>) \<midarrow>tr\<^sub>0\<leadsto> \<checkmark> s\<^sub>0" "b s\<^sub>0"
and chn: "s\<^sub>0 \<turnstile> B \<midarrow>chn\<leadsto>\<^sup>* s\<^sub>1" "\<forall> s\<in>chain_states chn. b s" "B s\<^sub>1 \<midarrow>tr\<^sub>1\<leadsto> P''" "tr = tr\<^sub>0 @ chain_trace chn @ tr\<^sub>1" and P': "P' = P'' \<bind> Sils n \<circ> iterate b B" "n \<le> 1"
by auto
from G trace_to_Vis(1,2) continue(1) body P' chn show ?thesis
apply (simp)
apply (erule bind_extra_tauE)
apply (rule_tac x="(e # tr, s\<^sub>0) # chn" in exI)
apply (rule_tac x="s\<^sub>1" in exI)
apply auto
apply (rule_tac x="(e # tr\<^sub>0, s\<^sub>0) # chn" in exI)
apply (rule_tac x="s\<^sub>1" in exI)
apply auto
done
qed
qed
qed
qed
lemma iterate_chain [consumes 1, case_names iterates terms]:
assumes
"iterate b B s \<midarrow>tr\<leadsto> R"
"\<And> chn s\<^sub>0 tr\<^sub>0 P' n.
\<lbrakk> b s;
s \<turnstile> B \<midarrow>chn\<leadsto>\<^sup>* s\<^sub>0;
\<forall> s\<in>chain_states chn. b s;
B s\<^sub>0 \<midarrow>tr\<^sub>0\<leadsto> P';
tr = chain_trace chn @ tr\<^sub>0;
R = P' \<bind> Sils n \<circ> iterate b B;
n \<le> 1
\<rbrakk> \<Longrightarrow> P"
"\<lbrakk> \<not> b s; tr = []; R = \<checkmark> s \<rbrakk> \<Longrightarrow> P"
shows P
proof (cases "b s")
case True
show ?thesis
using prefixed_iterate_chain[of "\<checkmark> s", simplified, OF assms(1)]
proof (cases rule: disj_cases)
case disj1
then show ?thesis
by (rule_tac assms(2)[of "[]" s "[]" "B s" 1, simplified], auto simp add: iterate.code comp_def assms True)
next
case disj2
then show ?thesis
using assms(2) by force
qed
next
case False
thus ?thesis
using assms(1) assms(3) by force
qed
lemma iterate_terminates_chain:
assumes
"iterate b B s \<midarrow>tr\<leadsto> \<checkmark> s'"
"\<lbrakk> b s; (b, s) \<turnstile> B \<midarrow>tr\<leadsto>\<^sub>\<checkmark> s'; \<not> b s' \<rbrakk> \<Longrightarrow> P"
"\<lbrakk> \<not> b s; tr = []; s' = s \<rbrakk> \<Longrightarrow> P"
shows P
using assms
proof (cases rule: iterate_chain)
case (iterates chn s\<^sub>0 tr\<^sub>0 P' n)
hence P': "P' = \<checkmark> s'" "\<not> b s'"
by (auto elim!: bind_RetE', metis Ret_Sils_iff iterate_RetE')+
then show ?thesis
by (metis assms(2) iterates(1-5) itree_term_chain.simps)
next
case terms
then show ?thesis
using assms(3) by fastforce
qed
lemma iterate_term_chain_iff:
"iterate b B s \<midarrow>tr\<leadsto> \<checkmark> s' \<longleftrightarrow>
((\<not> b s \<and> s = s' \<and> tr = []) \<or> (b s \<and> (b, s) \<turnstile> B \<midarrow>tr\<leadsto>\<^sub>\<checkmark> s' \<and> \<not> b s'))"
proof (cases "b s")
case True
then show ?thesis
by (metis iterate_chain_terminates iterate_terminates_chain)
next
case False
then show ?thesis
by force
qed
text \<open> If @{term P} is an invariant of a chain for process @{term C}, then the invariant holds
for every element of the looped process @{term C}. \<close>
lemma chain_invariant:
assumes
"B s" "P s"
"\<And> s s'. \<lbrakk> B s; P s; s' \<in> \<^bold>R(C s) \<rbrakk> \<Longrightarrow> P s'"
"s \<turnstile> C \<midarrow>chn\<leadsto>\<^sup>* s'"
"\<forall> s\<^sub>0\<in>chain_states chn. B s\<^sub>0"
shows "\<forall> s\<^sub>0\<in>chain_states chn. P s\<^sub>0"
proof -
have "\<forall>i<length chn. P (snd (chn ! i))"
proof (clarify)
fix i
assume i: "i < length chn"
thus "P (snd (chn ! i))"
proof (induct i)
case 0
hence "C s \<midarrow>fst (chn ! 0)\<leadsto> \<checkmark> (snd (chn ! 0))"
by (metis assms(4) chain_first_step hd_conv_nth length_greater_0_conv)
thus ?case
by (meson assms(1) assms(2) assms(3) retvals_traceI)
next
case (Suc i)
hence "C (snd (chn ! i)) \<midarrow>fst (chn ! Suc i)\<leadsto> \<checkmark> (snd (chn ! Suc i))"
using assms(4) chain_steps by fastforce
moreover have "P (snd (chn ! i))"
by (simp add: Suc.hyps Suc.prems Suc_lessD)
moreover have "B (snd (chn ! i))"
by (simp add: Suc.prems Suc_lessD assms(5) chain_states_def)
ultimately show ?case
by (meson assms(3) retvals_traceI)
qed
qed
thus ?thesis
by (simp add: chain_stated_indexed)
qed
lemma chain_invariant_simple:
assumes
"P s"
"\<And> s s'. \<lbrakk> P s; s' \<in> \<^bold>R(C s) \<rbrakk> \<Longrightarrow> P s'"
"s \<turnstile> C \<midarrow>chn\<leadsto>\<^sup>* s'"
shows "\<forall> s\<^sub>0\<in>chain_states chn. P s\<^sub>0"
using assms
by (rule_tac chain_invariant[of "\<lambda> s. True" s P C chn s'], auto)
text \<open> We can establish termination, as usual, with an variant function. Here, ``terminates'' means
that an ITree may terminate. \<close>
definition terminates :: "('e, 's) itree \<Rightarrow> bool" where
"terminates P = (\<exists> tr s'. P \<midarrow>tr\<leadsto> \<checkmark> s')"
lemma terminates_Ret: "terminates (\<checkmark> s')"
by (simp add: terminates_def)
lemma terminates_Sil: "terminates (Sil P) = terminates P"
by (simp add: terminates_def)
lemma terminates_Sils: "terminates (Sils n P) = terminates P"
by (simp add: terminates_def)
lemma terminates_bind:
assumes "terminates P" "\<And> v. v \<in> \<^bold>R(P) \<Longrightarrow> terminates(Q v)"
shows "terminates (P \<bind> Q)"
by (meson assms(1) assms(2) retvals_traceI terminates_def trace_to_bind)
lemma not_terminates_diverge:
"terminates diverge = False"
by (meson diverge_no_Ret_trans terminates_def)
text \<open> A terminating pure ITree is divergence free \<close>
lemma terminates_pure_implies_div_free:
assumes "pure_itree P" "terminates P"
shows "div_free P"
by (metis Sils_diverge assms div_free_is_no_divergence no_divergence_def not_terminates_diverge pure_itree_def trace_to_Nil_Sils)
text \<open> The following theorem using both a variant @{term V} and invariant @{term I} to establish
termination. \<close>
lemma wellorder_variant_term_chain:
fixes V :: "'s \<Rightarrow> 'a::wellorder" and I :: "'s \<Rightarrow> bool"
assumes
"\<And> s\<^sub>0. \<lbrakk> b s\<^sub>0; I s\<^sub>0 \<rbrakk> \<Longrightarrow> terminates (B s\<^sub>0)"
"\<And> s\<^sub>0 s\<^sub>1 tr. \<lbrakk> b s\<^sub>0; I s\<^sub>0; B(s\<^sub>0) \<midarrow>tr\<leadsto> \<checkmark> s\<^sub>1 \<rbrakk> \<Longrightarrow> I s\<^sub>1"
"\<And> s\<^sub>0 s\<^sub>1 tr. \<lbrakk> b s\<^sub>0; I s\<^sub>0; B(s\<^sub>0) \<midarrow>tr\<leadsto> \<checkmark> s\<^sub>1 \<rbrakk> \<Longrightarrow> V(s\<^sub>1) < V(s\<^sub>0)"
shows "\<lbrakk> I s; b s \<rbrakk> \<Longrightarrow> \<exists> tr s'. (b, s) \<turnstile> B \<midarrow>tr\<leadsto>\<^sub>\<checkmark> s' \<and> \<not> b s'"
proof (induct "V(s)" arbitrary: s rule: less_induct)
case (less s\<^sub>0)
obtain s\<^sub>1 tr\<^sub>0 where B_next: "B(s\<^sub>0) \<midarrow>tr\<^sub>0\<leadsto> \<checkmark> s\<^sub>1"
by (meson assms(1) less.prems(1) less.prems(2) terminates_def)
have inv: "I s\<^sub>1"
using B_next assms(2) less.prems(1) less.prems(2) by presburger
have dec: "V(s\<^sub>1) < V(s\<^sub>0)"
using B_next assms(3) less.prems(1) less.prems(2) by force
show ?case
proof (cases "b s\<^sub>1")
case True
obtain tr\<^sub>1 s' where chain: "(b, s\<^sub>1) \<turnstile> B \<midarrow>tr\<^sub>1\<leadsto>\<^sub>\<checkmark> s' \<and> \<not> b s'"
using True dec inv less.hyps by presburger
then show ?thesis
by (meson B_next less.prems term_chain_step)
next
case False
then show ?thesis
by (metis B_next iterate_term_chain_iff iterate_term_once less.prems(2))
qed
qed
lemma terminates_iterate_wellorder_variant:
fixes V :: "'s \<Rightarrow> 'a::wellorder" and I :: "'s \<Rightarrow> bool"
assumes
"\<And> s\<^sub>0. \<lbrakk> b s\<^sub>0; I s\<^sub>0 \<rbrakk> \<Longrightarrow> terminates (B s\<^sub>0)"
"\<And> s\<^sub>0 s\<^sub>1 tr. \<lbrakk> b s\<^sub>0; I s\<^sub>0; B(s\<^sub>0) \<midarrow>tr\<leadsto> \<checkmark> s\<^sub>1 \<rbrakk> \<Longrightarrow> I s\<^sub>1"
"\<And> s\<^sub>0 s\<^sub>1 tr. \<lbrakk> b s\<^sub>0; I s\<^sub>0; B(s\<^sub>0) \<midarrow>tr\<leadsto> \<checkmark> s\<^sub>1 \<rbrakk> \<Longrightarrow> V(s\<^sub>1) < V(s\<^sub>0)"
"I s"
shows "terminates (iterate b B s)"
proof (cases "b s")
case True
have "\<exists> tr s'. (b, s) \<turnstile> B \<midarrow>tr\<leadsto>\<^sub>\<checkmark> s' \<and> \<not> b s'"
by (rule wellorder_variant_term_chain[of b I B V], simp_all add: assms True)
then show ?thesis
by (metis iterate_term_chain_iff terminates_def)
next
case False
then show ?thesis
by (simp add: terminates_def)
qed
text \<open> Generalised deadlock freedom check for loops. If @{term P} is sufficient establish deadlock
freedom of @{term C}, and @{term P} is an invariant of @{term C}, which holds also in the initial
state @{term s}, then @{term "loop C s"} is also deadlock free. \<close>
lemma deadlock_free_loop:
assumes cond_dlockf: "\<And> s. P s \<Longrightarrow> deadlock_free (C s)"
and invariant: "\<And> s s'. \<lbrakk> P s; s' \<in> \<^bold>R(C s) \<rbrakk> \<Longrightarrow> P s'"
and initial: "P s"
shows "deadlock_free (loop C s)"
proof (simp add: deadlock_free_def deadlock_def, clarify)
fix tr
assume "loop C s \<midarrow>tr\<leadsto> Vis {\<mapsto>}"
thus False
proof (cases rule: iterate_chain)
case (iterates chn s\<^sub>0 tr\<^sub>0 P' n)
with initial invariant have "\<forall> s\<in>chain_states chn. P s"
by (rule_tac chain_invariant_simple[where s="s" and C="C" and s'="s\<^sub>0"], auto)
hence dlckf_C_s\<^sub>0: "deadlock_free (C s\<^sub>0)"
by (metis cond_dlockf final_state_in_chain initial iterates(2) itree_chain.cases list.distinct(1))
with iterates(6) show False
proof (cases rule: bind_VisE')
case (initial F')
then show ?thesis
by (metis deadlock_def deadlock_free_def dlckf_C_s\<^sub>0 iterates(4) pdom_empty_iff_dom_empty pdom_map_pfun)
next
case (continue s')
have "loop C s' = Vis {\<mapsto>}"
by (metis comp_apply continue(2) deadlock_def deadlock_trace_to trace_of_Sils)
then show ?thesis
by (metis (no_types, lifting) \<open>\<forall>s\<in>chain_states chn. P s\<close> cond_dlockf continue(1) deadlock_def deadlock_free_def deadlock_trace_to final_state_in_chain initial invariant iterate_VisE iterates(2) iterates(4) itree_chain.simps list.distinct(1) pdom_empty_iff_dom_empty pdom_map_pfun pdom_zero retvals_traceI)
qed
next
case terms
then show ?thesis
by blast
qed
qed
end
|
{"author": "isabelle-utp", "repo": "interaction-trees", "sha": "90510d119364f534d2ab61daf2f274060f0a040e", "save_path": "github-repos/isabelle/isabelle-utp-interaction-trees", "path": "github-repos/isabelle/isabelle-utp-interaction-trees/interaction-trees-90510d119364f534d2ab61daf2f274060f0a040e/ITree_Iteration.thy"}
|
[STATEMENT]
lemma del_bal:
assumes "k > 0"
and "root_order k t"
and "bal t"
shows "bal (del k x t)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. bal (del k x t)
[PROOF STEP]
using assms
[PROOF STATE]
proof (prove)
using this:
0 < k
root_order k t
bal t
goal (1 subgoal):
1. bal (del k x t)
[PROOF STEP]
proof(induction k x t rule: del.induct)
[PROOF STATE]
proof (state)
goal (2 subgoals):
1. \<And>k x xs. \<lbrakk>0 < k; root_order k (Leaf xs); bal (Leaf xs)\<rbrakk> \<Longrightarrow> bal (del k x (Leaf xs))
2. \<And>k x ts t. \<lbrakk>\<And>xa y. \<lbrakk>(xa, y) = split ts x; y = []; 0 < k; root_order k t; bal t\<rbrakk> \<Longrightarrow> bal (del k x t); \<And>xa y x21 x22 xb ya. \<lbrakk>(xa, y) = split ts x; y = x21 # x22; (xb, ya) = x21; 0 < k; root_order k xb; bal xb\<rbrakk> \<Longrightarrow> bal (del k x xb); 0 < k; root_order k (Node ts t); bal (Node ts t)\<rbrakk> \<Longrightarrow> bal (del k x (Node ts t))
[PROOF STEP]
case (2 k x ts t)
[PROOF STATE]
proof (state)
this:
\<lbrakk>(?xa, ?y) = split ts x; ?y = []; 0 < k; root_order k t; bal t\<rbrakk> \<Longrightarrow> bal (del k x t)
\<lbrakk>(?xa, ?y) = split ts x; ?y = ?x21.0 # ?x22.0; (?xb, ?ya) = ?x21.0; 0 < k; root_order k ?xb; bal ?xb\<rbrakk> \<Longrightarrow> bal (del k x ?xb)
0 < k
root_order k (Node ts t)
bal (Node ts t)
goal (2 subgoals):
1. \<And>k x xs. \<lbrakk>0 < k; root_order k (Leaf xs); bal (Leaf xs)\<rbrakk> \<Longrightarrow> bal (del k x (Leaf xs))
2. \<And>k x ts t. \<lbrakk>\<And>xa y. \<lbrakk>(xa, y) = split ts x; y = []; 0 < k; root_order k t; bal t\<rbrakk> \<Longrightarrow> bal (del k x t); \<And>xa y x21 x22 xb ya. \<lbrakk>(xa, y) = split ts x; y = x21 # x22; (xb, ya) = x21; 0 < k; root_order k xb; bal xb\<rbrakk> \<Longrightarrow> bal (del k x xb); 0 < k; root_order k (Node ts t); bal (Node ts t)\<rbrakk> \<Longrightarrow> bal (del k x (Node ts t))
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
\<lbrakk>(?xa, ?y) = split ts x; ?y = []; 0 < k; root_order k t; bal t\<rbrakk> \<Longrightarrow> bal (del k x t)
\<lbrakk>(?xa, ?y) = split ts x; ?y = ?x21.0 # ?x22.0; (?xb, ?ya) = ?x21.0; 0 < k; root_order k ?xb; bal ?xb\<rbrakk> \<Longrightarrow> bal (del k x ?xb)
0 < k
root_order k (Node ts t)
bal (Node ts t)
[PROOF STEP]
obtain ls rs where list_split: "split ts x = (ls,rs)"
[PROOF STATE]
proof (prove)
using this:
\<lbrakk>(?xa, ?y) = split ts x; ?y = []; 0 < k; root_order k t; bal t\<rbrakk> \<Longrightarrow> bal (del k x t)
\<lbrakk>(?xa, ?y) = split ts x; ?y = ?x21.0 # ?x22.0; (?xb, ?ya) = ?x21.0; 0 < k; root_order k ?xb; bal ?xb\<rbrakk> \<Longrightarrow> bal (del k x ?xb)
0 < k
root_order k (Node ts t)
bal (Node ts t)
goal (1 subgoal):
1. (\<And>ls rs. split ts x = (ls, rs) \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
by (cases "split ts x")
[PROOF STATE]
proof (state)
this:
split ts x = (ls, rs)
goal (2 subgoals):
1. \<And>k x xs. \<lbrakk>0 < k; root_order k (Leaf xs); bal (Leaf xs)\<rbrakk> \<Longrightarrow> bal (del k x (Leaf xs))
2. \<And>k x ts t. \<lbrakk>\<And>xa y. \<lbrakk>(xa, y) = split ts x; y = []; 0 < k; root_order k t; bal t\<rbrakk> \<Longrightarrow> bal (del k x t); \<And>xa y x21 x22 xb ya. \<lbrakk>(xa, y) = split ts x; y = x21 # x22; (xb, ya) = x21; 0 < k; root_order k xb; bal xb\<rbrakk> \<Longrightarrow> bal (del k x xb); 0 < k; root_order k (Node ts t); bal (Node ts t)\<rbrakk> \<Longrightarrow> bal (del k x (Node ts t))
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
split ts x = (ls, rs)
[PROOF STEP]
show ?case
[PROOF STATE]
proof (prove)
using this:
split ts x = (ls, rs)
goal (1 subgoal):
1. bal (del k x (Node ts t))
[PROOF STEP]
proof (cases rs)
[PROOF STATE]
proof (state)
goal (2 subgoals):
1. \<lbrakk>split ts x = (ls, rs); rs = []\<rbrakk> \<Longrightarrow> bal (del k x (Node ts t))
2. \<And>a list. \<lbrakk>split ts x = (ls, rs); rs = a # list\<rbrakk> \<Longrightarrow> bal (del k x (Node ts t))
[PROOF STEP]
case Nil
[PROOF STATE]
proof (state)
this:
rs = []
goal (2 subgoals):
1. \<lbrakk>split ts x = (ls, rs); rs = []\<rbrakk> \<Longrightarrow> bal (del k x (Node ts t))
2. \<And>a list. \<lbrakk>split ts x = (ls, rs); rs = a # list\<rbrakk> \<Longrightarrow> bal (del k x (Node ts t))
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
rs = []
[PROOF STEP]
have "bal (del k x t)"
[PROOF STATE]
proof (prove)
using this:
rs = []
goal (1 subgoal):
1. bal (del k x t)
[PROOF STEP]
using 2 list_split
[PROOF STATE]
proof (prove)
using this:
rs = []
\<lbrakk>(?xa, ?y) = split ts x; ?y = []; 0 < k; root_order k t; bal t\<rbrakk> \<Longrightarrow> bal (del k x t)
\<lbrakk>(?xa, ?y) = split ts x; ?y = ?x21.0 # ?x22.0; (?xb, ?ya) = ?x21.0; 0 < k; root_order k ?xb; bal ?xb\<rbrakk> \<Longrightarrow> bal (del k x ?xb)
0 < k
root_order k (Node ts t)
bal (Node ts t)
split ts x = (ls, rs)
goal (1 subgoal):
1. bal (del k x t)
[PROOF STEP]
by (simp add: order_impl_root_order)
[PROOF STATE]
proof (state)
this:
bal (del k x t)
goal (2 subgoals):
1. \<lbrakk>split ts x = (ls, rs); rs = []\<rbrakk> \<Longrightarrow> bal (del k x (Node ts t))
2. \<And>a list. \<lbrakk>split ts x = (ls, rs); rs = a # list\<rbrakk> \<Longrightarrow> bal (del k x (Node ts t))
[PROOF STEP]
moreover
[PROOF STATE]
proof (state)
this:
bal (del k x t)
goal (2 subgoals):
1. \<lbrakk>split ts x = (ls, rs); rs = []\<rbrakk> \<Longrightarrow> bal (del k x (Node ts t))
2. \<And>a list. \<lbrakk>split ts x = (ls, rs); rs = a # list\<rbrakk> \<Longrightarrow> bal (del k x (Node ts t))
[PROOF STEP]
have "height (del k x t) = height t"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. height (del k x t) = height t
[PROOF STEP]
using 2 del_height
[PROOF STATE]
proof (prove)
using this:
\<lbrakk>(?xa, ?y) = split ts x; ?y = []; 0 < k; root_order k t; bal t\<rbrakk> \<Longrightarrow> bal (del k x t)
\<lbrakk>(?xa, ?y) = split ts x; ?y = ?x21.0 # ?x22.0; (?xb, ?ya) = ?x21.0; 0 < k; root_order k ?xb; bal ?xb\<rbrakk> \<Longrightarrow> bal (del k x ?xb)
0 < k
root_order k (Node ts t)
bal (Node ts t)
\<lbrakk>0 < ?k; root_order ?k ?t; bal ?t\<rbrakk> \<Longrightarrow> height (del ?k ?x ?t) = height ?t
goal (1 subgoal):
1. height (del k x t) = height t
[PROOF STEP]
by (simp add: order_impl_root_order)
[PROOF STATE]
proof (state)
this:
height (del k x t) = height t
goal (2 subgoals):
1. \<lbrakk>split ts x = (ls, rs); rs = []\<rbrakk> \<Longrightarrow> bal (del k x (Node ts t))
2. \<And>a list. \<lbrakk>split ts x = (ls, rs); rs = a # list\<rbrakk> \<Longrightarrow> bal (del k x (Node ts t))
[PROOF STEP]
moreover
[PROOF STATE]
proof (state)
this:
height (del k x t) = height t
goal (2 subgoals):
1. \<lbrakk>split ts x = (ls, rs); rs = []\<rbrakk> \<Longrightarrow> bal (del k x (Node ts t))
2. \<And>a list. \<lbrakk>split ts x = (ls, rs); rs = a # list\<rbrakk> \<Longrightarrow> bal (del k x (Node ts t))
[PROOF STEP]
have "ts \<noteq> []"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. ts \<noteq> []
[PROOF STEP]
using 2
[PROOF STATE]
proof (prove)
using this:
\<lbrakk>(?xa, ?y) = split ts x; ?y = []; 0 < k; root_order k t; bal t\<rbrakk> \<Longrightarrow> bal (del k x t)
\<lbrakk>(?xa, ?y) = split ts x; ?y = ?x21.0 # ?x22.0; (?xb, ?ya) = ?x21.0; 0 < k; root_order k ?xb; bal ?xb\<rbrakk> \<Longrightarrow> bal (del k x ?xb)
0 < k
root_order k (Node ts t)
bal (Node ts t)
goal (1 subgoal):
1. ts \<noteq> []
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
ts \<noteq> []
goal (2 subgoals):
1. \<lbrakk>split ts x = (ls, rs); rs = []\<rbrakk> \<Longrightarrow> bal (del k x (Node ts t))
2. \<And>a list. \<lbrakk>split ts x = (ls, rs); rs = a # list\<rbrakk> \<Longrightarrow> bal (del k x (Node ts t))
[PROOF STEP]
ultimately
[PROOF STATE]
proof (chain)
picking this:
bal (del k x t)
height (del k x t) = height t
ts \<noteq> []
[PROOF STEP]
have "bal (rebalance_last_tree k ts (del k x t))"
[PROOF STATE]
proof (prove)
using this:
bal (del k x t)
height (del k x t) = height t
ts \<noteq> []
goal (1 subgoal):
1. bal (rebalance_last_tree k ts (del k x t))
[PROOF STEP]
using 2 Nil rebalance_last_tree_bal
[PROOF STATE]
proof (prove)
using this:
bal (del k x t)
height (del k x t) = height t
ts \<noteq> []
\<lbrakk>(?xa, ?y) = split ts x; ?y = []; 0 < k; root_order k t; bal t\<rbrakk> \<Longrightarrow> bal (del k x t)
\<lbrakk>(?xa, ?y) = split ts x; ?y = ?x21.0 # ?x22.0; (?xb, ?ya) = ?x21.0; 0 < k; root_order k ?xb; bal ?xb\<rbrakk> \<Longrightarrow> bal (del k x ?xb)
0 < k
root_order k (Node ts t)
bal (Node ts t)
rs = []
\<lbrakk>bal (Node ?ts ?t); ?ts \<noteq> []\<rbrakk> \<Longrightarrow> bal (rebalance_last_tree ?k ?ts ?t)
goal (1 subgoal):
1. bal (rebalance_last_tree k ts (del k x t))
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
bal (rebalance_last_tree k ts (del k x t))
goal (2 subgoals):
1. \<lbrakk>split ts x = (ls, rs); rs = []\<rbrakk> \<Longrightarrow> bal (del k x (Node ts t))
2. \<And>a list. \<lbrakk>split ts x = (ls, rs); rs = a # list\<rbrakk> \<Longrightarrow> bal (del k x (Node ts t))
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
bal (rebalance_last_tree k ts (del k x t))
[PROOF STEP]
have "bal (rebalance_last_tree k ls (del k x t))"
[PROOF STATE]
proof (prove)
using this:
bal (rebalance_last_tree k ts (del k x t))
goal (1 subgoal):
1. bal (rebalance_last_tree k ls (del k x t))
[PROOF STEP]
using list_split split_conc Nil
[PROOF STATE]
proof (prove)
using this:
bal (rebalance_last_tree k ts (del k x t))
split ts x = (ls, rs)
split ?xs ?p = (?ls, ?rs) \<Longrightarrow> ?xs = ?ls @ ?rs
rs = []
goal (1 subgoal):
1. bal (rebalance_last_tree k ls (del k x t))
[PROOF STEP]
by fastforce
[PROOF STATE]
proof (state)
this:
bal (rebalance_last_tree k ls (del k x t))
goal (2 subgoals):
1. \<lbrakk>split ts x = (ls, rs); rs = []\<rbrakk> \<Longrightarrow> bal (del k x (Node ts t))
2. \<And>a list. \<lbrakk>split ts x = (ls, rs); rs = a # list\<rbrakk> \<Longrightarrow> bal (del k x (Node ts t))
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
bal (rebalance_last_tree k ls (del k x t))
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
using this:
bal (rebalance_last_tree k ls (del k x t))
goal (1 subgoal):
1. bal (del k x (Node ts t))
[PROOF STEP]
using 2 list_split Nil
[PROOF STATE]
proof (prove)
using this:
bal (rebalance_last_tree k ls (del k x t))
\<lbrakk>(?xa, ?y) = split ts x; ?y = []; 0 < k; root_order k t; bal t\<rbrakk> \<Longrightarrow> bal (del k x t)
\<lbrakk>(?xa, ?y) = split ts x; ?y = ?x21.0 # ?x22.0; (?xb, ?ya) = ?x21.0; 0 < k; root_order k ?xb; bal ?xb\<rbrakk> \<Longrightarrow> bal (del k x ?xb)
0 < k
root_order k (Node ts t)
bal (Node ts t)
split ts x = (ls, rs)
rs = []
goal (1 subgoal):
1. bal (del k x (Node ts t))
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
bal (del k x (Node ts t))
goal (1 subgoal):
1. \<And>a list. \<lbrakk>split ts x = (ls, rs); rs = a # list\<rbrakk> \<Longrightarrow> bal (del k x (Node ts t))
[PROOF STEP]
next
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<And>a list. \<lbrakk>split ts x = (ls, rs); rs = a # list\<rbrakk> \<Longrightarrow> bal (del k x (Node ts t))
[PROOF STEP]
case (Cons r rs)
[PROOF STATE]
proof (state)
this:
rs__ = r # rs
goal (1 subgoal):
1. \<And>a list. \<lbrakk>split ts x = (ls, rs__); rs__ = a # list\<rbrakk> \<Longrightarrow> bal (del k x (Node ts t))
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
rs__ = r # rs
[PROOF STEP]
obtain sub sep where r_split: "r = (sub,sep)"
[PROOF STATE]
proof (prove)
using this:
rs__ = r # rs
goal (1 subgoal):
1. (\<And>sub sep. r = (sub, sep) \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
by (cases r)
[PROOF STATE]
proof (state)
this:
r = (sub, sep)
goal (1 subgoal):
1. \<And>a list. \<lbrakk>split ts x = (ls, rs__); rs__ = a # list\<rbrakk> \<Longrightarrow> bal (del k x (Node ts t))
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
r = (sub, sep)
[PROOF STEP]
have sub_height: "height sub = height t" "bal sub"
[PROOF STATE]
proof (prove)
using this:
r = (sub, sep)
goal (1 subgoal):
1. height sub = height t &&& bal sub
[PROOF STEP]
using 2 Cons list_split split_set(1)
[PROOF STATE]
proof (prove)
using this:
r = (sub, sep)
\<lbrakk>(?xa, ?y) = split ts x; ?y = []; 0 < k; root_order k t; bal t\<rbrakk> \<Longrightarrow> bal (del k x t)
\<lbrakk>(?xa, ?y) = split ts x; ?y = ?x21.0 # ?x22.0; (?xb, ?ya) = ?x21.0; 0 < k; root_order k ?xb; bal ?xb\<rbrakk> \<Longrightarrow> bal (del k x ?xb)
0 < k
root_order k (Node ts t)
bal (Node ts t)
rs__ = r # rs
split ts x = (ls, rs__)
split ?ts ?z = (?ls, (?a, ?b) # ?rs) \<Longrightarrow> (?a, ?b) \<in> set ?ts
goal (1 subgoal):
1. height sub = height t &&& bal sub
[PROOF STEP]
by fastforce+
[PROOF STATE]
proof (state)
this:
height sub = height t
bal sub
goal (1 subgoal):
1. \<And>a list. \<lbrakk>split ts x = (ls, rs__); rs__ = a # list\<rbrakk> \<Longrightarrow> bal (del k x (Node ts t))
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
height sub = height t
bal sub
[PROOF STEP]
have "bal (del k x sub)" "height (del k x sub) = height sub"
[PROOF STATE]
proof (prove)
using this:
height sub = height t
bal sub
goal (1 subgoal):
1. bal (del k x sub) &&& height (del k x sub) = height sub
[PROOF STEP]
using sub_height
[PROOF STATE]
proof (prove)
using this:
height sub = height t
bal sub
height sub = height t
bal sub
goal (1 subgoal):
1. bal (del k x sub) &&& height (del k x sub) = height sub
[PROOF STEP]
apply (metis "2.IH"(2) "2.prems"(1) "2.prems"(2) list_split local.Cons order_impl_root_order r_split root_order.simps(2) some_child_sub(1) split_set(1))
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. height (del k x sub) = height sub
[PROOF STEP]
by (metis "2.prems"(1) "2.prems"(2) list_split Cons order_impl_root_order r_split root_order.simps(2) some_child_sub(1) del_height split_set(1) sub_height(2))
[PROOF STATE]
proof (state)
this:
bal (del k x sub)
height (del k x sub) = height sub
goal (1 subgoal):
1. \<And>a list. \<lbrakk>split ts x = (ls, rs__); rs__ = a # list\<rbrakk> \<Longrightarrow> bal (del k x (Node ts t))
[PROOF STEP]
moreover
[PROOF STATE]
proof (state)
this:
bal (del k x sub)
height (del k x sub) = height sub
goal (1 subgoal):
1. \<And>a list. \<lbrakk>split ts x = (ls, rs__); rs__ = a # list\<rbrakk> \<Longrightarrow> bal (del k x (Node ts t))
[PROOF STEP]
have "bal (Node (ls@(sub,sep)#rs) t)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. bal (Node (ls @ (sub, sep) # rs) t)
[PROOF STEP]
using "2.prems"(3) list_split Cons r_split split_conc
[PROOF STATE]
proof (prove)
using this:
bal (Node ts t)
split ts x = (ls, rs__)
rs__ = r # rs
r = (sub, sep)
split ?xs ?p = (?ls, ?rs) \<Longrightarrow> ?xs = ?ls @ ?rs
goal (1 subgoal):
1. bal (Node (ls @ (sub, sep) # rs) t)
[PROOF STEP]
by blast
[PROOF STATE]
proof (state)
this:
bal (Node (ls @ (sub, sep) # rs) t)
goal (1 subgoal):
1. \<And>a list. \<lbrakk>split ts x = (ls, rs__); rs__ = a # list\<rbrakk> \<Longrightarrow> bal (del k x (Node ts t))
[PROOF STEP]
ultimately
[PROOF STATE]
proof (chain)
picking this:
bal (del k x sub)
height (del k x sub) = height sub
bal (Node (ls @ (sub, sep) # rs) t)
[PROOF STEP]
have "bal (Node (ls@(del k x sub,sep)#rs) t)"
[PROOF STATE]
proof (prove)
using this:
bal (del k x sub)
height (del k x sub) = height sub
bal (Node (ls @ (sub, sep) # rs) t)
goal (1 subgoal):
1. bal (Node (ls @ (del k x sub, sep) # rs) t)
[PROOF STEP]
using bal_substitute_subtree[of ls sub sep rs t "del k x sub"]
[PROOF STATE]
proof (prove)
using this:
bal (del k x sub)
height (del k x sub) = height sub
bal (Node (ls @ (sub, sep) # rs) t)
\<lbrakk>bal (Node (ls @ (sub, sep) # rs) t); height sub = height (del k x sub); bal (del k x sub)\<rbrakk> \<Longrightarrow> bal (Node (ls @ (del k x sub, sep) # rs) t)
goal (1 subgoal):
1. bal (Node (ls @ (del k x sub, sep) # rs) t)
[PROOF STEP]
by metis
[PROOF STATE]
proof (state)
this:
bal (Node (ls @ (del k x sub, sep) # rs) t)
goal (1 subgoal):
1. \<And>a list. \<lbrakk>split ts x = (ls, rs__); rs__ = a # list\<rbrakk> \<Longrightarrow> bal (del k x (Node ts t))
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
bal (Node (ls @ (del k x sub, sep) # rs) t)
[PROOF STEP]
have "bal (rebalance_middle_tree k ls (del k x sub) sep rs t)"
[PROOF STATE]
proof (prove)
using this:
bal (Node (ls @ (del k x sub, sep) # rs) t)
goal (1 subgoal):
1. bal (rebalance_middle_tree k ls (del k x sub) sep rs t)
[PROOF STEP]
using rebalance_middle_tree_bal[of ls "del k x sub" sep rs t k]
[PROOF STATE]
proof (prove)
using this:
bal (Node (ls @ (del k x sub, sep) # rs) t)
bal (Node (ls @ (del k x sub, sep) # rs) t) \<Longrightarrow> bal (rebalance_middle_tree k ls (del k x sub) sep rs t)
goal (1 subgoal):
1. bal (rebalance_middle_tree k ls (del k x sub) sep rs t)
[PROOF STEP]
by metis
[PROOF STATE]
proof (state)
this:
bal (rebalance_middle_tree k ls (del k x sub) sep rs t)
goal (1 subgoal):
1. \<And>a list. \<lbrakk>split ts x = (ls, rs__); rs__ = a # list\<rbrakk> \<Longrightarrow> bal (del k x (Node ts t))
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
bal (rebalance_middle_tree k ls (del k x sub) sep rs t)
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
using this:
bal (rebalance_middle_tree k ls (del k x sub) sep rs t)
goal (1 subgoal):
1. bal (del k x (Node ts t))
[PROOF STEP]
using 2 list_split Cons r_split
[PROOF STATE]
proof (prove)
using this:
bal (rebalance_middle_tree k ls (del k x sub) sep rs t)
\<lbrakk>(?xa, ?y) = split ts x; ?y = []; 0 < k; root_order k t; bal t\<rbrakk> \<Longrightarrow> bal (del k x t)
\<lbrakk>(?xa, ?y) = split ts x; ?y = ?x21.0 # ?x22.0; (?xb, ?ya) = ?x21.0; 0 < k; root_order k ?xb; bal ?xb\<rbrakk> \<Longrightarrow> bal (del k x ?xb)
0 < k
root_order k (Node ts t)
bal (Node ts t)
split ts x = (ls, rs__)
rs__ = r # rs
r = (sub, sep)
goal (1 subgoal):
1. bal (del k x (Node ts t))
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
bal (del k x (Node ts t))
goal:
No subgoals!
[PROOF STEP]
qed
[PROOF STATE]
proof (state)
this:
bal (del k x (Node ts t))
goal (1 subgoal):
1. \<And>k x xs. \<lbrakk>0 < k; root_order k (Leaf xs); bal (Leaf xs)\<rbrakk> \<Longrightarrow> bal (del k x (Leaf xs))
[PROOF STEP]
qed simp
|
{"llama_tokens": 8017, "file": "BTree_BPlusTree_Set", "length": 66}
|
#!/usr/bin/env python3
import numpy as np
import cv2
import face_recognition
import sys
from multiprocessing import Queue
from multiprocessing.managers import SyncManager
from queue import Queue as ImageQueue
from pylibfreenect2 import Freenect2, SyncMultiFrameListener
from pylibfreenect2 import FrameType, Registration, Frame
from pylibfreenect2 import setGlobalLogger
setGlobalLogger(None)
print("OpenGL Pipeline")
from pylibfreenect2 import OpenGLPacketPipeline
print("Starting Tracking")
def __draw_bbox(valid, frame, bbox, color, text):
    """Draw a labelled rectangle on *frame* when *valid* is truthy.

    bbox is (left, top, right, bottom); the label is drawn just above the
    top-left corner. No-op when *valid* is falsy.
    """
    if not valid:
        return
    top_left = (bbox[0], bbox[1])
    bottom_right = (bbox[2], bbox[3])
    cv2.rectangle(frame, top_left, bottom_right, color, 2, 1)
    label_origin = (bbox[0], bbox[1] - 4)
    cv2.putText(frame, text, label_origin,
                cv2.FONT_HERSHEY_SIMPLEX, 0.5, color, 1)
def __scale_frame(frame, scale_factor=1):
    """Return *frame* resized by *scale_factor*; unchanged when the factor is 1."""
    if scale_factor != 1:
        frame = cv2.resize(frame, (0, 0), fx=scale_factor, fy=scale_factor)
    return frame
def face_locations(image):
    """Unused placeholder; the name is rebound later to the result of
    face_recognition.face_locations inside the capture loop."""
    return None
class NeuronManager(SyncManager):
    """SyncManager subclass used purely as a namespace for the registered
    remote accessors ('get_web_neuron' / 'get_alexa_neuron') below."""
# Register the remote accessors, then connect to the (already running)
# neuron state server on port 4081 and fetch the two shared neuron proxies.
NeuronManager.register('get_web_neuron')
NeuronManager.register('get_alexa_neuron')
manager = NeuronManager(address=('', 4081), authkey=b'daisy')
manager.connect()
web_neuron = manager.get_web_neuron()
alexa_neuron = manager.get_alexa_neuron()
# Reference photos for the people we can recognize (name -> image path).
faces = {
    "JessePai": "../faces/JPai-1.jpg",
    # "VladMok": "./faces/Vlad.jpg",
    # "TeddyMen": "./faces/TMen-1.jpg"
}
# Precompute one face encoding per person; people whose photo yields no
# detectable face are skipped with a warning.
known_faces = {}
for person in faces:
    image = face_recognition.load_image_file(faces[person])
    print(person)
    face_encoding_list = face_recognition.face_encodings(image)
    if len(face_encoding_list) > 0:
        known_faces[person] = face_encoding_list[0]
    else:
        print("\tCould not find face for person...")
# Open the first Kinect v2 device through libfreenect2 with the OpenGL
# packet pipeline, and prepare frames for the registration step.
pipeline = OpenGLPacketPipeline()
target = "JessePai"  # the person the loop below tracks
fn = Freenect2()
num_devices = fn.enumerateDevices()
if num_devices == 0:
    # NOTE(review): only prints — execution continues and the next call
    # will fail on a missing device; likely should sys.exit() here. Confirm.
    print("No device connected!")
serial = fn.getDeviceSerialNumber(0)
device = fn.openDevice(serial, pipeline = pipeline)
listener = SyncMultiFrameListener(FrameType.Color | FrameType.Depth)
device.setColorFrameListener(listener)
device.setIrAndDepthFrameListener(listener)
device.start()
# Registration maps depth onto the color frame; bigdepth is the full-HD
# depth image aligned to the 1920x1080 color stream (1082 rows incl. padding).
registration = Registration(device.getIrCameraParams(),
                            device.getColorCameraParams())
undistorted = Frame(512, 424, 4)
registered = Frame(512, 424, 4)
bigdepth = Frame(1920, 1082, 4)
# Tracker state placeholders (unused in this pure-recognition test script).
trackerObj = None
face_process_frame = True
bbox = None
track_bbox = None
# Main capture loop: grab a Kinect frame, locate the target's face, draw its
# bounding box and the FPS counter, and push the annotated JPEG to the web
# neuron. Runs until the process is killed.
while True:
    timer = cv2.getTickCount()
    frames = listener.waitForNewFrame()
    color = frames["color"]
    depth = frames["depth"]
    registration.apply(color, depth, undistorted, registered, bigdepth=bigdepth)
    bd = np.resize(bigdepth.asarray(np.float32), (1080, 1920))
    c = cv2.cvtColor(color.asarray(), cv2.COLOR_RGB2BGR)
    face_bbox = None
    new_track_bbox = None
    # Locations and encodings are parallel lists (same index = same face).
    face_locations = face_recognition.face_locations(c, number_of_times_to_upsample=0, model="cnn")
    face_encodings = face_recognition.face_encodings(c, face_locations)
    for i, face_encoding in enumerate(face_encodings):
        matches = face_recognition.compare_faces(
            [known_faces[target]], face_encoding, 0.6)
        if len(matches) > 0 and matches[0]:
            # Use the location paired with THIS encoding. The original read
            # face_locations[0] regardless of which encoding matched, which
            # boxed the wrong person whenever several faces were in frame.
            (top, right, bottom, left) = face_locations[i]
            face_bbox = (left, top, right, bottom)
            mid_w = int((left + right) / 2)
            mid_h = int((top + bottom) / 2)
            break
    __draw_bbox(face_bbox is not None, c, face_bbox, (0, 0, 255), target)
    c = __scale_frame(c, scale_factor = 0.5)
    fps = cv2.getTickFrequency() / (cv2.getTickCount() - timer)
    cv2.putText(c, "FPS : " + str(int(fps)), (100,50),
                cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255,0,255), 1)
    # tobytes() replaces the deprecated tostring() alias (same bytes).
    image = cv2.imencode('.jpg', c)[1].tobytes()
    web_neuron.update([('image', image)])
    listener.release(frames)

# Cleanup. Unreachable while the loop above is infinite, but kept so the
# device is released if a break is ever added. The original also called
# `self.so.close()` here — `self` does not exist at module scope (stray
# copy from a class method), so that line is removed.
cv2.destroyAllWindows()
device.stop()
device.close()
|
{"hexsha": "e89753a08f5c6f06d3fad38c5c20bb8818c9b58d", "size": 3881, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/pure_face_tracking.py", "max_stars_repo_name": "J-Pai/408DaisyJetson", "max_stars_repo_head_hexsha": "a873154325c790303f09ecfc03377066751cd601", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2018-07-04T03:03:34.000Z", "max_stars_repo_stars_event_max_datetime": "2018-07-04T03:03:34.000Z", "max_issues_repo_path": "tests/pure_face_tracking.py", "max_issues_repo_name": "J-Pai/408DaisyJetson", "max_issues_repo_head_hexsha": "a873154325c790303f09ecfc03377066751cd601", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tests/pure_face_tracking.py", "max_forks_repo_name": "J-Pai/408DaisyJetson", "max_forks_repo_head_hexsha": "a873154325c790303f09ecfc03377066751cd601", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-07-24T19:48:57.000Z", "max_forks_repo_forks_event_max_datetime": "2020-07-24T19:48:57.000Z", "avg_line_length": 27.1398601399, "max_line_length": 99, "alphanum_fraction": 0.7003349652, "include": true, "reason": "import numpy", "num_tokens": 1041}
|
import numpy as np
from matplotlib import pyplot
import qutip
# Floquet quasienergies of a sinusoidally driven two-level system, swept
# over the drive amplitude A (qutip Floquet guide example).
delta = 0.2 * 2*np.pi   # qubit energy splitting
eps0 = 0.0 * 2*np.pi    # static bias (zero here)
omega = 1.0 * 2*np.pi   # drive frequency
A_vec = np.linspace(0, 10, 100) * omega  # swept drive amplitudes
T = 2*np.pi/omega       # one driving period
tlist = np.linspace(0.0, 10 * T, 101)  # unused below; kept as in source
psi0 = qutip.basis(2, 0)               # unused below; kept as in source
q_energies = np.zeros((len(A_vec), 2))
H0 = delta/2.0 * qutip.sigmaz() - eps0/2.0 * qutip.sigmax()
args = omega  # passed through to the time-dependence lambda as `w`
for idx, A in enumerate(A_vec):
    H1 = A/2.0 * qutip.sigmax()
    H = [H0, [H1, lambda t, w: np.sin(w*t)]]
    # True requests sorted quasienergies from floquet_modes.
    f_modes,f_energies = qutip.floquet_modes(H, T, args, True)
    q_energies[idx,:] = f_energies
# plot the results
pyplot.plot(
    A_vec/omega, np.real(q_energies[:, 0]) / delta, 'b',
    A_vec/omega, np.real(q_energies[:, 1]) / delta, 'r',
)
pyplot.xlabel(r'$A/\omega$')
pyplot.ylabel(r'Quasienergy / $\Delta$')
pyplot.title(r'Floquet quasienergies')
pyplot.show()
|
{"hexsha": "5781e829910948807e4174030b31d5648f267eb9", "size": 851, "ext": "py", "lang": "Python", "max_stars_repo_path": "doc/guide/scripts/floquet_ex0.py", "max_stars_repo_name": "camponogaraviera/qutip", "max_stars_repo_head_hexsha": "1b1f6dffcb3ab97f11b8c6114293e09f378d2e8f", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 1205, "max_stars_repo_stars_event_min_datetime": "2015-01-02T16:23:42.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-31T03:21:21.000Z", "max_issues_repo_path": "doc/guide/scripts/floquet_ex0.py", "max_issues_repo_name": "camponogaraviera/qutip", "max_issues_repo_head_hexsha": "1b1f6dffcb3ab97f11b8c6114293e09f378d2e8f", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 1361, "max_issues_repo_issues_event_min_datetime": "2015-01-09T23:38:25.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-31T12:26:07.000Z", "max_forks_repo_path": "doc/guide/scripts/floquet_ex0.py", "max_forks_repo_name": "camponogaraviera/qutip", "max_forks_repo_head_hexsha": "1b1f6dffcb3ab97f11b8c6114293e09f378d2e8f", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 569, "max_forks_repo_forks_event_min_datetime": "2015-01-19T06:15:33.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-28T20:43:39.000Z", "avg_line_length": 26.59375, "max_line_length": 62, "alphanum_fraction": 0.6368977673, "include": true, "reason": "import numpy", "num_tokens": 338}
|
#!/usr/bin/env python3
"""
https://www.mathworks.com/help/control/ref/parallel.html
https://www.mathworks.com/help/control/ref/feedback.html
https://www.mathworks.com/help/control/ref/series.html
A transfer function applies to an LTI system and is defined as
H(s) = Y(s)/X(s) (output/input)
in the laplace domain
In the laplace domain differential equations become rational polynomial equations
so the rules of addition/multiplication is basically adding/multiplying polynomials
Transfer functions can be chained together in the following ways
Cascaded:
H1*H2*H3*H4...
Parallel:
H1+H2+H3+H4...
Feedback:
initial condition
H_f = H1
Hn represents the next transfer function
H_f = H_f/(1 + H_f*Hn) (Negative Feedback)
H_f = H_f/(1 - H_f*Hn) (Positive Feedback)
"""
from sympy import *
def tfchain(H):
    """Chain the transfer functions in the list H four ways (cascaded,
    parallel, negative feedback, positive feedback), simplify each result,
    and print them. Returns None; does nothing for an empty list."""
    if len(H) == 0:
        return
    GC = H[0]  # cascaded: product of all stages
    GP = H[0]  # parallel: sum of all stages
    GF = H[0]  # negative-feedback accumulation
    GH = H[0]  # positive-feedback accumulation
    for Hn in H[1:]:
        GC *= Hn
        GP += Hn
        GF = GF/(1 + GF*Hn)
        GH = GH/(1 - GH*Hn)
    # Simplify-expand-simplify collapses each rational expression.
    GC = simplify(expand(simplify(GC)))
    GP = simplify(expand(simplify(GP)))
    GF = simplify(expand(simplify(GF)))
    GH = simplify(expand(simplify(GH)))
    print('Transfer Functions = {}'.format(H))
    print('Cascaded = {}'.format(GC))
    print('Parallel = {}'.format(GP))
    print('Negative Feedback = {}'.format(GF))
    print('Positive Feedback = {}'.format(GH))
    print()
# Demo: pretty-print sympy output and chain two example pairs of
# second/first-order transfer functions in s.
init_printing(use_unicode=True)
s = symbols('s')
tfchain([10/(s*s + 2*s + 10), 5/(s + 5)])
tfchain([(2*s*s + 5*s + 1)/(s*s + 2*s + 3), 5*(s+2)/(s+10)])
|
{"hexsha": "efdb438a4502f760cd21ed3fe79ecc509bdf60c6", "size": 1631, "ext": "py", "lang": "Python", "max_stars_repo_path": "math/controls/tfchain.py", "max_stars_repo_name": "qeedquan/misc_utilities", "max_stars_repo_head_hexsha": "94c6363388662ac8ebbf075b9c853ce6defbb5b3", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 8, "max_stars_repo_stars_event_min_datetime": "2018-10-17T18:17:25.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-18T09:02:53.000Z", "max_issues_repo_path": "math/controls/tfchain.py", "max_issues_repo_name": "qeedquan/misc_utilities", "max_issues_repo_head_hexsha": "94c6363388662ac8ebbf075b9c853ce6defbb5b3", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "math/controls/tfchain.py", "max_forks_repo_name": "qeedquan/misc_utilities", "max_forks_repo_head_hexsha": "94c6363388662ac8ebbf075b9c853ce6defbb5b3", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2020-07-01T13:52:42.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-18T09:10:59.000Z", "avg_line_length": 25.0923076923, "max_line_length": 83, "alphanum_fraction": 0.6143470264, "include": true, "reason": "from sympy", "num_tokens": 493}
|
import sys
import numpy
import powerbox
from matplotlib import pyplot
from radiotelescope import RadioTelescope
from skymodel import SkyRealisation
from radiotelescope import ideal_gaussian_beam
from generaltools import from_lm_to_theta_phi
from generaltools import colorbar
import matplotlib.colors as colors
from scipy.signal import convolve2d
sys.path.append("../")
def main():
    """Compute and plot baseline-participation weight matrices for the MWA
    Phase II compact layout.

    Builds the interleaved real/imaginary antenna-to-baseline incidence
    matrix, pseudo-inverts it to obtain per-antenna baseline weights, maps
    those back onto baseline pairs, and (per the toggles below) plots the
    baseline-length histogram, the matrices, and binned/approximated
    weights. Figures are saved into ``plot_folder``.
    """
    path = "./Data/MWA_Compact_Coordinates.txt"
    plot_folder = "../../Plots/Analytic_Covariance/"
    # Plot toggles — only the grid and binned-weight figures are on.
    plot_u_dist = False
    plot_array_matrix = False
    plot_inverse_matrix = False
    plot_weights = False
    grid_weights = True
    binned_weights = True
    telescope = RadioTelescope(load=True, path=path)
    # |u| per baseline (in wavelengths, per the axis labels below).
    baseline_lengths = numpy.sqrt(telescope.baseline_table.u_coordinates**2 + telescope.baseline_table.v_coordinates**2)
    if plot_u_dist:
        figure_u, axes_u = pyplot.subplots(1,1)
        axes_u.hist(baseline_lengths, density = True, bins = 100, label = "MWA Phase II Compact")
        axes_u.set_xlabel(r"$u\,[\lambda]$")
        axes_u.set_ylabel("Baseline PDF")
        axes_u.legend()
        figure_u.savefig(plot_folder + "MWA_Phase_II_Baseline_PDF.pdf")
    array_matrix = matrix_constructor_alternate(telescope)
    #
    # pyplot.rcParams['xtick.bottom'] = pyplot.rcParams['xtick.labelbottom'] = False
    # pyplot.rcParams['xtick.top'] = pyplot.rcParams['xtick.labeltop'] = True
    if plot_array_matrix:
        figure_amatrix = pyplot.figure(figsize=(250, 10))
        axes_amatrix = figure_amatrix.add_subplot(111)
        plot_amatrix = axes_amatrix.imshow(array_matrix.T, origin = 'lower')
        colorbar(plot_amatrix)
        axes_amatrix.set_xlabel("Baseline Number", fontsize = 20)
        axes_amatrix.set_ylabel("Antenna Number", fontsize = 20)
        figure_amatrix.savefig(plot_folder + "Array_Matrix_Double.pdf")
    # Pseudo-inverse of the incidence matrix: rows map baselines back onto
    # (real, imaginary) antenna solutions.
    inverse_array_matrix = numpy.linalg.pinv(array_matrix)
    if plot_inverse_matrix:
        figure_inverse = pyplot.figure(figsize = (110, 20))
        axes_inverse = figure_inverse.add_subplot(111)
        plot_inverse = axes_inverse.imshow(numpy.abs(inverse_array_matrix))
        colorbar(plot_inverse)
    # Combine the real (even-index) and imaginary (odd-index) components of
    # the pseudo-inverse in quadrature -> one weight per (antenna, baseline).
    baseline_weights = numpy.sqrt((numpy.abs(inverse_array_matrix[::2, ::2])**2 +
                                   numpy.abs(inverse_array_matrix[1::2, 1::2])**2))
    print(f"Every Tile sees {len(baseline_weights[0,:][baseline_weights[0, :] > 1e-4])}")
    # baseline_weights = numpy.sqrt(numpy.abs(inverse_array_matrix[:int(len(telescope.antenna_positions.antenna_ids) - 1), :int(len(baseline_lengths))])**2 + \
    # numpy.abs(inverse_array_matrix[int(len(telescope.antenna_positions.antenna_ids) -1 ):, :int(len(baseline_lengths)):])**2)
    if plot_weights:
        figure_weights, axes_weights = pyplot.subplots(1,1)
        normalised_weights = axes_weights.imshow(baseline_weights)
        axes_weights.set_title("Antenna Baseline Weights")
        colorbar(normalised_weights)
    # blaah = numpy.unique(baseline_weights)
    # figblaah, axblaah = pyplot.subplots(1,1)
    # axblaah.hist(baseline_weights.flatten(), bins = 100)
    # axblaah.set_yscale('log')
    # Per-baseline-pair weights: for each baseline, add the weight rows of
    # its two antennas in quadrature.
    uu_weights = numpy.zeros((len(baseline_lengths), len(baseline_lengths)))
    baselines = telescope.baseline_table
    antennas = telescope.antenna_positions.antenna_ids
    for i in range(len(baseline_lengths)):
        index1 = numpy.where(antennas == baselines.antenna_id1[i])[0]
        index2 = numpy.where(antennas == baselines.antenna_id2[i])[0]
        # Index 0 has no row in baseline_weights (its two columns were
        # dropped by matrix_constructor_alternate), so it contributes 0.
        if index1 == 0:
            baseline_weights1 = 0
        else:
            baseline_weights1 = baseline_weights[index1 - 1, :]
        if index2 == 0:
            baseline_weights2 = 0
        else:
            baseline_weights2 = baseline_weights[index2 - 1, :]
        uu_weights[i, :] = numpy.sqrt((baseline_weights1**2 + baseline_weights2**2))
    # Bin baselines by length and sort the weight matrix accordingly.
    u_bins = numpy.linspace(0, numpy.max(baseline_lengths), 101)
    bin_size = (u_bins.max() - u_bins.min())/len(u_bins)
    sorted_indices = numpy.argsort(baseline_lengths)
    sorted_weights = uu_weights[sorted_indices, :][:, sorted_indices]
    bin_indices = numpy.digitize(baseline_lengths[sorted_indices], u_bins)
    # NOTE(review): 190 and 2489 look like hand-picked probe indices — confirm.
    print(f"A uncalibrated baseline sees {len(uu_weights[:, 190][uu_weights[:, 190] > 1e-4])}")
    print(f"A calibrated baseline sees {len(uu_weights[190, :][uu_weights[190, :] > 1e-4])}")
    print(f"A sorted uncalibrated baseline sees {len(sorted_weights[:, 2489][sorted_weights[:, 2489] > 1e-4])}")
    print(f"A sorted calibrated baseline sees {len(sorted_weights[190, :][sorted_weights[190, :] > 1e-4])}")
    if grid_weights:
        fig_cal, axes_cal = pyplot.subplots(1,2, figsize = (100, 50))
        cal_plot = axes_cal[0].imshow(uu_weights, origin = 'lower', interpolation = 'none')
        axes_cal[0].set_xlabel("Uncalibrated Baseline Index")
        axes_cal[0].set_ylabel("Calibrated Baseline Index")
        axes_cal[0].set_title("Quadrature Added Real and Imaginary Weights")
        colorbar(cal_plot)
        sorted_plot = axes_cal[1].imshow(sorted_weights, interpolation='none', origin='lower')
        axes_cal[1].set_xlabel("Uncalibrated Baseline Index")
        axes_cal[1].set_title(" Baseline Length Sorted Weights")
        colorbar(sorted_plot)
        # Draw grid lines at every bin boundary of the sorted matrix.
        for i in range(len(bin_indices)):
            if i == 0:
                pass
            elif bin_indices[i] == bin_indices[i-1]:
                pass
            else:
                axes_cal[1].axvline(i, linestyle = "-", color = 'gray', alpha = 0.4 )
                axes_cal[1].axhline(i, linestyle = "-", color = 'gray', alpha = 0.4)
    # unique_values = numpy.unique(u_u_weights)
    # figs, axs = pyplot.subplots(1,1)
    # axs.hist(unique_values, bins = 1000)
    if binned_weights:
        # Histogram the pairwise weights into (u, u') bins, and compare with
        # an approximation built from the baseline-length PDF.
        bin_counter = numpy.zeros_like(uu_weights)
        bin_counter[uu_weights != 0] = 1
        uu1, uu2 = numpy.meshgrid(baseline_lengths, baseline_lengths)
        flattened_uu1 = uu1.flatten()
        flattened_uu2 = uu2.flatten()
        computed_weights = numpy.histogram2d(flattened_uu1, flattened_uu2, bins=u_bins,
                                             weights=uu_weights.flatten())
        computed_counts = numpy.histogram2d(flattened_uu1, flattened_uu2, bins=u_bins,
                                            weights = bin_counter.flatten())
        # Row 0: measured summed / count / averaged weights.
        figure_binned, axes_binned = pyplot.subplots(3, 3, figsize = (12, 15), subplot_kw= dict(aspect = 'equal') )
        summed_norm = colors.LogNorm()
        counts_norm = colors.LogNorm()
        averaged_norm = colors.LogNorm()
        summed = axes_binned[0, 1].pcolor(u_bins, u_bins, computed_weights[0], norm = summed_norm)
        counts = axes_binned[0, 2].pcolor(u_bins, u_bins, computed_counts[0], norm = counts_norm)
        averaged = axes_binned[0, 0].pcolor(u_bins, u_bins, computed_weights[0]/computed_counts[0]/bin_size**2,
                                            norm = averaged_norm)
        averaged_cbar = colorbar(averaged)
        counts_cbar = colorbar(counts)
        summed_cbar = colorbar(summed)
        axes_binned[0,1].set_title(r"Summed Weights")
        axes_binned[0,2].set_title(r"Baseline Counts")
        axes_binned[0,0].set_title(r"Averaged Weights")
        # Row 1: PDF-based approximation of the same three quantities.
        baseline_pdf = numpy.histogram(baseline_lengths, bins = u_bins, density = True)
        ww1, ww2 = numpy.meshgrid(baseline_pdf[0], baseline_pdf[0])
        summed_anorm = colors.LogNorm( )
        counts_anorm = colors.LogNorm( )
        averaged_anorm = colors.LogNorm( )
        approx_sum = convolve2d(numpy.diag(baseline_pdf[0]), ww1, mode = 'same')
        #convolve2d(numpy.diag(baseline_pdf[0])*bin_size**2*len(baseline_lengths)**2, ww1*ww2*bin_size**2*len(baseline_lengths)**2, mode = 'same')*ww1*ww2*bin_size**2*len(baseline_lengths)**2
        # convolve2d(ww2, convolve2d(numpy.diag(baseline_pdf[0]), ww1, mode = 'same'), mode='same')# (ww1*ww2)*bin_size**2*len(baseline_lengths)
        approx_counts = (ww1*ww2)*bin_size**2*len(baseline_lengths)**2
        approx_averaged = approx_sum/approx_counts
        averaged_aplot = axes_binned[1, 0].pcolor(u_bins, u_bins , approx_averaged, norm = averaged_anorm)
        sum_aplot = axes_binned[1, 1].pcolor(u_bins, u_bins, approx_sum, norm=summed_anorm)
        counts_aplot = axes_binned[1, 2].pcolor(u_bins, u_bins, approx_counts, norm=counts_anorm)
        acbar_sum = colorbar(sum_aplot)
        acbar_averaged = colorbar(averaged_aplot)
        acbar_counts = colorbar(counts_aplot)
        # Row 2: difference between measured average and approximated sum.
        axes_binned[2, 0].set_title(r"Differences")
        blaah = axes_binned[2, 0].pcolor(u_bins, u_bins, computed_weights[0]/computed_counts[0] - approx_sum)
        axes_binned[2, 0].set_ylabel(r"$u^{\prime}\,[\lambda]$")
        colorbar(blaah)
        axes_binned[0, 0].set_ylabel(r"$u^{\prime}\,[\lambda]$")
        axes_binned[1, 0].set_ylabel(r"$u^{\prime}\,[\lambda]$")
        axes_binned[2, 0].set_ylabel(r"$u^{\prime}\,[\lambda]$")
        axes_binned[2, 0].set_xlabel(r"$u\,[\lambda]$")
        axes_binned[1, 1].set_xlabel(r"$u\,[\lambda]$")
        axes_binned[1, 2].set_xlabel(r"$u\,[\lambda]$")
        figure_binned.savefig(plot_folder + "Baseline_Weights_uu.pdf")
    pyplot.show()
    return
def matrix_constructor_alternate(telescope):
    """Build the interleaved real/imaginary antenna-to-baseline incidence
    matrix and drop the first antenna's two columns.

    Row 2i marks both antennas of baseline i with +1 (real part); row 2i+1
    marks them with +1/-1 (imaginary part). The returned matrix has shape
    (2 * n_baselines, 2 * (n_antennas - 1)) — the source calls the column
    removal the "constrained" matrix.
    """
    antennas = telescope.antenna_positions.antenna_ids
    baselines = telescope.baseline_table
    n_baselines = baselines.number_of_baselines
    array_matrix = numpy.zeros((2 * n_baselines, 2 * len(antennas)))
    for row in range(n_baselines):
        first = numpy.where(antennas == baselines.antenna_id1[row])[0]
        second = numpy.where(antennas == baselines.antenna_id2[row])[0]
        # real equation: +1 for each antenna
        array_matrix[2 * row, 2 * first] = 1
        array_matrix[2 * row, 2 * second] = 1
        # imaginary equation: +1 / -1
        array_matrix[2 * row + 1, 2 * first + 1] = 1
        array_matrix[2 * row + 1, 2 * second + 1] = -1
    return array_matrix[:, 2:]
def matrix_constructor_sep(telescope):
    """Build the interleaved real/imaginary antenna-to-baseline incidence
    matrix WITHOUT dropping any columns.

    Same layout as matrix_constructor_alternate, but the full
    (2 * n_baselines, 2 * n_antennas) matrix is returned.
    """
    antennas = telescope.antenna_positions.antenna_ids
    baselines = telescope.baseline_table
    array_matrix = numpy.zeros((2 * baselines.number_of_baselines, 2 * len(antennas)))
    for b in range(baselines.number_of_baselines):
        i1 = numpy.where(antennas == baselines.antenna_id1[b])[0]
        i2 = numpy.where(antennas == baselines.antenna_id2[b])[0]
        real_row = 2 * b
        imag_row = real_row + 1
        # real equation: +1 for each antenna
        array_matrix[real_row, 2 * i1] = 1
        array_matrix[real_row, 2 * i2] = 1
        # imaginary equation: +1 / -1
        array_matrix[imag_row, 2 * i1 + 1] = 1
        array_matrix[imag_row, 2 * i2 + 1] = -1
    return array_matrix
def matrix_constructor_sep_shift(telescope):
    """Build the antenna-to-baseline incidence matrix with a block layout:
    all real rows/columns first, then all imaginary rows/columns.

    Top-left (n_baselines x n_antennas) block holds the +1/+1 real entries;
    bottom-right block holds the +1/-1 imaginary entries.
    """
    antennas = telescope.antenna_positions.antenna_ids
    baselines = telescope.baseline_table
    n_base = baselines.number_of_baselines
    n_ant = len(antennas)
    array_matrix = numpy.zeros((2 * n_base, 2 * n_ant))
    for b in range(n_base):
        i1 = numpy.where(antennas == baselines.antenna_id1[b])[0]
        i2 = numpy.where(antennas == baselines.antenna_id2[b])[0]
        # real block
        array_matrix[b, i1] = 1
        array_matrix[b, i2] = 1
        # imaginary block, shifted by the block sizes
        array_matrix[n_base + b, n_ant + i1] = 1
        array_matrix[n_base + b, n_ant + i2] = -1
    return array_matrix
def matrix_constructor_double(telescope):
    """Build a one-row-per-baseline incidence matrix: antenna 1 is marked in
    the left half of the columns and antenna 2 in the right half.

    Returns an array of shape (n_baselines, 2 * n_antennas).
    """
    antennas = telescope.antenna_positions.antenna_ids
    baselines = telescope.baseline_table
    n_ant = len(antennas)
    array_matrix = numpy.zeros((baselines.number_of_baselines, 2 * n_ant))
    for b in range(baselines.number_of_baselines):
        i1 = numpy.where(antennas == baselines.antenna_id1[b])[0]
        i2 = numpy.where(antennas == baselines.antenna_id2[b])[0]
        array_matrix[b, i1] = 1
        array_matrix[b, n_ant + i2] = 1
    return array_matrix
# Script entry point.
if __name__ == "__main__":
    main()
|
{"hexsha": "f4579b33ecea5d3e837232156bf76015e08fc6da", "size": 12112, "ext": "py", "lang": "Python", "max_stars_repo_path": "approximations/baseline_participation_weights.py", "max_stars_repo_name": "ronniyjoseph/Beam-Perturbations", "max_stars_repo_head_hexsha": "0122fed7e3018f2e188e12b62ad760e11f6eb158", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "approximations/baseline_participation_weights.py", "max_issues_repo_name": "ronniyjoseph/Beam-Perturbations", "max_issues_repo_head_hexsha": "0122fed7e3018f2e188e12b62ad760e11f6eb158", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 4, "max_issues_repo_issues_event_min_datetime": "2019-06-25T02:02:56.000Z", "max_issues_repo_issues_event_max_datetime": "2019-10-24T08:12:41.000Z", "max_forks_repo_path": "approximations/baseline_participation_weights.py", "max_forks_repo_name": "ronniyjoseph/Beam-Perturbations", "max_forks_repo_head_hexsha": "0122fed7e3018f2e188e12b62ad760e11f6eb158", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 40.9189189189, "max_line_length": 195, "alphanum_fraction": 0.6625660502, "include": true, "reason": "import numpy,from scipy", "num_tokens": 3253}
|
""" Helpers for randomized testing """
from sympy import I, nsimplify, S, Tuple, Dummy
from random import uniform
def random_complex_number(a=2, b=-1, c=3, d=1, rational=False):
    """Return a random complex number z with a <= Re z <= c and b <= Im z <= d.

    The bounded box reduces the chance of hitting branch cuts or other
    trouble spots.  With ``rational=True`` both parts are converted to
    exact Rationals via ``nsimplify``.
    """
    re_part = uniform(a, c)
    im_part = uniform(b, d)
    if rational:
        re_part = nsimplify(re_part, rational=True)
        im_part = nsimplify(im_part, rational=True)
    return re_part + I*im_part
def comp(z1, z2, tol):
    """Return True when the error between z1 and z2 is at most tol.

    If z2 is non-zero and ``|z1| > 1`` the error is normalized by ``|z1|``;
    for the absolute error call this as ``comp(z1 - z2, 0, tol)``.
    """
    if not z1:
        # Put the (potentially) non-zero value in z1.
        z1, z2 = z2, z1
        if not z1:
            return True  # both values are zero-like
    error = abs(z1 - z2)
    magnitude = abs(z1)
    if z2 and magnitude > 1:
        return error / magnitude <= tol
    return error <= tol
def test_numerically(f, g, z=None, tol=1.0e-6, a=2, b=-1, c=3, d=1):
    """
    Test numerically that f and g agree when evaluated in the argument z.
    If z is None, all symbols will be tested. This routine does not test
    whether there are Floats present with precision higher than 15 digits
    so if there are, your results may not be what you expect due to round-
    off errors.
    Examples
    ========
    >>> from sympy import sin, cos, S
    >>> from sympy.abc import x
    >>> from sympy.utilities.randtest import test_numerically as tn
    >>> tn(sin(x)**2 + cos(x)**2, 1, x)
    True
    """
    f, g, z = Tuple(f, g, z)
    z = [z] if z else (f.free_symbols | g.free_symbols)
    # Materialize the substitution pairs: on Python 3, zip() returns a
    # one-shot iterator, so passing it to f.subs() would exhaust it and
    # g.subs() would silently substitute nothing.
    reps = list(zip(z, [random_complex_number(a, b, c, d) for _ in z]))
    z1 = f.subs(reps).n()
    z2 = g.subs(reps).n()
    return comp(z1, z2, tol)
def test_derivative_numerically(f, z, tol=1.0e-6, a=2, b=-1, c=3, d=1):
    """Numerically check that the symbolic derivative of f w.r.t. z is correct.

    This routine does not test whether there are Floats present with
    precision higher than 15 digits, so if there are, your results may
    not be what you expect due to round-off errors.
    Examples
    ========
    >>> from sympy import sin, cos
    >>> from sympy.abc import x
    >>> from sympy.utilities.randtest import test_derivative_numerically as td
    >>> td(sin(x), x)
    True
    """
    from sympy.core.function import Derivative
    point = random_complex_number(a, b, c, d)
    symbolic = f.diff(z).subs(z, point)
    numeric = Derivative(f, z).doit_numerically(point)
    return comp(symbolic.n(), numeric.n(), tol)
|
{"hexsha": "967467db675e752ecdb26779d55c049076dca1b2", "size": 2608, "ext": "py", "lang": "Python", "max_stars_repo_path": "sympy/utilities/randtest.py", "max_stars_repo_name": "goodok/sympy", "max_stars_repo_head_hexsha": "de84ed2139125a755ea7b6ba91d945d9fbbe5ed9", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2015-05-11T12:26:38.000Z", "max_stars_repo_stars_event_max_datetime": "2016-08-19T00:11:03.000Z", "max_issues_repo_path": "sympy/utilities/randtest.py", "max_issues_repo_name": "goodok/sympy", "max_issues_repo_head_hexsha": "de84ed2139125a755ea7b6ba91d945d9fbbe5ed9", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "sympy/utilities/randtest.py", "max_forks_repo_name": "goodok/sympy", "max_forks_repo_head_hexsha": "de84ed2139125a755ea7b6ba91d945d9fbbe5ed9", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.0476190476, "max_line_length": 78, "alphanum_fraction": 0.6173312883, "include": true, "reason": "from sympy", "num_tokens": 788}
|
from contextlib import contextmanager
from exptools.logging.tabulate import tabulate
from exptools.logging.console import mkdir_p, colorize
from exptools.logging.autoargs import get_all_parameters
import numpy as np
from collections import OrderedDict, defaultdict
import os, shutil
import os.path as osp
import sys
import datetime
import pandas as pd
import imageio
import csv
import threading
import json
# Optional tensorboardX support: the flag flips to True only when the import
# succeeds; all tb_writer usages below are guarded by it.
# NOTE(review): "_tb_avaliable" is a typo for "available" — kept as-is since
# it is referenced elsewhere in this module.
_tb_avaliable = False
tb_writer = None  # module-level placeholder; Logger instances hold their own writer
try:
    import tensorboardX
except ImportError as e:
    print("TensorboardX is not available in exptools, logging might be limited")
else:
    _tb_avaliable = True
class Logger():
    """ The interface to handle all logging operations (if you are using this library).

    Current logging modalities: text, scalar, image, gif.
    All modalities can be logged in batch, which means the datas should be able to be indexed as data[i]
    NOTE: all filename and paths (except self.log_dir) are relative paths related to self.log_dir
    """
    def __init__(self,
            log_dir, # The abspath of where all log files are put
            refresh= False, # if you don't want to resume your experiment, this will remove everything in log_dir
        ):
        self.refresh = refresh
        self.log_dir = osp.abspath(log_dir)
        mkdir_p(self.log_dir)
        self.mp_lock = threading.Lock()
        # cleaning the log_dir if necessary (files, symlinks and subdirs alike)
        if refresh:
            for filename in os.listdir(self.log_dir):
                _fp = os.path.join(self.log_dir, filename)
                try:
                    if os.path.isfile(_fp) or os.path.islink(_fp):
                        os.unlink(_fp)
                    elif os.path.isdir(_fp):
                        shutil.rmtree(_fp)
                except Exception as e:
                    print('Failed to delete %s. Reason: %s' % (_fp, e))
        # start building all logging stuff
        self.tb_writer = None if not _tb_avaliable else tensorboardX.SummaryWriter(logdir= self.log_dir)
        self._text_prefix = [] # a stack to set prefix
        self._text_files = {} # dict of {filename:file_descriptor}
        self._text_default_file = None
        # assuming current scalar data can be handled by cluster memory (otherwise, solve later)
        self._scalar_prefix = [] # a stack to set prefix
        self._scalar_data = {} # a dict of {filename:pandas_dataframe}
        self._scalar_default_file = None
        self._image_prefix = []
        self._gif_prefix = []
        self.default_step = 0
    # ---- prefix stacks -------------------------------------------------
    # Pushed prefixes are prepended to every tag/message of the matching
    # modality. NOTE(review): prefixes are applied by iterating the stack
    # and doing `out = p + out`, so the LAST pushed prefix ends up leftmost.
    def push_text_prefix(self, prefix: str):
        self._text_prefix.append(prefix)
    def pop_text_prefix(self):
        self._text_prefix.pop(-1)
    @contextmanager
    def text_prefix(self, prefix: str):
        # Scoped variant: prefix is active only inside the `with` block.
        self.push_text_prefix(prefix)
        yield
        self.pop_text_prefix()
    def push_scalar_prefix(self, prefix: str):
        self._scalar_prefix.append(prefix)
    def pop_scalar_prefix(self):
        self._scalar_prefix.pop(-1)
    @contextmanager
    def scalar_prefix(self, prefix: str):
        self.push_scalar_prefix(prefix)
        yield
        self.pop_scalar_prefix()
    def push_image_prefix(self, prefix: str):
        self._image_prefix.append(prefix)
    def pop_image_prefix(self):
        self._image_prefix.pop(-1)
    @contextmanager
    def image_prefix(self, prefix: str):
        self.push_image_prefix(prefix)
        yield
        self.pop_image_prefix()
    def push_gif_prefix(self, prefix: str):
        self._gif_prefix.append(prefix)
    def pop_gif_prefix(self):
        self._gif_prefix.pop(-1)
    @contextmanager
    def gif_prefix(self, prefix: str):
        self.push_gif_prefix(prefix)
        yield
        self.pop_gif_prefix()
    def push_prefix(self, prefix: str):
        # Push the same prefix onto all four modality stacks at once.
        self.push_text_prefix(prefix)
        self.push_scalar_prefix(prefix)
        self.push_image_prefix(prefix)
        self.push_gif_prefix(prefix)
    def pop_prefix(self):
        self.pop_text_prefix()
        self.pop_scalar_prefix()
        self.pop_image_prefix()
        self.pop_gif_prefix()
    @contextmanager
    def prefix(self, prefix: str):
        """ All modality prefix """
        self.push_prefix(prefix)
        yield
        self.pop_prefix()
    # ---- text output management ---------------------------------------
    def add_text_output(self, filename: str):
        # The first file ever added becomes the default text sink.
        # Files are opened in append mode so experiments can be resumed.
        if not self._text_default_file:
            self._text_default_file = filename
        self._text_files[filename] = open(osp.join(self.log_dir, filename), mode= "a")
    def remove_text_output(self, filename):
        if filename == self._text_default_file:
            print(colorize(
                "Warning: You are removing default text output",
                color= "yellow",
            ))
            self._text_default_file = None
        self._text_files[filename].close()
        self._text_files.pop(filename)
    @contextmanager
    def additional_text_output(self, filename):
        # Temporarily register an extra text file for the `with` block.
        self.add_text_output(filename)
        yield
        self.remove_text_output(filename)
    def redirect_stdout_to_text_output(self):
        """ NOTE: You have to add_text_output before calling this method
        """
        sys.stdout = self._text_files[self._text_default_file]
    def redirect_stdout_to_console(self):
        # Restore the interpreter's original stdout.
        sys.stdout = sys.__stdout__
    def save_param_dict(self, param, filename):
        # Persist a plain dict of (JSON-serializable) parameters.
        assert isinstance(param, dict)
        with open(osp.join(self.log_dir, filename), "w") as fd:
            json.dump(param, fd, indent= 4)
    # ---- scalar output management -------------------------------------
    def add_scalar_output(self, filename: str):
        # First added file becomes the default scalar sink. Unless the log
        # dir was refreshed, an existing CSV is loaded so runs can resume.
        if not self._scalar_default_file:
            self._scalar_default_file = filename
        if not self.refresh and osp.isfile(osp.join(self.log_dir, filename)):
            self._scalar_data[filename] = pd.read_csv(osp.join(self.log_dir, filename))
        else:
            # NOTE(review): DataFrame.append is deprecated/removed in
            # pandas >= 2.0 — this module requires an older pandas.
            self._scalar_data[filename] = pd.DataFrame().append({}, ignore_index= True)
    def remove_scalar_output(self, filename= None):
        # Flush the in-memory dataframe to CSV, then drop it.
        if filename is None: filename = self._scalar_default_file
        if filename == self._scalar_default_file:
            print(colorize(
                "Warning: You are removing default scalar output",
                color= "yellow",
            ))
            self._scalar_default_file = None
        self._scalar_data[filename].to_csv(osp.join(self.log_dir, filename), index= False)
        self._scalar_data.pop(filename)
    @contextmanager
    def additional_scalar_output(self, filename):
        self.add_scalar_output(filename)
        yield
        self.remove_scalar_output(filename)
    def log_text(self, data, step= None,
            filename= None,
            with_prefix= True,
            with_timestamp=True,
            color=None
        ):
        # Write one line of text to console + file (+ tensorboard if present).
        if filename is None: filename = self._text_default_file
        if step is None: step = self.default_step
        out = data
        if with_prefix:
            for p in self._text_prefix:
                out = p + out
        if with_timestamp:
            now = datetime.datetime.now() # dateutil.tz.tzlocal())
            timestamp = now.strftime('%Y-%m-%d %H:%M:%S.%f %Z')
            out = "%s | %s" % (timestamp, out)
        if color is not None:
            out = colorize(out, color)
        print(out)
        self._text_files[filename].write(out + "\n")
        self._text_files[filename].flush()
        if not self.tb_writer is None:
            self.tb_writer.add_text("text", out, step)
    def log_scalar(self, tag, data, step= None, filename= None, with_prefix= True, **kwargs):
        """
        @Args:
            tag: string;
            data: a number (not array)
            step: a int of the iteration number (starting from 0). If `filename` provided,
                you need to give proper `step` of current `filename` and increment one by one.
        """
        if filename is None: filename = self._scalar_default_file
        if with_prefix:
            for p in self._scalar_prefix:
                tag = p + tag
        # maintain pandas DataFrame: grow it with empty rows until `step` exists
        df_len = len(self._scalar_data[filename])
        if step is None: step = self.default_step
        if step > (df_len - 1):
            for _ in range(step - df_len + 1):
                self._scalar_data[filename] = self._scalar_data[filename].append({}, ignore_index= True)
            if step > 1:
                print(colorize("You might forget to dump_scalar on a regular basis, this might cause the scalar data lost", color= "yellow"))
        if not tag in self._scalar_data[filename]:
            self._scalar_data[filename][tag] = np.nan
        try:
            # NOTE(review): chained indexing `.loc[step][tag] = data` may write
            # to a temporary copy on some pandas versions; `.loc[step, tag]`
            # would be the reliable spelling — confirm before changing.
            self._scalar_data[filename].loc[step][tag] = data
        except KeyError as e:
            print(colorize("KeyError: {}".format(e), color= "red"))
            print(colorize("You might forget to dump_scalar for your scalar file, check demo script please", color= "yellow"))
            exit(-1)
        # tensorboardX API
        if not self.tb_writer is None:
            self.tb_writer.add_scalar(tag, data, step)
    def log_scalar_batch(self, tag, data, step= None, filename= None, **kwargs):
        """ Record a batch of data with several statictis
        data: a array of numbers np.array is better
        """
        # Each statistic is logged as its own sub-tag; NaNs are ignored.
        if not isinstance(data, np.ndarray): data = np.array(data)
        if len(data) > 0:
            self.log_scalar(tag + "/Average", np.nanmean(data), step, filename, **kwargs)
            self.log_scalar(tag + "/Std", np.nanstd(data), step, filename, **kwargs)
            self.log_scalar(tag + "/Max", np.nanmax(data), step, filename, **kwargs)
            self.log_scalar(tag + "/Min", np.nanmin(data), step, filename, **kwargs)
            self.log_scalar(tag + "/Len", np.count_nonzero(~np.isnan(data)), step, filename, **kwargs)
    def dump_scalar(self, filename= None):
        """ In order to reflect the scalar data to the file and data loss due to program crash
        we write current scalar dataframe to csv file
        """
        if filename is None: filename = self._scalar_default_file
        self._scalar_data[filename].to_csv(osp.join(self.log_dir, filename), index= False)
        self.log_text("Dumping scalar data for {}".format(filename), len(self._scalar_data[filename]))
        print(tabulate( self._scalar_data[filename].iloc[-1].items() ))
        # Append a fresh empty row that the next round of log_scalar fills in.
        self._scalar_data[filename] = self._scalar_data[filename].append({}, ignore_index= True)
    def __old_dump_scalar(self, filename= None):
        """ Due to csv feature, you need to dump scalar to csv file. You can
        also specify the filename for which file you are dumping to
        """
        # NOTE(review): legacy dead code — it references self._scalar_files,
        # self._scalar_current_data and self._TEMP_CSV_FILENAME, none of which
        # are initialized anywhere in this class; calling it would raise
        # AttributeError. Kept verbatim for reference only.
        if filename is None: filename = self._scalar_default_file
        current_reader = csv.reader(self._scalar_files[filename])
        # print current data
        if len(current_reader) == 0:
            text_step = 0
        else:
            text_step = len(current_reader) - 1
        self.log_text("Dumping scalar data for {}".format(filename), text_step)
        print(tabulate( self._scalar_current_data[filename].items() ))
        # check current file keys, and determine whether to rewrite the entire file
        if len(current_reader) > 0:
            old_keys = next(current_reader)
            current_keys = list(self._scalar_current_data[filename].keys()) # a copy of keys
            del current_reader
            # checking keys
            key_unchanged = len(old_keys) == len(current_keys)
            for csv_k, data_k in zip(old_keys, current_keys):
                if csv_k != data_k:
                    key_unchanged = False; break
            if key_unchanged:
                # keep writing
                current_writer = csv.DictWriter(self._scalar_files[filename], current_keys)
                current_writer.writerow(self._scalar_current_data[filename])
                self._scalar_files[filename].flush()
            else:
                # rewrite the entire csv file (hope this never comes)
                keys_to_add = []
                for key in old_keys: # if current_keys < old_keys
                    if not key in current_keys:
                        self._scalar_current_data[filename][key] = np.nan
                with open(osp.join(self.log_dir, self._TEMP_CSV_FILENAME), "w") as new_fd:
                    old_reader = csv.DictReader(self._scalar_files[filename])
                    new_writer = csv.DictWriter(new_fd, fieldnames= list(self._scalar_current_data[filename].keys()))
                    # rewrite old data
                    for row in old_reader:
                        row = defaultdict(lambda:np.nan, **row) # if current_keys > old_keys
                        new_writer.writerow(row)
                    # write new data
                    new_writer.writerow(self._scalar_current_data[filename])
                    new_fd.flush()
                # replace file descriptor
                self._scalar_files[filename].close()
                os.remove(osp.join(self.log_dir, filename)) # NOTE: currently, `filename` is invalid filename
                os.rename(
                    osp.join(self.log_dir, self._TEMP_CSV_FILENAME),
                    osp.join(self.log_dir, filename),
                )
                self._scalar_files[filename] = open(osp.join(self.log_dir, filename))
        else:
            # new file, write directly
            del current_reader
            file_writer = csv.DictWriter(self._scalar_files[filename], fieldnames= list(self._scalar_files[filename].keys()))
            file_writer.writeheader()
            file_writer.writerow(self._scalar_current_data[filename])
        # clear out current data (buffer)
        for k in self._scalar_current_data[filename].keys():
            self._scalar_current_data[filename][k] = np.nan
    def log_image(self, tag, data, step= None, with_prefix= True, **kwargs):
        """ NOTE: data must be (H, W) or (3, H, W) or (4, H, W) from 0-255 uint8
        """
        # Saved as image/{tag}-{step}.png; channel-first data is transposed
        # to channel-last for imageio.
        mkdir_p(osp.join(self.log_dir, "image"))
        if with_prefix:
            for p in self._image_prefix:
                tag = p + tag
        if step is None: step = self.default_step
        filename = osp.join(self.log_dir, "image", "{}-{}.png".format(tag, step))
        if len(data.shape) == 3:
            imageio.imwrite(filename, np.transpose(data, (1,2,0)), format= "PNG")
        else:
            imageio.imwrite(filename, data, format= "PNG")
        if not self.tb_writer is None:
            self.tb_writer.add_image(tag, data, step)
    def log_gif(self, tag, data, step= None, duration= 0.1, with_prefix= True, **kwargs):
        """ record a series of image as gif into file
        NOTE: data must be a sequence of nparray (H, W) or (3, H, W) or (4, H, W) from 0-255 uint8
        """
        mkdir_p(osp.join(self.log_dir, "gif"))
        if with_prefix:
            for p in self._gif_prefix:
                tag = p + tag
        if step is None: step = self.default_step
        filename = osp.join(self.log_dir, "gif", "{}-{}.gif".format(tag, step))
        # NOTE(review): the parenthesization `(len(data) > 0 and
        # len(data[0].shape)) == 3` compares the *and*-expression with 3, so
        # for non-ndarray input the transpose branch is chosen only when
        # len(data[0].shape) == 3 and data is non-empty — likely the intent,
        # but the outer parentheses look misplaced; confirm before changing.
        if isinstance(data, np.ndarray) or (len(data) > 0 and len(data[0].shape)) == 3:
            imageio.mimwrite(filename, [np.transpose(d, (1,2,0)) for d in data], format= "GIF", duration= duration)
        else:
            imageio.mimwrite(filename, data, format= "GIF", duration= duration)
        # TensorboardX does not support this yet
    def dump_data(self):
        """ dump all default data handler, and increase default_step by 1
        """
        self.default_step += 1
        self.dump_scalar()
    def dump(self):
        # Alias of dump_data().
        return self.dump_data()
    def set_step(self, step):
        # Set the default step used when callers pass step=None.
        self.default_step = step
    def __del__(self):
        # Best-effort cleanup: close text files, flush scalar CSVs, close the
        # tensorboard writer. Failures are reported but never raised.
        try:
            for _, v in self._text_files.items():
                v.close()
        except:
            print(colorize("Exceptions when closing text logger", color= "yellow"))
        try:
            for f, d in self._scalar_data.items():
                d.to_csv(osp.join(self.log_dir, f), index= False)
        except:
            print(colorize("Exceptions when closing scalar logger", color= "yellow"))
        try:
            if not self.tb_writer is None:
                self.tb_writer.close()
        except:
            print(colorize("Exceptions when closing tensorboardX writer", color= "yellow"))
    # >>>>>>>>> The followings are APIs for other experiment platforms <<<<<<<<
    def _deprecated_warn(self):
        print(colorize("You are using dereprecated API of exptools logger", color= "yellow"))
    def __getattr__(self, name: str):
        # NOTE(review): the else-branch calls super().__getattr__, which
        # `object` does not define — unknown attributes therefore raise a
        # confusing AttributeError (and the branch implicitly returns None).
        # Confirm whether plain `raise AttributeError(name)` was intended.
        if name == "_tb_writer":
            self._deprecated_warn()
            return self.tb_writer
        else:
            super(Logger, self).__getattr__(self, name)
    @contextmanager
    def tabular_prefix(self, key):
        self.push_scalar_prefix(key)
        yield
        self.pop_scalar_prefix()
    def record_tabular(self, key, val, step= None):
        self._deprecated_warn()
        return self.log_scalar(key, val, step)
    def record_tabular_misc_stat(self, key, val, step= None):
        self._deprecated_warn()
        return self.log_scalar_batch(key, val, step)
    def dump_tabular(self, *args, **kwargs):
        self._deprecated_warn()
        return self.dump_data()
    def log(self, data, step= 0, *args, **kwargs):
        self._deprecated_warn()
        return self.log_text(data, step)
    def record_image(self, *args, **kwargs):
        self._deprecated_warn()
        return self.log_image(*args, **kwargs)
    def record_gif(self, *args, **kwargs):
        self._deprecated_warn()
        return self.log_gif(*args, **kwargs)
    def set_iteration(self, itr):
        self._deprecated_warn()
        return self.set_step(itr)
    def set_snapshot_dir(self, *args):
        self._deprecated_warn()
    def get_snapshot_dir(self):
        self._deprecated_warn()
        return self.log_dir
    def set_snapshot_mode(self, mode):
        self._deprecated_warn()
        self._snapshot_mode = mode
    def get_snapshot_mode(self):
        self._deprecated_warn()
        return self._snapshot_mode
    def set_log_tabular_only(self, mode):
        self._deprecated_warn()
    def set_tf_summary_writter(self, *args, **kwargs):
        self._deprecated_warn()
    def save_itr_params(self, *args, **kwargs):
        self._deprecated_warn()
|
{"hexsha": "a563f82ef8ef5124ae4f2f49deb3e538cd7aa1b4", "size": 18364, "ext": "py", "lang": "Python", "max_stars_repo_path": "exptools/logging/_logger.py", "max_stars_repo_name": "ZiwenZhuang/exptools", "max_stars_repo_head_hexsha": "aa6853f1fb463955e5983a81bfd1d31ba3e7e34a", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 9, "max_stars_repo_stars_event_min_datetime": "2020-04-03T10:41:04.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-24T05:18:37.000Z", "max_issues_repo_path": "exptools/logging/_logger.py", "max_issues_repo_name": "ZiwenZhuang/exptools", "max_issues_repo_head_hexsha": "aa6853f1fb463955e5983a81bfd1d31ba3e7e34a", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "exptools/logging/_logger.py", "max_forks_repo_name": "ZiwenZhuang/exptools", "max_forks_repo_head_hexsha": "aa6853f1fb463955e5983a81bfd1d31ba3e7e34a", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2022-03-25T06:30:50.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-25T06:30:50.000Z", "avg_line_length": 41.2674157303, "max_line_length": 141, "alphanum_fraction": 0.613428447, "include": true, "reason": "import numpy", "num_tokens": 4114}
|
import cupy as cp
class KMEANS:
    # K-means clustering model (CuPy-accelerated).
    def __init__(self, k):
        # All state other than k is filled in by fit().
        self.train_data = None
        self.k = k
        self.centers = None
        self.clusters = None
        self.test = None
        self.seed = None
        self.tolerance = None
        self.max_iter = None
    def distance(self, vector1, vector2):
        # Euclidean distance; also used on whole center arrays as a
        # convergence measure (Frobenius norm of the difference).
        return cp.sqrt(cp.sum(cp.square(vector2 - vector1)))
    # Initialize the k centroids by sampling k distinct training rows.
    def init_centroids(self):
        cp.random.seed(self.seed)
        n = self.train_data.shape[0]
        assert n >= self.k
        idxs = cp.random.choice(range(n), self.k, replace=False)
        return self.train_data[idxs]
    # Recompute the centroid (mean) of each of the k clusters.
    def compute_centroids(self):
        new_centroids = cp.zeros((self.k, self.train_data.shape[1]))
        for i in range(self.k):
            new_centroids[i] = cp.mean(self.train_data[self.clusters == i], axis=0)
        return new_centroids
    # Distances between every sample and every centroid, expanded as
    # ||x - c||^2 = ||x||^2 - 2*x.c + ||c||^2; abs() guards against tiny
    # negative values from floating-point round-off before the sqrt.
    def compute_distances(self):
        double_xy = 2 * self.train_data.dot(self.centers.T)
        sq_X = cp.sum(cp.square(self.train_data), axis=1, keepdims=True)
        sq_centers = cp.sum(cp.square(self.centers), axis=1)
        dists = cp.sqrt(abs(sq_X - double_xy + sq_centers))
        return dists
    def fit(self, train_data, seed = 110, tolerance = 1e-5, max_iter = 1000):
        """Run Lloyd's iterations until the centers move less than
        `tolerance` or `max_iter` iterations pass; return (clusters, centers)."""
        self.train_data = train_data
        self.seed = seed
        self.tolerance = tolerance
        self.max_iter = max_iter
        self.centers = self.init_centroids()
        dists = self.compute_distances()
        self.clusters = cp.argmin(dists, axis=1)
        i = 0
        while True:
            # Recompute centroids and stop when converged or out of budget.
            new_centers = self.compute_centroids()
            if i > max_iter or self.distance(new_centers, self.centers) <= tolerance:
                break
            self.centers = new_centers
            dists = self.compute_distances()
            self.clusters = cp.argmin(dists, axis=1)
            i += 1
        return self.clusters, self.centers
|
{"hexsha": "3f9576e672f0a47bffcb953ddaba849d7bc2fb6a", "size": 2003, "ext": "py", "lang": "Python", "max_stars_repo_path": "mlcu/ml/KMEANS.py", "max_stars_repo_name": "haomingdouranggouqil/cuml", "max_stars_repo_head_hexsha": "4fbbc2bcf381f57333be99fa8490eccb3168b641", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "mlcu/ml/KMEANS.py", "max_issues_repo_name": "haomingdouranggouqil/cuml", "max_issues_repo_head_hexsha": "4fbbc2bcf381f57333be99fa8490eccb3168b641", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "mlcu/ml/KMEANS.py", "max_forks_repo_name": "haomingdouranggouqil/cuml", "max_forks_repo_head_hexsha": "4fbbc2bcf381f57333be99fa8490eccb3168b641", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.7936507937, "max_line_length": 85, "alphanum_fraction": 0.5966050924, "include": true, "reason": "import cupy", "num_tokens": 532}
|
// Copyright (c) 2021 Graphcore Ltd. All rights reserved.
#include "poplin/Cholesky.hpp"
#include "poplin/MatMul.hpp"
#include "poplin/TriangularSolve.hpp"
#include <boost/assign/list_of.hpp>
#include <boost/optional.hpp>
#include <boost/optional/optional_io.hpp>
#include <boost/program_options.hpp>
#include <boost/random.hpp>
#include <boost/version.hpp>
#include <fstream>
#include <iomanip>
#include <iostream>
#include <poplibs_support/TestDevice.hpp>
#include <poplibs_support/VectorUtils.hpp>
#include <poplibs_test/GeneralMatrixMultiply.hpp>
#include <poplibs_test/Util.hpp>
#include <poplin/MatMul.hpp>
#include <poplin/codelets.hpp>
#include <popops/codelets.hpp>
#include <poputil/TileMapping.hpp>
#include <sstream>
using namespace poplibs_support;
// Debug helper: dump a grouped (group x row x column) matrix to stdout.
void printArray(std::string name, boost::multi_array<double, 3> a) {
  std::cout << name << ": " << std::endl;
  const std::size_t numGroups = a.shape()[0];
  const std::size_t numRows = a.shape()[1];
  const std::size_t numCols = a.shape()[2];
  for (std::size_t group = 0; group < numGroups; ++group) {
    std::cout << group << std::endl;
    for (std::size_t row = 0; row < numRows; ++row) {
      std::cout << " ";
      for (std::size_t col = 0; col < numCols; ++col)
        std::cout << std::setw(10) << a[group][row][col];
      std::cout << std::endl;
    }
    std::cout << std::endl;
  }
}
// Build a batch of symmetric positive-definite test matrices: fill L with
// uniform values in (0.01, 1.0), form pd = L * L^T via the grouped matmul
// (second flag presumably transposes the right operand — matches the
// symmetric product; confirm against generalGroupedMatrixMultiply's
// signature), then add `rank` to every diagonal entry to guarantee strict
// positive definiteness via diagonal dominance.
boost::multi_array<double, 3> createPositiveDefiniteMatrix(std::size_t batches,
                                                           std::size_t rank) {
  boost::multi_array<double, 3> l(boost::extents[batches][rank][rank]);
  boost::multi_array<double, 3> pd(boost::extents[batches][rank][rank]);
  std::mt19937 randomEngine;
  boost::random::uniform_real_distribution<> dist(0.01, 1.0);
  for (std::size_t b = 0; b < batches; b++) {
    for (std::size_t r = 0; r < rank; r++) {
      for (std::size_t c = 0; c < rank; c++) {
        double v = dist(randomEngine);
        l[b][r][c] = v;
      }
    }
  }
  poplibs_test::gemm::generalGroupedMatrixMultiply(l, l, pd, false, true);
  for (std::size_t b = 0; b < batches; b++) {
    for (std::size_t r = 0; r < rank; r++) {
      // Diagonal boost keeps the matrix well-conditioned for Cholesky.
      pd[b][r][r] += rank;
    }
  }
  return pd;
}
// Return a copy of `m` keeping only the requested triangle (diagonal
// included); all other entries are zeroed.
const boost::multi_array<double, 3>
maskTriangularMatrix(const boost::multi_array<double, 3> &m,
                     bool lower = true) {
  const std::size_t numBatches = m.shape()[0];
  const std::size_t order = m.shape()[1];
  boost::multi_array<double, 3> masked(boost::extents[numBatches][order][order]);
  for (std::size_t batch = 0; batch < numBatches; ++batch) {
    for (std::size_t row = 0; row < order; ++row) {
      for (std::size_t col = 0; col < order; ++col) {
        const bool keep = lower ? (col <= row) : (col >= row);
        masked[batch][row][col] = keep ? m[batch][row][col] : 0;
      }
    }
  }
  return masked;
}
int main(int argc, char **argv) try {
namespace po = boost::program_options;
DeviceType deviceType;
boost::optional<unsigned> tilesPerIPU;
po::options_description desc("Options");
unsigned numBatches = 1;
unsigned aRank;
unsigned bRank = -1;
bool leftSide = true;
poplar::Type dataType;
boost::optional<unsigned> blockSizeParam;
bool lower = true;
bool unitDiagonal = true;
boost::optional<std::string> profileDir;
// clang-format off
desc.add_options()
("help,h", "produce help message")
("compile-only", "Stop after compilation; don't run the program")
("device-type",
po::value<DeviceType>(&deviceType)->default_value(DeviceType::IpuModel2),
deviceTypeHelp)
("profile", "Output profiling report to standard output")
("profile-dir",
po::value<decltype(profileDir)>(&profileDir)
->default_value(boost::none),
"Write profile files to the specified directory.")
("cholesky", "Run cholesky solver")
("ignore-data", "Don't upload and download the results from the device. "
"Note that this means the result is not validated against the model.")
("tiles-per-ipu", po::value(&tilesPerIPU), "Number of tiles per IPU")
("data-type",
po::value(&dataType)->required(),
"Data Type")
("a-rank",
po::value(&aRank)->required(),
"Rank of the A matrix.")
("b-rank",
po::value(&bRank),
"Rank of the B matrix.")
("batches",
po::value(&numBatches)->default_value(numBatches),
"Number of batch dimensions.")
("left-side",
po::value(&leftSide)->default_value(leftSide),
"Left side - solve AX = B, XA = B overwise.")
("lower",
po::value(&lower)->default_value(lower),
"Generate lower A, upper A overwise.")
("unit-diagonal",
po::value(&unitDiagonal)->default_value(unitDiagonal),
"Assume A has unit diagonal.")
("block-size",
po::value(&blockSizeParam),
"Solver block size if specified, no block solver overwise.")
;
// clang-format on
po::variables_map vm;
try {
const po::positional_options_description p;
po::store(
po::command_line_parser(argc, argv).options(desc).positional(p).run(),
vm);
po::notify(vm);
if (vm.count("help")) {
std::cout << desc << "\n";
return 1;
}
} catch (const boost::program_options::error &e) {
std::cerr << e.what() << std::endl;
return 1;
}
poplar::OptionFlags engineOptions;
if (vm.count("profile") || profileDir) {
engineOptions.set("debug.instrumentCompute", "true");
if (profileDir) {
engineOptions.set("autoReport.all", "true");
engineOptions.set("autoReport.directory", *profileDir);
}
}
const bool runCholesky = vm.count("cholesky");
const bool ignoreData = vm.count("ignore-data");
const unsigned numIPUs = 1;
const bool compileIPUCode = true;
auto device =
tilesPerIPU
? createTestDevice(deviceType, numIPUs, *tilesPerIPU, compileIPUCode)
: createTestDeviceFullSize(deviceType, numIPUs, compileIPUCode);
const auto &target = device.getTarget();
poplar::Graph graph(target);
poplin::addCodelets(graph);
popops::addCodelets(graph);
poplin::matmul::PlanningCache cache;
poplar::program::Sequence uploadProg, prog, downloadProg;
poplar::OptionFlags options;
poplar::DebugContext debugContext;
if (blockSizeParam) {
options.set("blockSize", std::to_string(*blockSizeParam));
}
if (runCholesky) {
bRank = 0;
unitDiagonal = false;
if (!leftSide)
throw poplar::poplar_error(
"left-side must be true when using the cholesky solver.");
} else if (bRank < 0) {
throw poplar::poplar_error(
"--b-rank is mandatory option for triangular solver");
}
std::vector<std::size_t> inputAShape{numBatches, aRank, aRank};
std::vector<std::size_t> inputBShape{numBatches, leftSide ? aRank : bRank,
leftSide ? bRank : aRank};
std::vector<std::pair<poplin::MatMulParams, poplar::OptionFlags>>
matmulOptPairs;
if (runCholesky) {
matmulOptPairs = poplin::getCholeskyMatMulPrePlanParameters(
dataType, inputAShape, lower, options);
} else {
matmulOptPairs = poplin::getTriangularSolveMatMulPrePlanParameters(
dataType, dataType, inputAShape, inputBShape, leftSide, lower, options);
}
std::set<poplin::MatMulPlanParams> params;
for (auto &pair : matmulOptPairs)
params.emplace(&target, pair.first, &pair.second);
preplanMatMuls(params, cache);
poplar::Tensor inputA;
if (runCholesky) {
inputA = poplin::createCholeskyInput(graph, dataType, inputAShape, lower,
debugContext, options, &cache);
} else {
inputA = poplin::createTriangularSolveInputLHS(
graph, dataType, dataType, inputAShape, inputBShape, leftSide,
debugContext, options, &cache);
}
poplar::Tensor inputB;
if (!runCholesky) {
inputB = poplin::createTriangularSolveInputRHS(
graph, dataType, dataType, inputAShape, inputBShape, leftSide,
debugContext, options, &cache);
}
poplar::Tensor out;
if (runCholesky) {
poplin::choleskyInPlace(graph, inputA, lower, prog, debugContext, options,
&cache);
out = inputA;
} else {
out = poplin::triangularSolve(graph, inputA, inputB, leftSide, lower,
unitDiagonal, prog, debugContext, options,
&cache);
}
std::vector<std::pair<std::string, char *>> tmap;
std::unique_ptr<char[]> rawHostInputA, rawHostInputB, rawHostOutput,
rawHostOutputT;
if (!ignoreData) {
rawHostInputA = poplibs_test::util::allocateHostMemoryForTensor(
inputA, "A", graph, uploadProg, boost::none, tmap);
if (!runCholesky) {
rawHostInputB = poplibs_test::util::allocateHostMemoryForTensor(
inputB, "B", graph, uploadProg, boost::none, tmap);
}
rawHostOutput = poplibs_test::util::allocateHostMemoryForTensor(
out, "X", graph, boost::none, downloadProg, tmap);
rawHostOutputT = poplibs_test::util::allocateHostMemoryForTensor(
poplin::transposeGroupedMatrix(out), "XT", graph, boost::none,
downloadProg, tmap);
}
poplar::Engine engine(graph, {uploadProg, prog, downloadProg}, engineOptions);
if (vm.count("compile-only"))
return 0;
boost::multi_array<double, 3> hostInputA;
boost::multi_array<double, 3> hostInputAFilled;
boost::multi_array<double, 3> hostInputB;
if (!ignoreData) {
std::mt19937 randomEngine;
boost::random::uniform_real_distribution<> dist(0.01, 1.0);
boost::random::uniform_real_distribution<> diagonalDist(0.95, 1.05);
poplibs_test::util::attachStreams(engine, tmap);
if (runCholesky) {
hostInputAFilled.resize(boost::extents[numBatches][aRank][aRank]);
hostInputA.resize(boost::extents[numBatches][aRank][aRank]);
hostInputAFilled = createPositiveDefiniteMatrix(numBatches, aRank);
hostInputA = maskTriangularMatrix(hostInputAFilled, lower);
} else {
hostInputA.resize(boost::extents[numBatches][aRank][aRank]);
for (std::size_t g = 0; g < numBatches; ++g) {
auto matrix = hostInputA[g];
for (std::size_t i = 0; i < aRank; ++i) {
auto rows = matrix[i];
for (std::size_t j = 0; j < aRank; ++j) {
double value;
if (i == j) {
value = unitDiagonal ? 1.0 : diagonalDist(randomEngine);
} else if ((i <= j && !lower) || (j <= i && lower)) {
value = dist(randomEngine);
} else {
value = 0.0;
}
rows[j] = value;
}
}
}
}
poplibs_test::util::copy(target, hostInputA, dataType, rawHostInputA.get());
if (!runCholesky) {
hostInputB.resize(
boost::extents[numBatches][inputBShape[1]][inputBShape[2]]);
poplibs_test::util::writeRandomValues(target, dataType, hostInputB, -1.0,
1.0, randomEngine);
poplibs_test::util::copy(target, hostInputB, dataType,
rawHostInputB.get());
}
}
device.bind([&](const poplar::Device &d) {
engine.load(d);
if (!ignoreData) {
// upload
engine.run(0);
}
// convolve
engine.run(1);
if (!ignoreData) {
// download
engine.run(2);
}
});
bool matchesModel = true;
if (!ignoreData) {
uint64_t dim1, dim2;
if (runCholesky) {
dim1 = inputAShape[1];
dim2 = inputAShape[2];
} else {
dim1 = inputBShape[1];
dim2 = inputBShape[2];
}
boost::multi_array<double, 3> hostOutput(
boost::extents[numBatches][dim1][dim2]);
poplibs_test::util::copy(target, dataType, rawHostOutput.get(), hostOutput);
boost::multi_array<double, 3> hostOutputT(
boost::extents[numBatches][dim1][dim2]);
poplibs_test::util::copy(target, dataType, rawHostOutputT.get(),
hostOutputT);
boost::multi_array<double, 3> modelOutput(
boost::extents[numBatches][dim1][dim2]);
boost::multi_array<double, 3> *testOutput;
if (runCholesky) {
testOutput = &hostInputAFilled;
if (lower)
poplibs_test::gemm::generalGroupedMatrixMultiply(
hostOutput, hostOutputT, modelOutput);
else
poplibs_test::gemm::generalGroupedMatrixMultiply(
hostOutputT, hostOutput, modelOutput);
} else {
testOutput = &hostInputB;
if (leftSide) {
poplibs_test::gemm::generalGroupedMatrixMultiply(hostInputA, hostOutput,
modelOutput);
} else {
poplibs_test::gemm::generalGroupedMatrixMultiply(hostOutput, hostInputA,
modelOutput);
}
}
const auto tolerance = dataType == poplar::HALF ? 0.3 : 0.001;
matchesModel = poplibs_test::util::checkIsClose(
"AX vs B: ", *testOutput, modelOutput, tolerance, tolerance);
}
if (deviceType != DeviceType::Cpu && vm.count("profile")) {
engine.printProfileSummary(
std::cout, poplar::OptionFlags{{"showExecutionSteps", "true"}});
}
if (!matchesModel) {
std::cerr << "Validation failed\n";
return 1;
}
return 0;
} catch (const poplar::graph_memory_allocation_error &e) {
std::cerr << e.what() << std::endl;
// this exit code has been marked as a "skip" for ctest.
return 77;
}
|
{"hexsha": "77280576be1497b1d657048e9936f4b47b1f996c", "size": 13316, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "tools/matrix_solver.cpp", "max_stars_repo_name": "graphcore/poplibs", "max_stars_repo_head_hexsha": "3fe5a3ecafe995eddb72675d1b4a7af8a622009e", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 95.0, "max_stars_repo_stars_event_min_datetime": "2020-07-06T17:11:23.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-12T14:42:28.000Z", "max_issues_repo_path": "tools/matrix_solver.cpp", "max_issues_repo_name": "graphcore/poplibs", "max_issues_repo_head_hexsha": "3fe5a3ecafe995eddb72675d1b4a7af8a622009e", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tools/matrix_solver.cpp", "max_forks_repo_name": "graphcore/poplibs", "max_forks_repo_head_hexsha": "3fe5a3ecafe995eddb72675d1b4a7af8a622009e", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 14.0, "max_forks_repo_forks_event_min_datetime": "2020-07-15T12:32:57.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-26T14:58:45.000Z", "avg_line_length": 31.4056603774, "max_line_length": 80, "alphanum_fraction": 0.6167017122, "num_tokens": 3685}
|
subroutine wrimap(lundia ,error ,filename ,selmap ,simdat , &
& itdate ,tzone ,tunit ,dt ,mmax , &
& kmax ,lmax ,lstsci ,ltur ,nmaxus , &
& noroco ,norow ,nostat ,nsrc ,ntruv , &
& grdang ,dpsopt ,sferic ,lsed ,lsedtot , &
& zmodel ,namsrc ,namcon ,namsed , &
& kcu ,kcv ,kcs ,irocol , &
& xcor ,ycor ,xz ,yz ,alfas , &
& dp ,thick ,zk ,sig , &
& dps ,dpu ,dpv ,gsqs ,wrifou , &
& irequest ,fds ,iarrc ,mf ,ml , &
& nf ,nl ,nostatto ,nostatgl ,order_sta , &
& ntruvto ,ntruvgl ,order_tra ,ipartition,gdp )
!----- GPL ---------------------------------------------------------------------
!
! Copyright (C) Stichting Deltares, 2011-2016.
!
! This program is free software: you can redistribute it and/or modify
! it under the terms of the GNU General Public License as published by
! the Free Software Foundation version 3.
!
! This program is distributed in the hope that it will be useful,
! but WITHOUT ANY WARRANTY; without even the implied warranty of
! MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
! GNU General Public License for more details.
!
! You should have received a copy of the GNU General Public License
! along with this program. If not, see <http://www.gnu.org/licenses/>.
!
! contact: delft3d.support@deltares.nl
! Stichting Deltares
! P.O. Box 177
! 2600 MH Delft, The Netherlands
!
! All indications and logos of, and references to, "Delft3D" and "Deltares"
! are registered trademarks of Stichting Deltares, and remain the property of
! Stichting Deltares. All rights reserved.
!
!-------------------------------------------------------------------------------
! $Id: wrimap.f90 6447 2016-08-17 15:23:27Z jagers $
! $HeadURL: https://svn.oss.deltares.nl/repos/delft3d/tags/6686/src/engines_gpl/flow2d3d/packages/io/src/output/wrimap.f90 $
!!--description-----------------------------------------------------------------
!
! Function: Writes the initial group 2 ('map-const') to
! MAP-DAT
! Selection is done using SELMAP. For elements like
! NAMCON where LMAX must be > 0 this coupling between
! LMAX and SELMAP is done in subroutine RDPRFL
! Method used:
!
!!--pseudo code and references--------------------------------------------------
! NONE
!!--declarations----------------------------------------------------------------
use precision
use dfparall
use datagroups
use globaldata
use dffunctionals
use wrtarray, only: wrtarray_nm, wrtvar, wrtarray_n, station, transec
use netcdf
!
implicit none
!
type(globdat), target :: gdp
!
! The following list of pointer parameters is used to point inside the gdp structure
!
real(hp) , pointer :: dearthrad
integer , dimension(:, :) , pointer :: mnit
integer , dimension(:, :) , pointer :: mnstat
integer , pointer :: lsal
integer , pointer :: ltem
integer , pointer :: mfg
integer , pointer :: nfg
integer , pointer :: nmaxgl
integer , pointer :: mmaxgl
integer , pointer :: io_prec
integer , dimension(:) , pointer :: smlay
logical , pointer :: densin
character(20) , dimension(:) , pointer :: namst
character(20) , dimension(:) , pointer :: namtra
logical , pointer :: ztbml
real(fp) , pointer :: rhow
real(fp) , pointer :: ag
!
! Global variables
!
integer , intent(in) :: irequest ! REQUESTTYPE_DEFINE: define variables, REQUESTTYPE_WRITE: write variables
integer , intent(in) :: itdate ! Description and declaration in exttim.igs
integer :: kmax ! Description and declaration in esm_alloc_int.f90
integer , intent(in) :: lmax ! Description and declaration in dimens.igs
integer , intent(in) :: lsed ! Description and declaration in esm_alloc_int.f90
integer , intent(in) :: lsedtot ! Description and declaration in esm_alloc_int.f90
integer , intent(in) :: lstsci ! Description and declaration in esm_alloc_int.f90
integer , intent(in) :: ltur ! Description and declaration in esm_alloc_int.f90
integer :: lundia ! Description and declaration in inout.igs
integer :: mmax ! Description and declaration in esm_alloc_int.f90
integer :: nmaxus ! Description and declaration in esm_alloc_int.f90
integer :: noroco ! Description and declaration in esm_alloc_int.f90
integer , intent(in) :: norow ! Description and declaration in esm_alloc_int.f90
integer :: nostat ! Description and declaration in dimens.igs
integer :: nsrc ! Description and declaration in esm_alloc_int.f90
integer :: ntruv ! Description and declaration in dimens.igs
integer , dimension(gdp%d%nlb:gdp%d%nub, gdp%d%mlb:gdp%d%mub) , intent(in) :: ipartition ! Partition number
integer , dimension(5, noroco) :: irocol ! Description and declaration in esm_alloc_int.f90
integer , dimension(gdp%d%nlb:gdp%d%nub, gdp%d%mlb:gdp%d%mub) , intent(in) :: kcs ! Description and declaration in esm_alloc_int.f90
integer , dimension(gdp%d%nlb:gdp%d%nub, gdp%d%mlb:gdp%d%mub) , intent(in) :: kcu ! Description and declaration in esm_alloc_int.f90
integer , dimension(gdp%d%nlb:gdp%d%nub, gdp%d%mlb:gdp%d%mub) , intent(in) :: kcv ! Description and declaration in esm_alloc_int.f90
logical , intent(out) :: error !! Flag=TRUE if an error is encountered
logical , intent(in) :: sferic ! Description and declaration in tricom.igs
logical , intent(in) :: zmodel ! Description and declaration in procs.igs
logical , intent(in) :: wrifou ! Description and declaration in procs.igs
real(fp) , intent(in) :: dt ! Description and declaration in esm_alloc_real.f90
real(fp) , intent(in) :: grdang ! Description and declaration in tricom.igs
real(fp) , intent(in) :: tunit ! Description and declaration in exttim.igs
real(fp) , intent(in) :: tzone ! Description and declaration in exttim.igs
real(fp), dimension(gdp%d%nlb:gdp%d%nub, gdp%d%mlb:gdp%d%mub) , intent(in) :: alfas ! Description and declaration in esm_alloc_real.f90
real(fp), dimension(gdp%d%nlb:gdp%d%nub, gdp%d%mlb:gdp%d%mub) , intent(in) :: dp ! Description and declaration in esm_alloc_real.f90
real(prec), dimension(gdp%d%nlb:gdp%d%nub, gdp%d%mlb:gdp%d%mub) , intent(in) :: dps ! Description and declaration in esm_alloc_real.f90
real(fp), dimension(gdp%d%nlb:gdp%d%nub, gdp%d%mlb:gdp%d%mub) , intent(in) :: dpu ! Description and declaration in esm_alloc_real.f90
real(fp), dimension(gdp%d%nlb:gdp%d%nub, gdp%d%mlb:gdp%d%mub) , intent(in) :: dpv ! Description and declaration in esm_alloc_real.f90
real(fp), dimension(gdp%d%nlb:gdp%d%nub, gdp%d%mlb:gdp%d%mub) , intent(in) :: gsqs ! Description and declaration in esm_alloc_real.f90
real(fp), dimension(gdp%d%nlb:gdp%d%nub, gdp%d%mlb:gdp%d%mub) , intent(in) :: xcor ! Description and declaration in esm_alloc_real.f90
real(fp), dimension(gdp%d%nlb:gdp%d%nub, gdp%d%mlb:gdp%d%mub) , intent(in) :: xz ! Description and declaration in esm_alloc_real.f90
real(fp), dimension(gdp%d%nlb:gdp%d%nub, gdp%d%mlb:gdp%d%mub) , intent(in) :: ycor ! Description and declaration in esm_alloc_real.f90
real(fp), dimension(gdp%d%nlb:gdp%d%nub, gdp%d%mlb:gdp%d%mub) , intent(in) :: yz ! Description and declaration in esm_alloc_real.f90
real(fp), dimension(kmax) :: thick ! Description and declaration in esm_alloc_real.f90
real(fp), dimension(kmax) , intent(in) :: sig ! Vertical coordinates of cell interfaces (SIGMA-MODEL)
real(fp), dimension(0:kmax) , intent(in) :: zk !! Vertical coordinates of cell interfaces
!! Flag for activation of Z-MODEL
character(*) , intent(in) :: filename ! File name
character(16) , intent(in) :: simdat !! Simulation date representing the
!! flow condition at this date
character(20), dimension(lmax) , intent(in) :: namcon ! Description and declaration in esm_alloc_char.f90
character(20), dimension(lsedtot) , intent(in) :: namsed ! Description and declaration in esm_alloc_char.f90
character(20), dimension(nsrc) :: namsrc ! Description and declaration in esm_alloc_char.f90
character(21) , intent(in) :: selmap ! Description and declaration in tricom.igs
character(8) , intent(in) :: dpsopt ! Description and declaration in numeco.igs
integer , intent(in) :: fds ! File handle of output NEFIS/NetCDF file
!
integer , dimension(4,0:nproc-1) , intent(in) :: iarrc ! array containing collected grid indices
integer , dimension(0:nproc-1) , intent(in) :: mf ! first index w.r.t. global grid in x-direction
integer , dimension(0:nproc-1) , intent(in) :: ml ! last index w.r.t. global grid in x-direction
integer , dimension(0:nproc-1) , intent(in) :: nf ! first index w.r.t. global grid in y-direction
integer , dimension(0:nproc-1) , intent(in) :: nl ! last index w.r.t. global grid in y-direction
!
integer , dimension(nostat) , intent(in) :: order_sta
integer , dimension(ntruv) , intent(in) :: order_tra
integer , intent(in) :: nostatgl ! global number of stations (i.e. original number excluding duplicate stations located in the halo regions)
integer , intent(in) :: nostatto ! total number of stations (including "duplicate" stations located in halo regions)
integer , intent(in) :: ntruvgl ! global number of tracks (i.e. original number excluding duplicate stations located in the halo regions)
integer , intent(in) :: ntruvto ! total number of tracks (including "duplicate" stations located in halo regions)
!
! Local variables
!
integer :: epsg
integer :: filetype
integer , dimension(1) :: idummy ! Help array to write integers
integer :: ierror ! Local error flag
integer :: istat
integer :: ifile
integer :: ip ! node number
integer , dimension(:) , allocatable :: ibuff1
integer , dimension(:,:) , allocatable :: ibuff2
integer , dimension(2) :: ival ! Local array for writing ITDATE and time (:= 00:00:00)
integer :: k
integer :: kmaxout
integer :: kmaxout_restr
integer :: l
integer :: lengl ! length of field containing collected data
integer :: lenlo ! length of field containing collected data
integer :: lhlp ! Help variable for teller constituents and turbulent quantities
integer :: lsedbl ! Number of bed load fractions: lsedtot-lsed
integer :: m ! Help variable
integer :: n ! Help variable
integer , external :: neferr
integer , external :: clsnef
integer , external :: open_datdef
!
integer :: iddim_time
integer :: iddim_n
integer :: iddim_nc
integer :: iddim_m
integer :: iddim_mc
integer :: iddim_kmax
integer :: iddim_kmaxout
integer :: iddim_kmaxout_restr
integer :: iddim_kmax1
integer :: iddim_lstsci
integer :: iddim_ltur
integer :: iddim_nostat
integer :: iddim_nsrc
integer :: iddim_ntruv
integer :: iddim_norow
integer :: iddim_noroco
integer :: iddim_lsed
integer :: iddim_lsedtot
integer :: iddim_lsedbl
integer :: iddim_2
integer :: iddim_4
integer :: iddim_5
integer :: iddim_7
integer :: iddim_x
!
integer :: idatt_cmpintf
integer :: idatt_cmplyr
integer :: idatt_grd
integer , dimension(7) :: idatt_grid
integer :: idatt_sigfc
integer :: idatt_sigfi
integer :: idatt_stgd
integer :: idatt_stgu
integer :: idatt_stgv
integer :: idatt_stgz
integer :: idatt_up
integer :: idatt_xyc
integer :: idatt_xyw
!
integer :: idvar_coordmap
integer , dimension(:) , allocatable :: smlay_restr ! copy of smlay, excluding layer zero
real(fp) , dimension(:) , allocatable :: rbuff1 ! local work array for gathering reals (1 dim)
!
character(8) , dimension(1) :: cdum8 ! Help array to write Nefis files
character(16) , dimension(1) :: layermode ! Help array to write LAYER_MODEL
character(16) , dimension(1) :: cdum16 ! Help array to write Nefis files
character(21) , dimension(1) :: cdum21 ! Help array to write Nefis files
character(20) , dimension(:) , allocatable :: csbuff2 ! work array for gathering names of stations (exc. duplicates)
character(11) :: epsgstring
character(16) :: grnam2 ! Data-group name defined for the NEFIS-files
character(20) , dimension(:) , allocatable :: namhlp ! Help array for name constituents and turbulent quantities
character(64) :: xcoordname ! Name of X coordinate: PROJECTION_X_COORDINATE or LONGITUDE
character(64) :: xcoordunit ! Unit of X coordinate: M or DEGREES_EAST
character(64) :: ycoordname ! Name of Y coordinate: PROJECTION_Y_COORDINATE or LATITUDE
character(64) :: ycoordunit ! Unit of Y coordinate: M or DEGREES_NORTH
!
! Data statements
!
data grnam2/'map-const'/
!
!! executable statements -------------------------------------------------------
!
dearthrad => gdp%gdconstd%dearthrad
!
mnit => gdp%gdstations%mnit
mnstat => gdp%gdstations%mnstat
io_prec => gdp%gdpostpr%io_prec
smlay => gdp%gdpostpr%smlay
namst => gdp%gdstations%namst
namtra => gdp%gdstations%namtra
ztbml => gdp%gdzmodel%ztbml
rhow => gdp%gdphysco%rhow
ag => gdp%gdphysco%ag
lsal => gdp%d%lsal
ltem => gdp%d%ltem
densin => gdp%gdmorpar%densin
!
! Initialize local variables
!
kmaxout = size(smlay)
if (smlay(1) == 0) then
kmaxout_restr = kmaxout - 1
allocate(smlay_restr(kmaxout_restr))
smlay_restr = smlay(2:)
else
kmaxout_restr = kmaxout
allocate(smlay_restr(kmaxout_restr))
smlay_restr = smlay
endif
error = .false.
if (wrifou) then
ifile = FILOUT_FOU
else
ifile = FILOUT_MAP
endif
filetype = getfiletype(gdp, ifile)
lsedbl = lsedtot - lsed
!
if (zmodel) then
if (ztbml) then
layermode(1) = 'Z-MODEL, ZTBML'
else
layermode(1) = 'Z-MODEL'
endif
else
layermode(1) = 'SIGMA-MODEL'
endif
!
mfg => gdp%gdparall%mfg
nfg => gdp%gdparall%nfg
mmaxgl => gdp%gdparall%mmaxgl
nmaxgl => gdp%gdparall%nmaxgl
!
ierror = 0
select case (irequest)
case (REQUESTTYPE_DEFINE)
!
if (sferic) then
xcoordname = 'longitude'
xcoordunit = 'degrees_east'
ycoordname = 'laitude'
ycoordunit = 'degrees_north'
else
xcoordname = 'projection_x_coordinate'
xcoordunit = 'm'
ycoordname = 'projection_y_coordinate'
ycoordunit = 'm'
endif
!
! Define dimensions
!
iddim_time = adddim(gdp, lundia, ifile, 'time' , nf90_unlimited)
iddim_n = adddim(gdp, lundia, ifile, 'N' , nmaxgl) ! Number of N-grid points (cell centres)
iddim_nc = adddim(gdp, lundia, ifile, 'NC' , nmaxgl) ! Number of N-grid points (corner points)
iddim_m = adddim(gdp, lundia, ifile, 'M' , mmaxgl) ! Number of M-grid points (cell centres)
iddim_mc = adddim(gdp, lundia, ifile, 'MC' , mmaxgl) ! Number of M-grid points (corner points)
if (zmodel) then
iddim_kmax = adddim(gdp, lundia, ifile, 'K_LYR' , kmax) ! Number of layers
iddim_kmax1 = adddim(gdp, lundia, ifile, 'K_INTF' , kmax+1) ! Number of layer interfaces
idatt_cmpintf = addatt(gdp, lundia, ifile, 'compress','ZK')
idatt_cmplyr = addatt(gdp, lundia, ifile, 'compress','ZK_LYR')
else
iddim_kmax = adddim(gdp, lundia, ifile, 'SIG_LYR', kmax) ! Number of layers
iddim_kmax1 = adddim(gdp, lundia, ifile, 'SIG_INTF', kmax+1) ! Number of layer interfaces
idatt_cmpintf = addatt(gdp, lundia, ifile, 'compress','SIG_INTF')
idatt_cmplyr = addatt(gdp, lundia, ifile, 'compress','SIG_LYR')
endif
iddim_kmaxout = adddim(gdp, lundia, ifile, 'KMAXOUT', kmaxout) ! Number of layers written
iddim_kmaxout_restr = adddim(gdp, lundia, ifile, 'KMAXOUT_RESTR', kmaxout_restr) ! Number of layers written
!
if (lstsci >0) iddim_lstsci = adddim(gdp, lundia, ifile, 'LSTSCI' , lstsci ) ! Number of constituents
if (ltur >0) iddim_ltur = adddim(gdp, lundia, ifile, 'LTUR' , ltur ) ! Number of turbulence quantities
if (nostat >0) iddim_nostat = adddim(gdp, lundia, ifile, 'NOSTAT' , nostatgl) ! Number of monitoring stations
if (nsrc >0) iddim_nsrc = adddim(gdp, lundia, ifile, 'NSRC' , nsrc ) ! Number of discharges
if (ntruv >0) iddim_ntruv = adddim(gdp, lundia, ifile, 'NTRUV' , ntruvgl ) ! Number of monitoring cross-sections
iddim_norow = adddim(gdp, lundia, ifile, 'NOROW' , norow ) ! Number of rows for IROCOL table
iddim_noroco = adddim(gdp, lundia, ifile, 'NOROCO' , noroco ) ! Number of columns of IROCOL table
if (lsed >0) iddim_lsed = adddim(gdp, lundia, ifile, 'LSED' , lsed ) ! Number of sediment constituents
if (lsedtot >0) iddim_lsedtot= adddim(gdp, lundia, ifile, 'LSEDTOT' , lsedtot ) ! Number of total sediment fractions
if (lsedbl >0) iddim_lsedbl = adddim(gdp, lundia, ifile, 'LSEDBL' , lsedbl ) ! Number of bedload sediment fractions
iddim_2 = adddim(gdp, lundia, ifile, 'length_2' , 2 )
iddim_4 = adddim(gdp, lundia, ifile, 'length_4' , 4 )
iddim_5 = adddim(gdp, lundia, ifile, 'length_5' , 5 )
iddim_7 = adddim(gdp, lundia, ifile, 'length_7' , 7 )
!
idatt_xyc = addatt(gdp, lundia, ifile, 'coordinates','XCOR YCOR')
idatt_xyw = addatt(gdp, lundia, ifile, 'coordinates','XZ YZ')
idatt_grd = addatt(gdp, lundia, ifile, 'grid','grid')
idatt_stgu = addatt(gdp, lundia, ifile, 'location','edge1')
idatt_stgv = addatt(gdp, lundia, ifile, 'location','edge2')
idatt_stgz = addatt(gdp, lundia, ifile, 'location','face')
idatt_stgd = addatt(gdp, lundia, ifile, 'location','node')
!
call addatt_class(gdp, lundia, ifile, 'u', (/idatt_grd, idatt_stgu/) )
call addatt_class(gdp, lundia, ifile, 'v', (/idatt_grd, idatt_stgv/) )
call addatt_class(gdp, lundia, ifile, 'z', (/idatt_xyw, idatt_grd, idatt_stgz/) )
call addatt_class(gdp, lundia, ifile, 'd', (/idatt_xyc, idatt_grd, idatt_stgd/) )
!
lhlp = 0
if (index(selmap(6:13), 'Y')/=0) lhlp = lhlp + lstsci
if (index(selmap(14:15), 'Y')/=0) lhlp = lhlp + ltur
lhlp = max(1, lhlp)
iddim_x = adddim(gdp, lundia, ifile, 'length_x' , lhlp )
!
if (lsedtot>0) then
idatt_sigfc = addatt(gdp, lundia, ifile, 'formula_terms', 'sigma: SIG_LYR eta: S1 depth: DPS')
idatt_sigfi = addatt(gdp, lundia, ifile, 'formula_terms', 'sigma: SIG_INTF eta: S1 depth: DPS')
else
idatt_sigfc = addatt(gdp, lundia, ifile, 'formula_terms', 'sigma: SIG_LYR eta: S1 depth: DPS0')
idatt_sigfi = addatt(gdp, lundia, ifile, 'formula_terms', 'sigma: SIG_INTF eta: S1 depth: DPS0')
endif
idatt_up = addatt(gdp, lundia, ifile, 'positive','up')
!
! map-const
!
if (filetype == FTYPE_NEFIS) then ! for NEFIS only
call addelm(gdp, lundia, ifile, grnam2, 'ITDATE', ' ', IO_INT4 , 1, dimids=(/iddim_2/), longname='Initial date (input) & time (default 00:00:00)', unit='[YYYYMMDD]')
call addelm(gdp, lundia, ifile, grnam2, 'TZONE', ' ', io_prec , 0, longname='Local time zone', unit='h')
call addelm(gdp, lundia, ifile, grnam2, 'TUNIT', ' ', io_prec , 0, longname='Time scale related to seconds', unit='s')
call addelm(gdp, lundia, ifile, grnam2, 'DT', ' ', io_prec , 0, longname='Time step (DT*TUNIT sec)')
call addelm(gdp, lundia, ifile, grnam2, 'SIMDAT', ' ', 16 , 0, longname='Simulation date and time [YYYYMMDD HHMMSS]') !CHARACTER
call addelm(gdp, lundia, ifile, grnam2, 'SELMAP', ' ', 21 , 0, longname='Selection flag for field values (2dH, 1dV & 2dV)') !CHARACTER
call addelm(gdp, lundia, ifile, grnam2, 'NMAX', ' ', IO_INT4 , 0, longname='Number of N-grid points')
call addelm(gdp, lundia, ifile, grnam2, 'MMAX', ' ', IO_INT4 , 0, longname='Number of M-grid points')
call addelm(gdp, lundia, ifile, grnam2, 'KMAX', ' ', IO_INT4 , 0, longname='Number of layers')
! LSTSCI var. name in MAP FILE must remain LSTCI for GPP to work properly
call addelm(gdp, lundia, ifile, grnam2, 'LSTCI', ' ', IO_INT4 , 0, longname='Number of constituents')
call addelm(gdp, lundia, ifile, grnam2, 'LTUR', ' ', IO_INT4 , 0, longname='Number of turbulence quantities')
call addelm(gdp, lundia, ifile, grnam2, 'NOSTAT', ' ', IO_INT4 , 0, longname='Number of monitoring stations')
call addelm(gdp, lundia, ifile, grnam2, 'NSRC', ' ', IO_INT4 , 0, longname='Number of discharge')
call addelm(gdp, lundia, ifile, grnam2, 'NTRUV', ' ', IO_INT4 , 0, longname='Number of monitoring cross-sections')
call addelm(gdp, lundia, ifile, grnam2, 'GRDANG', ' ', io_prec , 0, longname='Edge between y-axis and real north', unit='arc_degrees')
endif
call addelm(gdp, lundia, ifile, grnam2, 'XCOR', xcoordname, io_prec , 2, dimids=(/iddim_nc, iddim_mc/), longname='X-coordinate of grid points', unit=xcoordunit)
call addelm(gdp, lundia, ifile, grnam2, 'YCOR', ycoordname, io_prec , 2, dimids=(/iddim_nc, iddim_mc/), longname='Y-coordinate of grid points', unit=ycoordunit)
call addelm(gdp, lundia, ifile, grnam2, 'XZ', xcoordname, io_prec , 2, dimids=(/iddim_n , iddim_m /), longname='X-coordinate of cell centres', unit=xcoordunit)
call addelm(gdp, lundia, ifile, grnam2, 'YZ', ycoordname, io_prec , 2, dimids=(/iddim_n , iddim_m /), longname='Y-coordinate of cell centres', unit=ycoordunit)
call addelm(gdp, lundia, ifile, grnam2, 'ALFAS', ' ', io_prec , 2, dimids=(/iddim_n , iddim_m /), longname='Orientation ksi-axis w.r.t. pos.x-axis at water level point', unit='arc_degrees', acl='z')
call addelm(gdp, lundia, ifile, grnam2, 'KCU', ' ', IO_INT4 , 2, dimids=(/iddim_n , iddim_mc/), longname='Mask array for U-velocity points', acl='u')
call addelm(gdp, lundia, ifile, grnam2, 'KCV', ' ', IO_INT4 , 2, dimids=(/iddim_nc, iddim_m /), longname='Mask array for V-velocity points', acl='v')
call addelm(gdp, lundia, ifile, grnam2, 'KCS', ' ', IO_INT4 , 2, dimids=(/iddim_n , iddim_m /), longname='Non-active/active water-level point', acl='z')
call addelm(gdp, lundia, ifile, grnam2, 'DP0', ' ', io_prec , 2, dimids=(/iddim_nc, iddim_mc/), longname='Initial bottom depth (positive down)', unit='m', acl='d')
call addelm(gdp, lundia, ifile, grnam2, 'DPS0', ' ', io_prec , 2, dimids=(/iddim_n , iddim_m /), longname='Initial bottom depth at zeta points (positive down)', unit='m', acl='z')
call addelm(gdp, lundia, ifile, grnam2, 'DPU0', ' ', io_prec , 2, dimids=(/iddim_n , iddim_mc/), longname='Initial bottom depth at u points (positive down)', unit='m', acl='u')
call addelm(gdp, lundia, ifile, grnam2, 'DPV0', ' ', io_prec , 2, dimids=(/iddim_nc, iddim_m /), longname='Initial bottom depth at v points (positive down)', unit='m', acl='v')
if (filetype == FTYPE_NEFIS) then ! for NEFIS only
call addelm(gdp, lundia, ifile, grnam2, 'DRYFLP', ' ', 8 , 0, longname='Criterium to calculate depth in zeta points') !CHARACTER
call addelm(gdp, lundia, ifile, grnam2, 'NOROW', ' ', IO_INT4 , 0, longname='Number of rows for IROCOL table')
call addelm(gdp, lundia, ifile, grnam2, 'NOROCO', ' ', IO_INT4 , 0, longname='Number of rows & columns of IROCOL table')
call addelm(gdp, lundia, ifile, grnam2, 'IROCOL', ' ', IO_INT4 , 2, dimids=(/iddim_5, iddim_noroco/), longname='Administration of zeta points')
call addelm(gdp, lundia, ifile, grnam2, 'THICK', ' ', io_prec , 1, dimids=(/iddim_kmax/), longname='Fraction part of layer thickness of total water-height', unit='[ .01*% ]')
call addelm(gdp, lundia, ifile, grnam2, 'NAMCON', ' ', 20 , 1, dimids=(/iddim_x/), longname='Name of constituent & turbulent quantity') !CHARACTER
if (nostatgl>0) then
call addelm(gdp, lundia, ifile, grnam2, 'MNSTAT', ' ', IO_INT4 , 2, dimids=(/iddim_2, iddim_nostat/), longname='(M,N) indices of monitoring stations')
call addelm(gdp, lundia, ifile, grnam2, 'NAMST', ' ', 20 , 1, dimids=(/iddim_nostat/), longname='Name of monitoring station') !CHARACTER
endif
if (nsrc>0) then
call addelm(gdp, lundia, ifile, grnam2, 'NAMSRC', ' ', 20 , 1, dimids=(/iddim_nsrc/), longname='Name of discharge source') !CHARACTER
endif
if (ntruvgl>0) then
call addelm(gdp, lundia, ifile, grnam2, 'MNTRA', ' ', IO_INT4 , 2, dimids=(/iddim_4, iddim_ntruv/), longname='(M1,N1)-(M2,N2) indices of monitoring cross-sections')
call addelm(gdp, lundia, ifile, grnam2, 'NAMTRA', ' ', 20 , 1, dimids=(/iddim_ntruv/), longname='Name of monitoring cross-section') !CHARACTER
endif
if (lsed>0) then
call addelm(gdp, lundia, ifile, grnam2, 'LSED', ' ', IO_INT4 , 0, longname='Number of sediment constituents')
endif
if (lsedbl>0) then
call addelm(gdp, lundia, ifile, grnam2, 'LSEDBL', ' ', IO_INT4 , 0, longname='Number of bedload sediment fractions')
endif
if (lsedtot>0) then
call addelm(gdp, lundia, ifile, grnam2, 'NAMSED', ' ', 20 , 1, dimids=(/iddim_lsedtot/), longname='Name of sediment fraction') !CHARACTER
endif
else
if (lstsci>0) then
call addelm(gdp, lundia, ifile, grnam2, 'NAMCON', ' ', 20 , 1, dimids=(/iddim_lstsci/), longname='Name of constituent quantity') !CHARACTER
endif
if (ltur>0) then
call addelm(gdp, lundia, ifile, grnam2, 'NAMTUR', ' ', 20 , 1, dimids=(/iddim_ltur/) , longname='Name of turbulent quantity' ) !CHARACTER
endif
endif
if (zmodel) then
if (filetype /= FTYPE_NEFIS) then
call addelm(gdp, lundia, ifile, grnam2, 'ZK_LYR', ' ', io_prec , 1, dimids=(/iddim_kmax/) , longname='Vertical coordinates of layer centres' , unit='m', attribs=(/idatt_up/) )
endif
call addelm(gdp, lundia, ifile, grnam2, 'ZK', ' ', io_prec , 1, dimids=(/iddim_kmax1/), longname='Vertical coordinates of layer interfaces', unit='m', attribs=(/idatt_up/) )
elseif (filetype /= FTYPE_NEFIS) then
call addelm(gdp, lundia, ifile, grnam2, 'SIG_LYR' , 'ocean_sigma_coordinate', io_prec , 1, dimids=(/iddim_kmax/) , longname='Sigma coordinates of layer centres' , attribs=(/idatt_sigfc/) )
call addelm(gdp, lundia, ifile, grnam2, 'SIG_INTF', 'ocean_sigma_coordinate', io_prec , 1, dimids=(/iddim_kmax1/), longname='Sigma coordinates of layer interfaces', attribs=(/idatt_sigfi/) )
endif
if (filetype == FTYPE_NEFIS) then ! for NEFIS only
call addelm(gdp, lundia, ifile, grnam2, 'COORDINATES', ' ', 16 , 0, longname='Cartesian or Spherical coordinates') !CHARACTER
call addelm(gdp, lundia, ifile, grnam2, 'LAYER_MODEL', ' ', 16 , 0, longname='Sigma-model or Z-model') !CHARACTER
elseif (filetype == FTYPE_NETCDF) then
ierror = nf90_put_att(fds, nf90_global, 'LAYER_MODEL', layermode(1));
call nc_check_err(lundia, ierror, "put_att global LAYER_MODEL", filename)
endif
call addelm(gdp, lundia, ifile, grnam2, 'GSQS', ' ', io_prec , 2, dimids=(/iddim_n, iddim_m/), longname='Horizontal area of computational cell', unit='m2', acl='z')
call addelm(gdp, lundia, ifile, grnam2, 'PPARTITION', ' ', IO_INT4 , 2, dimids=(/iddim_n, iddim_m/), longname='Partition', acl='z')
if (filetype == FTYPE_NEFIS) then ! for NEFIS only
call addelm(gdp, lundia, ifile, grnam2, 'OUTPUT_LAYERS', ' ', IO_INT4, 1, dimids=(/iddim_kmaxout/), longname='User selected output layers')
else
call addelm(gdp, lundia, ifile, grnam2, 'KMAXOUT', ' ', IO_INT4, 1, dimids=(/iddim_kmaxout/), longname='User selected output layer interfaces', attribs=(/idatt_cmpintf/) )
call addelm(gdp, lundia, ifile, grnam2, 'KMAXOUT_RESTR', ' ', IO_INT4, 1, dimids=(/iddim_kmaxout_restr/), longname='User selected output layer centres', attribs=(/idatt_cmplyr/) )
endif
call addelm(gdp, lundia, ifile, grnam2, 'RHOCONST', ' ', io_prec , 0, longname='User specified constant density', unit='kg/m3')
call addelm(gdp, lundia, ifile, grnam2, 'GRAVITY', ' ', io_prec , 0, longname='Gravitational acceleration', unit='m/s2')
!
if (filetype == FTYPE_NETCDF) then
!
! grid topology according SGRID conventions
!
idatt_grid(1) = addatt(gdp, lundia, ifile, 'cf_role','grid_topology')
idatt_grid(2) = addatt(gdp, lundia, ifile, 'topology_dimension',2)
idatt_grid(3) = addatt(gdp, lundia, ifile, 'node_dimensions','MC NC')
idatt_grid(4) = addatt(gdp, lundia, ifile, 'face_dimensions','M:MC (padding: low) N:NC (padding: low)')
idatt_grid(5) = addatt(gdp, lundia, ifile, 'face_coordinates','XZ YZ')
idatt_grid(6) = addatt(gdp, lundia, ifile, 'node_coordinates','XCOR YCOR')
if (zmodel) then
idatt_grid(7) = addatt(gdp, lundia, ifile, 'vertical_dimensions','K_LYR:K_INTF (padding: none)')
else
idatt_grid(7) = addatt(gdp, lundia, ifile, 'vertical_dimensions','SIG_LYR:SIG_INTF (padding: none)')
endif
call addelm(gdp, lundia, ifile, grnam2, 'grid', ' ',IO_INT4, 0, attribs=idatt_grid )
!
! coordinate mapping
!
!ierror = nf90_def_var(fds, 'projected_coordinate_system', nf90_int, idvar_coordmap); call nc_check_err(lundia, ierror, "def_var coordinate mapping", filename)
!if (sferic) then
! epsg = 4326
         ! epsgstring = 'EPSG:4326'
! ierror = nf90_put_att(fds, idvar_coordmap, 'name', 'WGS84' ); call nc_check_err(lundia, ierror, "coordinate mapping put_att", filename)
! ierror = nf90_put_att(fds, idvar_coordmap, 'grid_mapping_name', 'latitude_longitude'); call nc_check_err(lundia, ierror, "coordinate mapping put_att", filename)
!else
! epsg = 28992
         ! epsgstring = 'EPSG:28992'
! ierror = nf90_put_att(fds, idvar_coordmap, 'name', 'Unknown projected' ); call nc_check_err(lundia, ierror, "coordinate mapping put_att", filename)
! ierror = nf90_put_att(fds, idvar_coordmap, 'grid_mapping_name', 'Unknown projected' ); call nc_check_err(lundia, ierror, "coordinate mapping put_att", filename)
!endif
!ierror = nf90_put_att(fds, idvar_coordmap, 'epsg', epsg ); call nc_check_err(lundia, ierror, "coordinate mapping put_att", filename)
!ierror = nf90_put_att(fds, idvar_coordmap, 'longitude_of_prime_meridian', 0d0 ); call nc_check_err(lundia, ierror, "coordinate mapping put_att", filename)
!
! M = semi_major_axis and m = semi_minor_axis, then inverse_flatting should be M/(M-m)
!ierror = nf90_put_att(fds, idvar_coordmap, 'semi_major_axis', dearthrad ); call nc_check_err(lundia, ierror, "coordinate mapping put_att", filename)
!ierror = nf90_put_att(fds, idvar_coordmap, 'semi_minor_axis', 6356752.314245d0 ); call nc_check_err(lundia, ierror, "coordinate mapping put_att", filename)
!ierror = nf90_put_att(fds, idvar_coordmap, 'inverse_flattening', 298.257223563d0 ); call nc_check_err(lundia, ierror, "coordinate mapping put_att", filename)
!ierror = nf90_put_att(fds, idvar_coordmap, 'proj4_params', ' ' ); call nc_check_err(lundia, ierror, "coordinate mapping put_att", filename)
!ierror = nf90_put_att(fds, idvar_coordmap, 'EPSG_code', trim(epsgstring) ); call nc_check_err(lundia, ierror, "coordinate mapping put_att", filename)
!ierror = nf90_put_att(fds, idvar_coordmap, 'projection_name', ' ' ); call nc_check_err(lundia, ierror, "coordinate mapping put_att", filename)
!ierror = nf90_put_att(fds, idvar_coordmap, 'wkt', ' ' ); call nc_check_err(lundia, ierror, "coordinate mapping put_att", filename)
!
!ierror = nf90_put_att(fds, idvar_yz, 'grid_mapping', 'projected_coordinate_system'); call nc_check_err(lundia, ierror, "put_att YZ grid_mapping", trim(filename))
endif
!
case (REQUESTTYPE_WRITE)
!
if (filetype == FTYPE_NETCDF) then
!
!ierror = nf90_put_var(fds, idvar_coordmap, epsg);call nc_check_err(lundia, ierror, "put_var coordmap", filename)
!
! dummy value for grid
!
call wrtvar(fds, filename, filetype, grnam2, 1, &
& gdp, ierror, lundia, 0, 'grid')
endif
!
if (filetype == FTYPE_NEFIS) then ! for NEFIS only
!
! element 'ITDATE'
!
ival(1) = itdate
ival(2) = 000000
call wrtvar(fds, filename, filetype, grnam2, 1, &
& gdp, ierror, lundia, ival, 'ITDATE')
if (ierror/=0) goto 9999
!
! element 'TZONE'
!
call wrtvar(fds, filename, filetype, grnam2, 1, &
& gdp, ierror, lundia, tzone, 'TZONE')
if (ierror/=0) goto 9999
!
! element 'TUNIT'
!
call wrtvar(fds, filename, filetype, grnam2, 1, &
& gdp, ierror, lundia, tunit, 'TUNIT')
if (ierror/=0) goto 9999
!
! element 'DT'
!
call wrtvar(fds, filename, filetype, grnam2, 1, &
& gdp, ierror, lundia, dt, 'DT')
if (ierror/=0) goto 9999
!
! element 'SIMDAT'
!
cdum16(1) = simdat
call wrtvar(fds, filename, filetype, grnam2, 1, &
& gdp, ierror, lundia, cdum16, 'SIMDAT')
if (ierror/=0) goto 9999
!
! element 'SELMAP'
!
cdum21(1) = selmap
call wrtvar(fds, filename, filetype, grnam2, 1, &
& gdp, ierror, lundia, cdum21, 'SELMAP')
if (ierror/=0) goto 9999
!
! element 'NMAX'
!
call wrtvar(fds, filename, filetype, grnam2, 1, &
& gdp, ierror, lundia, nmaxgl, 'NMAX')
if (ierror/=0) goto 9999
!
! element 'MMAX'
!
call wrtvar(fds, filename, filetype, grnam2, 1, &
& gdp, ierror, lundia, mmaxgl, 'MMAX')
if (ierror/=0) goto 9999
!
! element 'KMAX'
!
call wrtvar(fds, filename, filetype, grnam2, 1, &
& gdp, ierror, lundia, kmax, 'KMAX')
if (ierror/=0) goto 9999
!
! element 'LSTCI' Variable is now LSTSCI
!
idummy(1) = 0
if (index(selmap(6:13), 'Y')/=0 .and. lstsci>0) idummy(1) = lstsci
call wrtvar(fds, filename, filetype, grnam2, 1, &
& gdp, ierror, lundia, idummy, 'LSTCI')
if (ierror/=0) goto 9999
!
! element 'LTUR'
!
idummy(1) = 0
if (index(selmap(14:15), 'Y')/=0 .and. ltur>0) idummy(1) = ltur
call wrtvar(fds, filename, filetype, grnam2, 1, &
& gdp, ierror, lundia, idummy, 'LTUR')
if (ierror/=0) goto 9999
!
! element 'NOSTAT'
!
call wrtvar(fds, filename, filetype, grnam2, 1, &
& gdp, ierror, lundia, nostatgl, 'NOSTAT')
if (ierror/=0) goto 9999
!
! element 'NSRC'
!
call wrtvar(fds, filename, filetype, grnam2, 1, &
& gdp, ierror, lundia, nsrc, 'NSRC')
if (ierror/=0) goto 9999
!
! element 'NTRUV'
!
call wrtvar(fds, filename, filetype, grnam2, 1, &
& gdp, ierror, lundia, ntruvgl, 'NTRUV')
if (ierror/=0) goto 9999
!
! element 'GRDANG'
!
call wrtvar(fds, filename, filetype, grnam2, 1, &
& gdp, ierror, lundia, grdang, 'GRDANG')
if (ierror/=0) goto 9999
endif
!
! element 'XCOR'
!
call wrtarray_nm(fds, filename, filetype, grnam2, 1, &
& nf, nl, mf, ml, iarrc, gdp, &
& ierror, lundia, xcor, 'XCOR')
if (ierror/=0) goto 9999
!
! element 'YCOR'
!
call wrtarray_nm(fds, filename, filetype, grnam2, 1, &
& nf, nl, mf, ml, iarrc, gdp, &
& ierror, lundia, ycor, 'YCOR')
if (ierror/=0) goto 9999
!
! element 'XZ'
!
call wrtarray_nm(fds, filename, filetype, grnam2, 1, &
& nf, nl, mf, ml, iarrc, gdp, &
& ierror, lundia, xz, 'XZ')
if (ierror/=0) goto 9999
!
! element 'YZ'
!
call wrtarray_nm(fds, filename, filetype, grnam2, 1, &
& nf, nl, mf, ml, iarrc, gdp, &
& ierror, lundia, yz, 'YZ')
if (ierror/=0) goto 9999
!
! element 'ALFAS'
!
call wrtarray_nm(fds, filename, filetype, grnam2, 1, &
& nf, nl, mf, ml, iarrc, gdp, &
& ierror, lundia, alfas, 'ALFAS')
if (ierror/=0) goto 9999
!
! element 'KCU'
!
call wrtarray_nm(fds, filename, filetype, grnam2, 1, &
& nf, nl, mf, ml, iarrc, gdp, &
& ierror, lundia, kcu, 'KCU')
if (ierror/=0) goto 9999
!
! element 'KCV'
!
call wrtarray_nm(fds, filename, filetype, grnam2, 1, &
& nf, nl, mf, ml, iarrc, gdp, &
& ierror, lundia, kcv, 'KCV')
if (ierror/=0) goto 9999
!
! element 'KCS'
!
call wrtarray_nm(fds, filename, filetype, grnam2, 1, &
& nf, nl, mf, ml, iarrc, gdp, &
& ierror, lundia, kcs, 'KCS')
if (ierror/=0) goto 9999
!
! element 'DP0'
!
if (dpsopt == 'DP') then
call wrtarray_nm(fds, filename, filetype, grnam2, 1, &
& nf, nl, mf, ml, iarrc, gdp, &
& ierror, lundia, dps, 'DP0')
else
call wrtarray_nm(fds, filename, filetype, grnam2, 1, &
& nf, nl, mf, ml, iarrc, gdp, &
& ierror, lundia, dp, 'DP0')
endif
if (ierror/=0) goto 9999
!
! element 'DPS0'
!
call wrtarray_nm(fds, filename, filetype, grnam2, 1, &
& nf, nl, mf, ml, iarrc, gdp, &
& ierror, lundia, dps, 'DPS0')
if (ierror/=0) goto 9999
!
! element 'DPU0'
!
call wrtarray_nm(fds, filename, filetype, grnam2, 1, &
& nf, nl, mf, ml, iarrc, gdp, &
& ierror, lundia, dpu, 'DPU0')
if (ierror/=0) goto 9999
!
! element 'DPV0'
!
call wrtarray_nm(fds, filename, filetype, grnam2, 1, &
& nf, nl, mf, ml, iarrc, gdp, &
& ierror, lundia, dpv, 'DPV0')
if (ierror/=0) goto 9999
!
if (filetype == FTYPE_NEFIS) then ! for NEFIS only
!
! The necessary information is currently held by DPSOPT but
! for backward compatibility the quantity is still called
! DRYFLP on the TRIM file.
!
! element 'DRYFLP'
!
cdum8(1) = dpsopt
call wrtvar(fds, filename, filetype, grnam2, 1, &
& gdp, ierror, lundia, cdum8, 'DRYFLP')
if (ierror/=0) goto 9999
!
! element 'NOROW'
!
call wrtvar(fds, filename, filetype, grnam2, 1, &
& gdp, ierror, lundia, norow, 'NOROW')
if (ierror/=0) goto 9999
!
! element 'NOROCO'
!
call wrtvar(fds, filename, filetype, grnam2, 1, &
& gdp, ierror, lundia, noroco, 'NOROCO')
if (ierror/=0) goto 9999
!
! element 'IROCOL'
!
call wrtvar(fds, filename, filetype, grnam2, 1, &
& gdp, ierror, lundia, irocol, 'IROCOL')
if (ierror/=0) goto 9999
!
! element 'THICK'
!
call wrtvar(fds, filename, filetype, grnam2, 1, &
& gdp, ierror, lundia, thick, 'THICK')
if (ierror/=0) goto 9999
!
! only if lmax > 0 (:= SELMAP( 6:15) <> 'NNNNNNNNNN')
!
if (index(selmap(6:15), 'Y')/=0) then
allocate(namhlp(lstsci+ltur), stat=istat)
lhlp = 0
if (index(selmap(6:13), 'Y')>0) then
do l = 1, lstsci
namhlp(l) = namcon(l)
enddo
lhlp = lhlp + lstsci
endif
if (index(selmap(14:15), 'Y')>0) then
do l = 1, ltur
namhlp(lhlp + l) = namcon(lstsci + l)
enddo
endif
!
! element 'NAMCON'
!
call wrtvar(fds, filename, filetype, grnam2, 1, &
& gdp, ierror, lundia, namhlp, 'NAMCON')
if (ierror/=0) goto 9999
deallocate(namhlp, stat=istat)
endif
!
! only if nostat > 0
!
if (nostatgl > 0) then
!
! element 'MNSTAT'
!
allocate(ibuff2(2,nostat), stat=istat)
do k=1,nostat
!
! mnstat contains indices with respect to this partion
! transfer into global indices
!
ibuff2(1,k) = mnstat(1,k) + mfg - 1
ibuff2(2,k) = mnstat(2,k) + nfg - 1
enddo
call wrtarray_n(fds, filename, filetype, grnam2, &
& 1, nostat, nostatto, nostatgl, order_sta, gdp, &
& 2, &
& ierror, lundia, ibuff2, 'MNSTAT', station, mergedim=2)
deallocate(ibuff2, stat=istat)
if (ierror/=0) goto 9999
!
! element 'NAMST'
!
call wrtarray_n(fds, filename, filetype, grnam2, &
& 1, nostat, nostatto, nostatgl, order_sta, gdp, &
& ierror, lundia, namst, 'NAMST')
if (ierror/=0) goto 9999
endif
!
! only if nsrc > 0
!
if (nsrc > 0) then
!
! element 'NAMSRC'
!
call wrtvar(fds, filename, filetype, grnam2, 1, &
& gdp, ierror, lundia, namsrc, 'NAMSRC')
if (ierror/=0) goto 9999
endif
!
! only if ntruv > 0
!
if (ntruvgl > 0) then
!
! element 'MNTRA'
!
allocate(ibuff2(4,ntruv), stat=istat)
ibuff2 = 0
do k = 1, ntruv
!
! mnit contains indices with respect to this partion
! transfer into global indices
!
ibuff2(1,k) = mnit(1,k) + mfg - 1
ibuff2(2,k) = mnit(2,k) + nfg - 1
ibuff2(3,k) = mnit(3,k) + mfg - 1
ibuff2(4,k) = mnit(4,k) + nfg - 1
enddo
call wrtarray_n(fds, filename, filetype, grnam2, &
& 1, ntruv, ntruvto, ntruvgl, order_tra, gdp, &
& 4, &
& ierror, lundia, ibuff2, 'MNTRA', transec, mergedim=2)
deallocate(ibuff2, stat=istat)
if (ierror/=0) goto 9999
!
! element 'NAMTRA'
!
call wrtarray_n(fds, filename, filetype, grnam2, &
& 1, ntruv, ntruvto, ntruvgl, order_tra, gdp, &
& ierror, lundia, namtra, 'NAMTRA')
endif
!
! element 'LSED'
!
if (lsed>0) then
call wrtvar(fds, filename, filetype, grnam2, 1, &
& gdp, ierror, lundia, lsed, 'LSED')
if (ierror/=0) goto 9999
endif
!
! element 'LSEDBL'
!
if (lsedbl>0 .and. inode == master) then
call wrtvar(fds, filename, filetype, grnam2, 1, &
& gdp, ierror, lundia, lsedbl, 'LSEDBL')
if (ierror/=0) goto 9999
endif
!
! element 'NAMSED'
!
if (lsedtot>0 .and. inode == master) then
call wrtvar(fds, filename, filetype, grnam2, 1, &
& gdp, ierror, lundia, namsed, 'NAMSED')
if (ierror/=0) goto 9999
endif
else
!
! element 'NAMCON'
!
if (lstsci>0) then
allocate(namhlp(lstsci), stat=istat)
do l = 1, lstsci
namhlp(l) = namcon(l)
enddo
call wrtvar(fds, filename, filetype, grnam2, 1, &
& gdp, ierror, lundia, namhlp, 'NAMCON')
if (ierror/=0) goto 9999
deallocate(namhlp, stat=istat)
endif
!
! element 'NAMTUR'
!
if (ltur>0) then
allocate(namhlp(ltur), stat=istat)
do l = 1, ltur
namhlp(l) = namcon(lstsci+l)
enddo
call wrtvar(fds, filename, filetype, grnam2, 1, &
& gdp, ierror, lundia, namhlp, 'NAMTUR')
if (ierror/=0) goto 9999
deallocate(namhlp, stat=istat)
endif
endif
!
! element 'ZK'
!
if (zmodel) then
if (filetype /= FTYPE_NEFIS) then
allocate(rbuff1(kmax), stat=istat)
do k = 1, kmax
rbuff1(k) = (zk(k-1)+zk(k))/2.0_fp
enddo
call wrtvar(fds, filename, filetype, grnam2, 1, &
& gdp, ierror, lundia, rbuff1, 'ZK_LYR')
if (ierror/=0) goto 9999
deallocate(rbuff1, stat=istat)
endif
!
call wrtvar(fds, filename, filetype, grnam2, 1, &
& gdp, ierror, lundia, zk, 'ZK')
if (ierror/=0) goto 9999
elseif (filetype /= FTYPE_NEFIS) then
call wrtvar(fds, filename, filetype, grnam2, 1, &
& gdp, ierror, lundia, sig, 'SIG_LYR')
if (ierror/=0) goto 9999
!
allocate(rbuff1(0:kmax), stat=istat)
rbuff1(0) = 0.0_fp
do k = 1, kmax
rbuff1(k) = rbuff1(k-1) - thick(k)
enddo
call wrtvar(fds, filename, filetype, grnam2, 1, &
& gdp, ierror, lundia, rbuff1, 'SIG_INTF')
if (ierror/=0) goto 9999
deallocate(rbuff1, stat=istat)
endif
!
! element 'COORDINATES'
!
if (filetype == FTYPE_NEFIS) then ! for NEFIS only
if (sferic) then
cdum16(1) = 'SPHERICAL'
else
cdum16(1) = 'CARTESIAN'
endif
call wrtvar(fds, filename, filetype, grnam2, 1, &
& gdp, ierror, lundia, cdum16, 'COORDINATES')
if (ierror/=0) goto 9999
!
! element 'LAYER_MODEL'
!
call wrtvar(fds, filename, filetype, grnam2, 1, &
& gdp, ierror, lundia, layermode, 'LAYER_MODEL')
if (ierror/=0) goto 9999
endif
!
! element 'GSQS'
!
call wrtarray_nm(fds, filename, filetype, grnam2, 1, &
& nf, nl, mf, ml, iarrc, gdp, &
& ierror, lundia, gsqs, 'GSQS')
if (ierror/=0) goto 9999
!
! Parallel partition
!
call wrtarray_nm(fds, filename, filetype, grnam2, 1, &
& nf, nl, mf, ml, iarrc, gdp, &
& ierror, lundia, ipartition, 'PPARTITION')
if (ierror/=0) goto 9999
!
if (filetype == FTYPE_NEFIS) then
!
! element 'OUTPUT_LAYERS'
!
call wrtvar(fds, filename, filetype, grnam2, 1, &
& gdp, ierror, lundia, smlay, 'OUTPUT_LAYERS')
if (ierror/=0) goto 9999
else
!
! element 'KMAXOUT'
!
call wrtvar(fds, filename, filetype, grnam2, 1, &
& gdp, ierror, lundia, smlay, 'KMAXOUT')
if (ierror/=0) goto 9999
!
! element 'KMAXOUT_RESTR'
!
allocate(ibuff1(kmaxout_restr), stat=istat)
ibuff1 = smlay_restr-1 ! the compress attribute unfortunately requires starting index equal to 0 like C
call wrtvar(fds, filename, filetype, grnam2, 1, &
& gdp, ierror, lundia, ibuff1, 'KMAXOUT_RESTR')
deallocate(ibuff1, stat=istat)
if (ierror/=0) goto 9999
endif
!
! element 'RHOCONST'
!
call wrtvar(fds, filename, filetype, grnam2, 1, &
& gdp, ierror, lundia, rhow, 'RHOCONST')
if (ierror/=0) goto 9999
!
! element 'GRAVITY'
!
call wrtvar(fds, filename, filetype, grnam2, 1, &
& gdp, ierror, lundia, ag, 'GRAVITY')
if (ierror/=0) goto 9999
end select
deallocate(smlay_restr)
!
! write error message if error occured and set error = .true.
!
9999 continue
if (ierror /= 0) error = .true.
end subroutine wrimap
|
{"hexsha": "1681d910baf1ee91e3953a99233fae0a851e3487", "size": 60014, "ext": "f90", "lang": "FORTRAN", "max_stars_repo_path": "docker/water/delft3d/tags/v6686/src/engines_gpl/flow2d3d/packages/io/src/output/wrimap.f90", "max_stars_repo_name": "liujiamingustc/phd", "max_stars_repo_head_hexsha": "4f815a738abad43531d02ac66f5bd0d9a1def52a", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2021-01-06T03:01:18.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-21T03:02:55.000Z", "max_issues_repo_path": "docker/water/delft3d/tags/v6686/src/engines_gpl/flow2d3d/packages/io/src/output/wrimap.f90", "max_issues_repo_name": "liujiamingustc/phd", "max_issues_repo_head_hexsha": "4f815a738abad43531d02ac66f5bd0d9a1def52a", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "docker/water/delft3d/tags/v6686/src/engines_gpl/flow2d3d/packages/io/src/output/wrimap.f90", "max_forks_repo_name": "liujiamingustc/phd", "max_forks_repo_head_hexsha": "4f815a738abad43531d02ac66f5bd0d9a1def52a", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 59.2438302073, "max_line_length": 222, "alphanum_fraction": 0.4817875829, "num_tokens": 16359}
|
# This script is borrowed and extended from https://github.com/nkolot/SPIN/blob/master/models/hmr.py
# Adhere to their licence to use this script
import math
import torch
import numpy as np
import os.path as osp
import torch.nn as nn
import torchvision.models.resnet as resnet
from lib.core.config import VIBE_DATA_DIR
from lib.utils.geometry import rotation_matrix_to_angle_axis, rot6d_to_rotmat
from lib.models.smpl import SMPL, SMPL_MODEL_DIR, H36M_TO_J14, SMPL_MEAN_PARAMS
class Bottleneck(nn.Module):
    """ResNet bottleneck residual block (1x1 -> 3x3 -> 1x1 convolutions).

    Re-implementation of the torchvision bottleneck: channels are squeezed
    to ``planes``, processed with a stride-``stride`` 3x3 convolution, then
    expanded back to ``planes * 4`` before the shortcut is added.
    """
    expansion = 4
    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(Bottleneck, self).__init__()
        # Submodules are created in the same order as torchvision so that
        # state_dict keys (and seed-dependent initialization) line up.
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
                               padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm2d(planes * 4)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride
    def forward(self, x):
        # Shortcut branch: identity, or a projection when shape changes.
        identity = x if self.downsample is None else self.downsample(x)
        y = self.relu(self.bn1(self.conv1(x)))
        y = self.relu(self.bn2(self.conv2(y)))
        y = self.bn3(self.conv3(y))
        y = y + identity
        return self.relu(y)
class HMR(nn.Module):
    """
    SMPL Iterative Regressor with ResNet50 backbone

    Extracts ResNet-50 features from an RGB crop and iteratively regresses
    SMPL pose (24 joints in 6D rotation form), 10 shape betas and a
    3-parameter weak-perspective camera, starting from the mean parameters
    loaded from ``smpl_mean_params``.
    """
    def __init__(self, block, layers, smpl_mean_params):
        # block: residual block class (e.g. Bottleneck).
        # layers: number of blocks per ResNet stage, e.g. [3, 4, 6, 3].
        # smpl_mean_params: path to an .npz file with mean 'pose', 'shape'
        # and 'cam' arrays used as the initial regression estimate.
        self.inplanes = 64
        super(HMR, self).__init__()
        npose = 24 * 6  # 24 joints x 6D rotation representation
        self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
                               bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
        # 7x7 average pool after the /32 backbone implies 224x224 inputs.
        self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
        self.avgpool = nn.AvgPool2d(7, stride=1)
        # Regressor head: image features concatenated with the current
        # pose/shape/cam estimate (npose + 10 betas + 3 cam = npose + 13).
        self.fc1 = nn.Linear(512 * block.expansion + npose + 13, 1024)
        self.drop1 = nn.Dropout()
        self.fc2 = nn.Linear(1024, 1024)
        self.drop2 = nn.Dropout()
        self.decpose = nn.Linear(1024, npose)
        self.decshape = nn.Linear(1024, 10)
        self.deccam = nn.Linear(1024, 3)
        # Small-gain init keeps the first iteration close to the mean params.
        nn.init.xavier_uniform_(self.decpose.weight, gain=0.01)
        nn.init.xavier_uniform_(self.decshape.weight, gain=0.01)
        nn.init.xavier_uniform_(self.deccam.weight, gain=0.01)
        # NOTE(review): SMPL layer is pinned to CPU here; presumably moved to
        # the right device by the caller -- confirm before GPU use.
        self.smpl = SMPL(
            SMPL_MODEL_DIR,
            batch_size=64,
            create_transl=False
        ).to('cpu')
        # He-style init for conv layers, constant init for batch norms.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
        # Mean SMPL parameters registered as buffers (move with the module,
        # excluded from gradient updates).
        mean_params = np.load(smpl_mean_params)
        init_pose = torch.from_numpy(mean_params['pose'][:]).unsqueeze(0)
        init_shape = torch.from_numpy(mean_params['shape'][:].astype('float32')).unsqueeze(0)
        init_cam = torch.from_numpy(mean_params['cam']).unsqueeze(0)
        self.register_buffer('init_pose', init_pose)
        self.register_buffer('init_shape', init_shape)
        self.register_buffer('init_cam', init_cam)
    def _make_layer(self, block, planes, blocks, stride=1):
        # Build one ResNet stage; a 1x1 conv projection is used on the
        # shortcut when the spatial size or channel count changes.
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                nn.Conv2d(self.inplanes, planes * block.expansion,
                          kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(planes * block.expansion),
            )
        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample))
        self.inplanes = planes * block.expansion
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes))
        return nn.Sequential(*layers)
    def feature_extractor(self, x):
        """Run only the ResNet backbone and return flattened pooled features."""
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)
        x1 = self.layer1(x)
        x2 = self.layer2(x1)
        x3 = self.layer3(x2)
        x4 = self.layer4(x3)
        xf = self.avgpool(x4)
        xf = xf.view(xf.size(0), -1)
        return xf
    def forward(self, x, init_pose=None, init_shape=None, init_cam=None, n_iter=3, return_features=False):
        # x: RGB image batch; defaults fall back to the registered mean
        # parameters as the iteration starting point.
        batch_size = x.shape[0]
        if init_pose is None:
            init_pose = self.init_pose.expand(batch_size, -1)
        if init_shape is None:
            init_shape = self.init_shape.expand(batch_size, -1)
        if init_cam is None:
            init_cam = self.init_cam.expand(batch_size, -1)
        # Backbone pass (duplicates feature_extractor inline).
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)
        x1 = self.layer1(x)
        x2 = self.layer2(x1)
        x3 = self.layer3(x2)
        x4 = self.layer4(x3)
        xf = self.avgpool(x4)
        xf = xf.view(xf.size(0), -1)
        pred_pose = init_pose
        pred_shape = init_shape
        pred_cam = init_cam
        # Iterative error feedback: regress residual parameter updates.
        for i in range(n_iter):
            xc = torch.cat([xf, pred_pose, pred_shape, pred_cam], 1)
            xc = self.fc1(xc)
            xc = self.drop1(xc)
            xc = self.fc2(xc)
            xc = self.drop2(xc)
            pred_pose = self.decpose(xc) + pred_pose
            pred_shape = self.decshape(xc) + pred_shape
            pred_cam = self.deccam(xc) + pred_cam
        # Decode 6D pose into per-joint rotation matrices and run SMPL.
        pred_rotmat = rot6d_to_rotmat(pred_pose).view(batch_size, 24, 3, 3)
        pred_output = self.smpl(
            betas=pred_shape,
            body_pose=pred_rotmat[:, 1:],
            global_orient=pred_rotmat[:, 0].unsqueeze(1),
            pose2rot=False
        )
        pred_vertices = pred_output.vertices
        pred_joints = pred_output.joints
        pred_keypoints_2d = projection(pred_joints, pred_cam)
        # theta packs [cam (3) | pose as axis-angle (72) | shape (10)].
        pose = rotation_matrix_to_angle_axis(pred_rotmat.reshape(-1, 3, 3)).reshape(-1, 72)
        output = [{
            'theta': torch.cat([pred_cam, pose, pred_shape], dim=1),
            'verts': pred_vertices,
            'kp_2d': pred_keypoints_2d,
            'kp_3d': pred_joints,
        }]
        if return_features:
            return xf, output
        else:
            return output
class Regressor(nn.Module):
    """Iterative SMPL parameter regressor head (no image backbone).

    Consumes pre-extracted 2048-d features (512 * 4, i.e. ResNet-50 output)
    and refines SMPL pose/shape/camera estimates over ``n_iter`` iterations,
    mirroring the regression head inside HMR.
    """
    def __init__(self, smpl_mean_params=SMPL_MEAN_PARAMS):
        super(Regressor, self).__init__()
        npose = 24 * 6  # 24 joints x 6D rotation representation
        # Input: 2048 image features + pose + 10 betas + 3 cam params.
        self.fc1 = nn.Linear(512 * 4 + npose + 13, 1024)
        self.drop1 = nn.Dropout()
        self.fc2 = nn.Linear(1024, 1024)
        self.drop2 = nn.Dropout()
        self.decpose = nn.Linear(1024, npose)
        self.decshape = nn.Linear(1024, 10)
        self.deccam = nn.Linear(1024, 3)
        # Small-gain init keeps early iterations close to the mean params.
        nn.init.xavier_uniform_(self.decpose.weight, gain=0.01)
        nn.init.xavier_uniform_(self.decshape.weight, gain=0.01)
        nn.init.xavier_uniform_(self.deccam.weight, gain=0.01)
        self.smpl = SMPL(
            SMPL_MODEL_DIR,
            batch_size=64,
            create_transl=False
        )
        # Mean SMPL parameters registered as buffers (move with the module,
        # excluded from gradient updates).
        mean_params = np.load(smpl_mean_params)
        init_pose = torch.from_numpy(mean_params['pose'][:]).unsqueeze(0)
        init_shape = torch.from_numpy(mean_params['shape'][:].astype('float32')).unsqueeze(0)
        init_cam = torch.from_numpy(mean_params['cam']).unsqueeze(0)
        self.register_buffer('init_pose', init_pose)
        self.register_buffer('init_shape', init_shape)
        self.register_buffer('init_cam', init_cam)
    def forward(self, x, init_pose=None, init_shape=None, init_cam=None, n_iter=3, J_regressor=None):
        # x: (batch, 2048) features. J_regressor: optional joint-regressor
        # matrix (rows = joints, cols = vertices) for an alternative joint
        # set; when given, the 14 H36M joints are selected from its output.
        batch_size = x.shape[0]
        if init_pose is None:
            init_pose = self.init_pose.expand(batch_size, -1)
        if init_shape is None:
            init_shape = self.init_shape.expand(batch_size, -1)
        if init_cam is None:
            init_cam = self.init_cam.expand(batch_size, -1)
        pred_pose = init_pose
        pred_shape = init_shape
        pred_cam = init_cam
        # Iterative error feedback: regress residual parameter updates.
        for i in range(n_iter):
            xc = torch.cat([x, pred_pose, pred_shape, pred_cam], 1)
            xc = self.fc1(xc)
            xc = self.drop1(xc)
            xc = self.fc2(xc)
            xc = self.drop2(xc)
            pred_pose = self.decpose(xc) + pred_pose
            pred_shape = self.decshape(xc) + pred_shape
            pred_cam = self.deccam(xc) + pred_cam
        # Decode 6D pose into per-joint rotation matrices and run SMPL.
        pred_rotmat = rot6d_to_rotmat(pred_pose).view(batch_size, 24, 3, 3)
        pred_output = self.smpl(
            betas=pred_shape,
            body_pose=pred_rotmat[:, 1:],
            global_orient=pred_rotmat[:, 0].unsqueeze(1),
            pose2rot=False
        )
        pred_vertices = pred_output.vertices
        pred_joints = pred_output.joints
        if J_regressor is not None:
            # Regress joints directly from vertices, then keep H36M's 14.
            J_regressor_batch = J_regressor[None, :].expand(pred_vertices.shape[0], -1, -1).to(pred_vertices.device)
            pred_joints = torch.matmul(J_regressor_batch, pred_vertices)
            pred_joints = pred_joints[:, H36M_TO_J14, :]
        pred_keypoints_2d = projection(pred_joints, pred_cam)
        # theta packs [cam (3) | pose as axis-angle (72) | shape (10)].
        pose = rotation_matrix_to_angle_axis(pred_rotmat.reshape(-1, 3, 3)).reshape(-1, 72)
        output = [{
            'theta' : torch.cat([pred_cam, pose, pred_shape], dim=1),
            'verts' : pred_vertices,
            'kp_2d' : pred_keypoints_2d,
            'kp_3d' : pred_joints,
            'rotmat' : pred_rotmat
        }]
        return output
def hmr(smpl_mean_params=SMPL_MEAN_PARAMS, pretrained=True, **kwargs):
    """Construct an HMR model with a ResNet-50 backbone.

    Args:
        smpl_mean_params: path to the .npz file holding mean SMPL parameters.
        pretrained (bool): If True, initialize the backbone with ImageNet
            weights from torchvision's ResNet-50; keys that do not match the
            regressor head are skipped via ``strict=False``.
    """
    model = HMR(Bottleneck, [3, 4, 6, 3], smpl_mean_params, **kwargs)
    if pretrained:
        imagenet_state = resnet.resnet50(pretrained=True).state_dict()
        model.load_state_dict(imagenet_state, strict=False)
    return model
def projection(pred_joints, pred_camera):
    """Project 3D joints into normalized 2D image coordinates.

    ``pred_camera`` holds weak-perspective parameters [s, tx, ty] for a
    224px crop with an assumed focal length of 5000; the scale ``s`` is
    converted to a depth translation ``2 * f / (224 * s)``.

    Args:
        pred_joints: (batch, n_joints, 3) 3D joint positions.
        pred_camera: (batch, 3) camera as [scale, trans_x, trans_y].

    Returns:
        (batch, n_joints, 2) keypoints normalized to [-1, 1].
    """
    pred_cam_t = torch.stack([pred_camera[:, 1],
                              pred_camera[:, 2],
                              # 1e-9 guards against division by a zero scale
                              2 * 5000. / (224. * pred_camera[:, 0] + 1e-9)], dim=-1)
    batch_size = pred_joints.shape[0]
    # Fix: allocate the camera center on the same device as the joints.
    # A CPU tensor here breaks the intrinsics fill-in inside
    # perspective_projection when the joints live on the GPU.
    camera_center = torch.zeros(batch_size, 2, device=pred_joints.device)
    pred_keypoints_2d = perspective_projection(pred_joints,
                                               rotation=torch.eye(3).unsqueeze(0).expand(batch_size, -1, -1).to(pred_joints.device),
                                               translation=pred_cam_t,
                                               focal_length=5000.,
                                               camera_center=camera_center)
    # Normalize keypoints to [-1,1] (crop is 224x224)
    pred_keypoints_2d = pred_keypoints_2d / (224. / 2.)
    return pred_keypoints_2d
def perspective_projection(points, rotation, translation,
                           focal_length, camera_center):
    """
    This function computes the perspective projection of a set of points.
    Input:
        points (bs, N, 3): 3D points
        rotation (bs, 3, 3): Camera rotation
        translation (bs, 3): Camera translation
        focal_length (bs,) or scalar: Focal length
        camera_center (bs, 2): Camera center
    """
    batch_size = points.shape[0]
    # Assemble the pinhole intrinsics matrix K for every batch element.
    intrinsics = torch.zeros([batch_size, 3, 3], device=points.device)
    intrinsics[:, 0, 0] = focal_length
    intrinsics[:, 1, 1] = focal_length
    intrinsics[:, 2, 2] = 1.
    intrinsics[:, :-1, -1] = camera_center
    # Move the points into the camera frame (rotate, then translate).
    cam_points = torch.einsum('bij,bkj->bki', rotation, points)
    cam_points = cam_points + translation.unsqueeze(1)
    # Perspective divide by the depth coordinate.
    normalized = cam_points / cam_points[:, :, -1].unsqueeze(-1)
    # Apply the intrinsics and drop the homogeneous coordinate.
    pixel_coords = torch.einsum('bij,bkj->bki', intrinsics, normalized)
    return pixel_coords[:, :, :-1]
def get_pretrained_hmr():
    """Build an HMR model, load the SPIN checkpoint and return it in eval mode.

    The model is placed on the GPU; the checkpoint is read from
    ``VIBE_DATA_DIR/spin_model_checkpoint.pth.tar`` with ``strict=False``
    so non-matching keys are skipped.
    """
    model = hmr().to('cuda')
    checkpoint_path = osp.join(VIBE_DATA_DIR, 'spin_model_checkpoint.pth.tar')
    state = torch.load(checkpoint_path)
    model.load_state_dict(state['model'], strict=False)
    model.eval()
    return model
|
{"hexsha": "079d10b0cb968b1fc025b1989b6c01442ccdb432", "size": 12757, "ext": "py", "lang": "Python", "max_stars_repo_path": "lib/models/spin.py", "max_stars_repo_name": "omidrk/computervisionPanopticToSMPLAuto", "max_stars_repo_head_hexsha": "b84b60f0ec4ffdb4ae61348919a95f7bb2eab926", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "lib/models/spin.py", "max_issues_repo_name": "omidrk/computervisionPanopticToSMPLAuto", "max_issues_repo_head_hexsha": "b84b60f0ec4ffdb4ae61348919a95f7bb2eab926", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "lib/models/spin.py", "max_forks_repo_name": "omidrk/computervisionPanopticToSMPLAuto", "max_forks_repo_head_hexsha": "b84b60f0ec4ffdb4ae61348919a95f7bb2eab926", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.1432506887, "max_line_length": 132, "alphanum_fraction": 0.5978678373, "include": true, "reason": "import numpy", "num_tokens": 3322}
|
"""The following module stores all methods used in the notebook."""
import ppscore as pps
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sb
from sklearn.preprocessing import LabelBinarizer
from sklearn.metrics import accuracy_score, confusion_matrix, precision_score
from sklearn.metrics import classification_report, roc_auc_score
from sklearn.model_selection import cross_val_score
class Experiment:
    """Namespace of helper methods used in the notebook.

    None of the methods reads instance state; they are declared as
    ``@staticmethod`` so they work both as ``Experiment.method(...)`` and on
    an instance (the original definitions omitted ``self`` and therefore
    crashed when called on an instance). Plotting helpers rely on the
    module-level ``plt`` (matplotlib), ``sb`` (seaborn) and ``pps``
    (ppscore) imports.
    """

    def __init__(self):
        """Initialize the class object (no state is stored)."""

    @staticmethod
    def memory_stats(df):
        """Return a one-column DataFrame of per-column memory usage in bytes."""
        return pd.DataFrame(
            df.memory_usage(deep=True),
            columns=['Memory']
        )

    @staticmethod
    def pps_heatmap(df):
        """Calculate the Predictive Power Score matrix and plot it as a heatmap.

        Args:
            df: pandas DataFrame (or Series) to score.

        Returns:
            The matplotlib Axes containing the heatmap.
        """
        pps_mtrx = pps.matrix(df)
        pps_pivot = pps_mtrx[['x', 'y', 'ppscore']].pivot(columns='x', index='y',
                                                          values='ppscore')
        plt.figure(figsize=(24, 8))
        ax = sb.heatmap(pps_pivot, vmin=0, vmax=1, cmap="afmhot_r", linewidths=0.5,
                        annot=True)
        ax.set_title("PPS matrix")
        ax.set_xlabel("feature")
        ax.set_ylabel("target")
        return ax

    @staticmethod
    def corr_heatmap(df, mask: bool):
        """Plot a correlation heatmap; ``mask=True`` hides the upper triangle."""
        plt.figure(figsize=(24, 8))
        if mask is True:
            # Hide the redundant upper triangle of the symmetric matrix.
            # Fix: np.bool was deprecated in NumPy 1.20 and removed in 1.24;
            # the builtin bool is the supported dtype spelling.
            tri_mask = np.zeros_like(df.corr(), dtype=bool)
            tri_mask[np.triu_indices_from(tri_mask)] = True
            sb.heatmap(df.corr(), annot=True, cmap='cividis', linewidth=.5,
                       mask=tri_mask)
        else:
            sb.heatmap(df.corr(), annot=True, cmap='cividis')
        return plt.show()

    @staticmethod
    def mean_accuracy_score(est, X, y, cv: int):
        """Print cross-validated mean accuracy and its standard deviation."""
        res = cross_val_score(est, X, y, cv=cv, n_jobs=-1, verbose=1,
                              scoring='accuracy')
        print('Average Accuracy:', (np.mean(res)))
        print('Average Standard Deviation:', (np.std(res)))
        return

    @staticmethod
    def plot_prc_figure(precision, recall, thresh):
        """Plot precision and recall against the decision threshold."""
        plt.figure(figsize=(24, 10))
        plt.plot(thresh, precision[:-1], 'r--', label='Precision')
        plt.plot(thresh, recall[:-1], 'g--', label='Recall')
        plt.title('Precision Recall Curve')
        plt.xlabel('Threshold')
        plt.legend(loc='best')
        plt.ylim([-0.5, 1.5])
        plt.show()
        return

    @staticmethod
    def roc_curve_plot(fpr, tpr, truth, pred, label=None):
        """Print the ROC AUC score and plot the ROC curve.

        Returns None, matching the original implementation (which returned
        the result of ``print``).
        """
        print('ROC Score:', roc_auc_score(truth, pred))
        plt.figure(figsize=(18, 10))
        plt.plot(fpr, tpr, linewidth=2, label=label)
        plt.plot([0, 1], [0, 1], 'k--')
        plt.axis([0, 1, 0, 1])
        plt.xlabel('False Positive Rate')
        plt.ylabel('True Positive Rate')
        plt.title('ROC Curve')
        plt.show()
        return None

    @staticmethod
    def plot_roc_curve(fpr, tpr, thresholds):
        """Plot TPR and FPR at every threshold with plotly express.

        NOTE(review): ``px`` (plotly.express) is never imported in this
        module, so this method raises NameError as written; add
        ``import plotly.express as px`` at module level before using it.
        """
        specs = pd.DataFrame({
            'FALSE POSITIVE RATE': fpr,
            'TRUE POSITIVE RATE': tpr
        }, index=thresholds)
        specs.index.name = "Thresholds"
        specs.columns.name = "Rate"
        fig = px.line(
            specs, title='TPR AND FPR AT EVERY THRESHOLD', width=480,
            height=640
        )
        fig.update_yaxes(scaleanchor="x", scaleratio=1)
        fig.update_xaxes(range=[0, 1], constrain='domain')
        return fig.show()

    @staticmethod
    def print_score(est, X_train, X_test, y_train, y_test, train: bool):
        """Print accuracy, ROC AUC, classification report and confusion matrix.

        ``train=True`` evaluates on the training split (plus 10-fold
        cross-validated accuracy); ``train=False`` evaluates on the test
        split. Labels are binarized with a LabelBinarizer fitted on
        ``y_train`` so roc_auc_score accepts the predictions.
        """
        lab = LabelBinarizer()
        lab.fit(y_train)
        if train is True:
            res = est.predict(X_train)
            print("Train Score:\n")
            print('Accuracy Score:{0:.4f}\n'.format(accuracy_score(y_train, res)))
            print("ROC AUC:{0:.4f}\n".format(roc_auc_score(y_train, lab.transform(res))))
            print("Classification Report: \n {} \n".format(
                classification_report(y_train, res)
            ))
            print("Confusion Matrix: \n {} \n".format(confusion_matrix(y_train, res)))
            nres = cross_val_score(
                est, X_train, y_train, cv=10, scoring='accuracy'
            )
            print("Average Accuracy: {0:.4f}\n".format(np.mean(nres)))
            print("Average Standard Deviation {0:.4f}\n".format(np.std(nres)))
            return
        else:
            res_test = est.predict(X_test)
            print("Test Score:\n")
            print('Accuracy Score:{0:.4f}\n'.format(accuracy_score(y_test, res_test)))
            print("ROC AUC:{0:.4f}\n".format(roc_auc_score(y_test, lab.transform(res_test))))
            print("Classification Report: \n {} \n".format(
                classification_report(y_test, res_test)
            ))
            print("Confusion Matrix: \n {} \n".format(confusion_matrix(y_test, res_test)))
            return
|
{"hexsha": "3600f8b956c3e25d881716d3de198116e8abcbad", "size": 5515, "ext": "py", "lang": "Python", "max_stars_repo_path": "notebooks/src/note.py", "max_stars_repo_name": "ahmed14-cell/breast-cancer-classification", "max_stars_repo_head_hexsha": "7e37145c8d7fe267a12bc42b2e8e80d97b031390", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "notebooks/src/note.py", "max_issues_repo_name": "ahmed14-cell/breast-cancer-classification", "max_issues_repo_head_hexsha": "7e37145c8d7fe267a12bc42b2e8e80d97b031390", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "notebooks/src/note.py", "max_forks_repo_name": "ahmed14-cell/breast-cancer-classification", "max_forks_repo_head_hexsha": "7e37145c8d7fe267a12bc42b2e8e80d97b031390", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-05-03T13:16:33.000Z", "max_forks_repo_forks_event_max_datetime": "2021-05-03T13:16:33.000Z", "avg_line_length": 35.3525641026, "max_line_length": 93, "alphanum_fraction": 0.5673617407, "include": true, "reason": "import numpy", "num_tokens": 1276}
|
export Arguments
"""
type Arguments
positional::Tuple
keyword::Dict{Symbol, Any}
end
Will store positional and keyword arguments for later use.
Create with [`collect_arguments`](@ref). You can also [`merge`](@ref) two
`Arguments`, [`push`](@ref) or [`unshift`](@ref) in new
arguments, and run with [`run`](@ref).
"""
type Arguments
positional::Tuple
keyword::Dict{Symbol, Any}
end
export LazyCall
"""
type LazyCall{T <: Function}
arguments::Arguments
function_call::T
end
Will store a function along with its arguments for later use. Create
with [`collect_call`](@ref) and run with [`run`](@ref)
"""
type LazyCall{T <: Function}
arguments::Arguments
function_call::T
end
"""
merge(a::Arguments, b::Arguments)
Merge two [`Arguments`](@ref) types.
Positional arguments are added at the end, and new keyword arguments
are added to old keyword arguments, or, if the keys match, overwrite
them.
# Examples
```julia
merge_test = @chain begin
collect_arguments(1, a = 2, b = 3)
merge(_, collect_arguments(4, a = 5, c = 6) )
end
@test merge_test == collect_arguments(1, 4, a = 5, b = 3, c = 6)
```
"""
function Base.merge(a::Arguments, b::Arguments)
positional = (a.positional..., b.positional...)
keyword = merge(a.keyword, b.keyword)
Arguments(positional, keyword)
end
"""
merge(lazy_call::LazyCall, arguments::Arguments)
`merge` `arguments` into the `arguments` of `lazy_call`.
# Examples
```julia
merge_test = @chain begin
collect_arguments([1, 2])
unshift(_, vcat)
LazyCall(_, map)
merge(_, collect_arguments([3, 4]) )
run
end
@test merge_test == [[1, 3], [2, 4]]
```
"""
Base.merge(lazy_call::LazyCall, arguments::Arguments) = @chain begin
lazy_call.arguments
merge(_, arguments)
LazyCall(_, lazy_call.function_call)
end
export push
"""
    push(arguments::Arguments, positional...; keyword...)

Add positional and keyword arguments to `arguments`.

New positional arguments go at the end; new keyword arguments are merged
into the old ones, overwriting values for matching keys.

# Examples
```julia
push_test = push(collect_arguments(1, a = 2, b = 3), 4, a = 5, c = 6)
@test push_test == collect_arguments(1, 4, a = 5, b = 3, c = 6)
```
"""
push(a::Arguments, positional...; keyword...) =
    merge(a, Arguments(positional, Dict(keyword)))
"""
push(lazy_call::LazyCall, positional...; keyword...)
`push` to `lazy_call.arguments`.
# Examples
```julia
push_test = @chain begin
collect_arguments([1, 2])
unshift(_, vcat)
LazyCall(_, map)
push(_, [3, 4])
run
end
@test push_test == [[1, 3], [2, 4]]
```
"""
push(lazy_call::LazyCall, positional...; keyword...) = @chain begin
lazy_call.arguments
push(_, positional...; keyword...)
LazyCall(_, lazy_call.function_call)
end
export unshift
"""
    unshift(arguments::Arguments, positional...)

Add positional arguments to `arguments`, placing the new ones at the start.

# Examples
```julia
unshift_test = unshift(collect_arguments(2, a = 3), 1)
@test unshift_test == collect_arguments(1, 2, a = 3)
```
"""
unshift(a::Arguments, positional...) =
    merge(Arguments(positional, Dict()), a)
"""
unshift(lazy_call::LazyCall, positional...)
`unshift` to `lazy_call.arguments`.
# Examples
```julia
unshift_test = @chain begin
collect_arguments([1, 2], [3, 4])
LazyCall(_, map)
unshift(_, vcat)
run
end
@test unshift_test == [[1, 3], [2, 4]]
```
"""
unshift(lazy_call::LazyCall, positional...) = @chain begin
lazy_call.arguments
unshift(_, positional...)
LazyCall(_, lazy_call.function_call)
end
export collect_arguments
"""
    collect_arguments(positional...; keyword...)

Convenience constructor for an [`Arguments`](@ref) object.

# Examples
```julia
a = collect_arguments(1, 2, a = 3, b = 4)
@test a.positional == (1, 2)
@test a.keyword == Dict{Symbol, Any}(:a => 3, :b => 4)
```
"""
collect_arguments(positional...; keyword...) =
    Arguments(positional, Dict(keyword))
export collect_call
"""
    collect_call(f::Function, positional...; keyword...)

Convenience constructor for a [`LazyCall`](@ref) object wrapping `f` with
the given arguments.

# Examples
```julia
l = collect_call(vcat, [1, 2], [3, 4])
@test l.function_call == vcat
@test l.arguments == collect_arguments([1, 2], [3, 4])
```
"""
collect_call(f, positional...; keyword...) =
    LazyCall(Arguments(positional, Dict(keyword)), f)
import Base.==
# Two Arguments are equal when both the positional tuples and the keyword
# dictionaries compare equal.
==(a::Arguments, b::Arguments) =
    (a.positional == b.positional) && (a.keyword == b.keyword)
# Two LazyCalls are equal when they wrap equal functions and equal arguments.
==(a::LazyCall, b::LazyCall) =
    (a.function_call == b.function_call) && (a.arguments == b.arguments)
export run
"""
    run(a::Arguments)

Call `run` on the [`Arguments`](@ref) in `a`

# Examples
```julia
run_test = @chain begin
    collect_arguments([1, 2], [3, 4])
    unshift(_, vcat)
    collect_arguments(_, map)
    run
end
@test run_test == map(vcat, [1, 2], [3, 4])
```
"""
# Delegates to the two-argument method with `run` itself as the function.
Base.run(a::Arguments) = run(a, run)
"""
run(l::LazyCall)
Call `l.function_call` on the [`Arguments`](@ref) in `l.arguments`
# Examples
```julia
run_test = @chain begin
collect_arguments([1, 2], [3, 4])
unshift(_, vcat)
LazyCall(_, map)
run
end
@test run_test == map(vcat, [1, 2], [3, 4])
```
"""
Base.run(l::LazyCall) = run(l.arguments, l.function_call)
"""
run(a::Arguments, f::Function)
Call `f` on the [`Arguments`](@ref) in `a`
# Examples
```julia
run_test = @chain begin
collect_arguments([1, 2], [3, 4])
unshift(_, vcat)
run(_, map)
end
@test run_test == map(vcat, [1, 2], [3, 4])
```
"""
Base.run(a::Arguments, f::Function) = f(a.positional...; a.keyword...)
"""
run(l::LazyCall, f::Function)
Insert `l.function_call` as the first positional argument in
`l.arguments`, the standard position for functional programming,
then `run` `f` on the result.
# Examples
```julia
run_test = @chain begin
collect_arguments([1, 2], [3,4])
LazyCall(_, vcat)
run(_, map)
end
@test run_test == map(vcat, [1, 2], [3, 4])
```
"""
Base.run(l::LazyCall, f::Function) = @chain begin
l.arguments
unshift(_, l.function_call)
run(_, f)
end
export lazy_call
"""
    @lazy_call(e)

Will break apart a function call into a [`LazyCall`](@ref) object.

# Examples
```julia
test_function(arguments...; keyword_arguments...) =
    (arguments, keyword_arguments)
@test ( @lazy_call test_function(1, 2, a = 3) ) ==
    collect_call(test_function, 1, 2, a = 3)
```
"""
# Expression transform behind `@lazy_call`: a call `f(args...)` becomes
# `collect_call(f, args...)`; any other expression is passed through untouched.
lazy_call(e) =
    MacroTools.@match e begin
        a_(b__) => Expr(:call, :collect_call, a, b...)
        a_ => a
    end
# `@nonstandard` generates the macro form `@lazy_call` from the function above.
@nonstandard lazy_call
export @lazy_call
|
{"hexsha": "a94c33a0f2edc9200f9f6f38f7dbaf60dc6d60de", "size": 6792, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/LazyCall.jl", "max_stars_repo_name": "JuliaPackageMirrors/ChainMap.jl", "max_stars_repo_head_hexsha": "7217dd05ee494751a81469cd0082a0972e95591b", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/LazyCall.jl", "max_issues_repo_name": "JuliaPackageMirrors/ChainMap.jl", "max_issues_repo_head_hexsha": "7217dd05ee494751a81469cd0082a0972e95591b", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/LazyCall.jl", "max_forks_repo_name": "JuliaPackageMirrors/ChainMap.jl", "max_forks_repo_head_hexsha": "7217dd05ee494751a81469cd0082a0972e95591b", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 20.3963963964, "max_line_length": 73, "alphanum_fraction": 0.6389870436, "num_tokens": 1977}
|
# Placeholders for built-in ports
# Stub definitions of the framework ports referenced implicitly by the
# special (command/event/telemetry/parameter/time) port kinds below.
module Fw {
  port Cmd
  port CmdReg
  port CmdResponse
  port Log
  port LogText
  port PrmGet
  port PrmSet
  port Time
  port Tlm
}
# A user-defined port type for the typed general ports below.
port P
# Exercises every general port kind (typed and serial, with queue size,
# priority and overflow behaviour) plus every special port kind.
active component C {
  async input port t1: [10] P priority 3 drop
  sync input port t2: P
  guarded input port t3: P
  output port t4: P
  async input port s1: [10] serial priority 3 drop
  sync input port s2: serial
  guarded input port s3: serial
  output port s4: serial
  command recv port cmdIn
  command reg port cmdRegIn
  command resp port cmdResponseIn
  event port eventOut
  param get port paramGetOut
  param set port paramSetOut
  telemetry port tlmOut
  text event port textEventOut
  time get port timeGetOut
}
|
{"hexsha": "01cb6356df6dff5c80667152b410f3e6860e75d9", "size": 709, "ext": "fpp", "lang": "FORTRAN", "max_stars_repo_path": "compiler/tools/fpp-check/test/port_instance/ok.fpp", "max_stars_repo_name": "kevin-f-ortega/fpp", "max_stars_repo_head_hexsha": "ee355fc99eb8040157c62e69f58ac6a8435cd981", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 24, "max_stars_repo_stars_event_min_datetime": "2021-02-11T19:54:39.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-01T22:13:17.000Z", "max_issues_repo_path": "compiler/tools/fpp-check/test/port_instance/ok.fpp", "max_issues_repo_name": "kevin-f-ortega/fpp", "max_issues_repo_head_hexsha": "ee355fc99eb8040157c62e69f58ac6a8435cd981", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 107, "max_issues_repo_issues_event_min_datetime": "2020-12-14T16:37:34.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-04T02:41:31.000Z", "max_forks_repo_path": "compiler/tools/fpp-check/test/port_instance/ok.fpp", "max_forks_repo_name": "kevin-f-ortega/fpp", "max_forks_repo_head_hexsha": "ee355fc99eb8040157c62e69f58ac6a8435cd981", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 9, "max_forks_repo_forks_event_min_datetime": "2021-02-19T08:28:26.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-10T20:25:34.000Z", "avg_line_length": 18.1794871795, "max_line_length": 50, "alphanum_fraction": 0.7320169252, "num_tokens": 217}
|
[STATEMENT]
lemma exception_of_option_\<I> [simp]: "map_\<I> id exception_of_option (stop_\<I> \<I>) = exception_\<I> \<I>"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. map_\<I> id exception_of_option (stop_\<I> \<I>) = exception_\<I> \<I>
[PROOF STEP]
by(simp add: exception_\<I>_def)
|
{"llama_tokens": 123, "file": "Constructive_Cryptography_CM_More_CC", "length": 1}
|
[STATEMENT]
lemma elements_matD [dest]:
"a \<in> elements_mat A \<Longrightarrow> \<exists>i j. i < dim_row A \<and> j < dim_col A \<and> a = A $$ (i,j)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. a \<in> elements_mat A \<Longrightarrow> \<exists>i j. i < dim_row A \<and> j < dim_col A \<and> a = A $$ (i, j)
[PROOF STEP]
unfolding elements_mat_def
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. a \<in> set (concat (map (\<lambda>i. map (\<lambda>j. A $$ (i, j)) [0..<dim_col A]) [0..<dim_row A])) \<Longrightarrow> \<exists>i j. i < dim_row A \<and> j < dim_col A \<and> a = A $$ (i, j)
[PROOF STEP]
by force
|
{"llama_tokens": 261, "file": "Jordan_Normal_Form_Matrix", "length": 2}
|
import cv2
import numpy as np
from car import Car
from liness import Line
from liness import Area
# def from_new_to_car_dic_and_obj(id):
# """
# 从new_car_dic中转移到car_dic中
# """
# global new_car_dic, cars_dic
# cars_dic[id] = new_car_dic[id] #转移数据
# del new_car_dic[id] #从new_car_dic删除该数据
#
#
# def judge_car_in_lines(bbox, core):
# """
# 判断车辆在哪条车道线
# """
# global lines_dic
# x1, x2, x3, x4 = [x[0] for x in bbox]
# y1, y2, y3, y4 = [y[1] for y in bbox]
# x_core, y_core = core
#all_line_fit = all_line #所有车道函数-------------------
#判断core
#for line in all_line_fit: #line是numpy中的poly1d
# if line(y_core) < x_core < line(y_core): #在这条车道上
# runrunrun #比较之前的车道位置,若是新的则添加,如果前后车道位置不一则直接判定车辆车道移动
# break
#在判断四点
#for line in all_line_fit:
# if core_shifting: #如果车道偏移直接判定啦,不用再判定4点
# break
##判断在车道
# def process_violations():
# """
# 处理违规
# """
# global cars_dic
# for car in cars_dic:
# if car['violation'][0] == 0: #没有违规则跳过
# break
# _, *car_error = car['violations']
# for error in car_error: #遍历所有违规
# if error == 'abc': #违规类型:
# pass #违规处理
# car['violation'][0] = 0 #违规处理完毕,初始化
#
#
# def run(bbox):
# """
# 运行
# :return:
# """
# global lines_dic
# x1, x2, x3, x4 = [x[0] for x in bbox]
# y1, y2, y3, y4 = [y[1] for y in bbox]
#判定车辆车头坐标
#core = get_car_core(bbox) #车辆中心
#line_result = judge_car_in_lines(bbox, core) #判断车辆在哪条车道线
#line_error_result = judge_car_in_line_error(line_result) #判断车辆在车道线的错误 比较之前的车道位置,若是新的则添加,如果前后车道位置不一则直接判定车辆车道移动
#check_car_have_error() #对结果进行检查,看是否有违规现象并更新数据,可同时对多个违规进行检查,并且保存数据
#--------- 此程序主要为了实现压线,连续变道,逆行,考虑加上右侧超车
#-------- 不按车道行驶,红灯放后面做,规则复杂
#bbox = car_dic[car_id]['bbox']
#x1, x2, x3, x4 = [x[0] for x in bbox]
#y1, y2, y3, y4 = [y[1] for y in bbox]
#core_x = bbox[0][0] + bbox[1][0] + bbox[2][0] + bbox[3][0] // 4
#core_y = bbox[0][1] + bbox[1][1] + bbox[2][1] + bbox[3][1] // 4
# Hand-picked lane-line endpoints in pixel coordinates.
line_point = [(400, 450), (700, 450)]  # manually marked points
line_point2 = [(450, 550), (600, 550)]  # NOTE(review): defined but not registered in lines_lis
lines_lis = [line_point]  # line segments to instantiate
lines_dic = {}  # name -> Line instance
def creat_lines(img):
    """Instantiate a ``Line`` for every segment in ``lines_lis``.

    Each line is stored in ``lines_dic`` under the key ``line<index>`` and
    drawn onto ``img`` for visualisation.
    """
    global lines_lis, lines_dic
    for index, endpoints in enumerate(lines_lis):
        start, end = endpoints[0], endpoints[1]
        key = 'line' + f'{index}'
        lines_dic[key] = Line(key, start, end)  # instantiate the line
        cv2.line(img, start, end, (0, 0, 255), 4, 4)  # visualise
new_car_dic = {}  # newly detected vehicles awaiting confirmation (id -> {'count': int})
cars_dic = {}  # confirmed vehicles (id -> Car instance)
def is_new_car(car_ids, bboxs, plates):
    """Track detections across frames and promote stable ones to ``Car`` objects.

    A newly seen id is parked in ``new_car_dic`` with a hit counter; once it
    has accumulated enough consecutive detections (count >= 2, to filter out
    spurious detections) it is promoted to a ``Car`` instance in ``cars_dic``
    and removed from the pending area.  Ids that stop being detected are
    decayed every frame and eventually dropped from both dictionaries.

    Args:
        car_ids: ids of the vehicles detected in the current frame.
        bboxs: bounding boxes parallel to ``car_ids`` (4 corner points each).
        plates: licence-plate info (currently an unused placeholder).
    """
    global new_car_dic, cars_dic
    for obj in range(len(car_ids)):  # iterate over all detected ids
        car_id = car_ids[obj]
        if car_id in cars_dic:  # already confirmed: just refresh its bbox
            cars_dic[car_id]['bbox'] = bboxs[obj]
        elif car_id in new_car_dic:  # pending: one more consecutive hit
            new_car_dic[car_id]['count'] += 1
            if new_car_dic[car_id]['count'] >= 2:  # stable enough -> promote
                cars_dic[car_id] = Car()  # instantiate the Car
                cars_dic[car_id]['bbox'] = bboxs[obj]  # record its bbox
                # Promotion complete: drop the pending entry so it does not
                # linger in new_car_dic (mirrors from_new_to_car_dic_and_obj).
                del new_car_dic[car_id]
        else:
            new_car_dic[car_id] = {'count': 0}  # first sighting goes to the pending area
    # Decay entries that were not detected this frame; delete confirmed cars
    # after 10 misses and pending candidates after 3 misses.
    for car_id in list(cars_dic.keys()):
        if car_id not in car_ids:
            cars_dic[car_id]['count'] -= 1
            if cars_dic[car_id]['count'] <= -10:
                del cars_dic[car_id]
    for car_id in list(new_car_dic.keys()):
        if car_id not in car_ids:
            new_car_dic[car_id]['count'] -= 1
            if new_car_dic[car_id]['count'] <= -3:
                del new_car_dic[car_id]
# Manually marked corner points, clockwise or counter-clockwise; a proper
# sorting algorithm is still TODO.
area_point = [(500, 520), (400, 650), (600, 650), (650, 520)]
#area_point2 = [(700, 600), (1200, 600), (1200, 500), (700, 500)]
area_point3 = [(560, 300), (560, 400), (660, 400), (660, 300)]
area_lis = [area_point, area_point3]  # polygons to instantiate as Areas
area_dic = {}  # name -> Area instance
def creat_area(img):
    """Instantiate an ``Area`` (closed polygon of ``Line`` edges) per entry of
    ``area_lis``, store it in ``area_dic`` and draw its edges onto ``img``."""
    global area_lis, area_dic
    # sort corners (counter-clockwise) -- still TODO
    # build the edge lines in a loop
    for area_count, area in enumerate(area_lis):  # iterate over all areas
        area_lines_lis = []
        area_name = 'area' + f'{area_count}'
        # Closing edge between the first and last corner goes in first.
        area_lines_lis.append(Line('line0', area[0], area[-1]))
        cv2.line(img, area[0], area[-1], (0, 0, 255), 4, 4)  # visualise
        for count, point in enumerate(area):  # edges between consecutive corners
            if count == len(area)-1:
                break
            line_name = 'line' + f'{count+1}'
            area_lines_lis.append(Line(line_name, area[count], area[count+1]))  # instantiate edge
            cv2.line(img, area[count], area[count+1], (0, 0, 255), 4, 4)  # visualise
        area_dic[area_name] = Area(area_name, area_lines_lis)  # instantiate the Area
# def crash_area_test(car_id, img):
# """
# 简单地测试一下简单的功能
# """
# global cars_dic, area_dic
# core = cars_dic[car_id].get_core()
# for area in area_dic.get(): # 遍历所有area
# area_name = area.get_name() # 获得当前area名字
# if area.get_minmaxy()[0] < core[1] < area.get_minmaxy()[1] and \
# area.get_minmaxx()[0] < core[0] < area.get_minmaxx()[1]: # 在检测范围内
# if area.pointx_in(core[0]) and area.pointy_in(core[1]): # core在area内部
# if area_name in cars_dic[car_id]['violation']: # 已有记录
# cars_dic[car_id]['violation'][area_name][0] = 1 # 添加违规标记
# else:
# cars_dic[car_id]['violation'][area_name] = [1] # 添加违规标记,添加记录
# continue
def crash_area(car_id, img):
    """No-entry area check: flag a violation when the car's centre point lies
    inside any registered ``Area``, and annotate ``img`` when it does."""
    global cars_dic, area_dic
    core = cars_dic[car_id].get_core()
    core = (int(core[0]), int(core[1]))
    for area in list(area_dic.keys()):  # iterate over all areas
        area_name = area_dic[area].get_name()  # name of the current area
        xmin, xmax, ymin, ymax = area_dic[area_name].get_minmaxxy_area(accurate=True)  # detection range
        cv2.rectangle(img, (xmin, ymin), (xmax, ymax), (255, 255, 255), 2)  # visualise detection range
        if ymin < core[1] < ymax and xmin < core[0] < xmax:  # inside the bounding range
            #print(f'ID: {car_id}条件符合,碰撞检测!')
            if area_dic[area_name].core_in_area(core, img):  # centre is inside the polygon
                #print(f'ID: {car_id} 碰撞Area')
                if area_name in cars_dic[car_id]['violations']:  # existing record
                    cars_dic[car_id]['violations'][area_name][0] = 1  # set violation flag
                else:
                    cars_dic[car_id]['violations'][area_name] = [1, area_name]  # flag and create record
                zi = f'YaArea, ID={car_id}'
                cv2.putText(img, zi, (int(cars_dic[car_id]['bbox'][0][0]), int(cars_dic[car_id]['bbox'][0][1])),
                            4, .8, (0, 0, 255), 1)  # visualise
def crash_line(car_id, img):
    """Lane-crossing check: flag a violation when the car's centre point
    switches sides of any registered ``Line``, annotating ``img`` when it does."""
    global cars_dic, lines_dic
    #print(f'ID: {car_id}执行压线检测')
    core = cars_dic[car_id].get_core()
    for line_name in list(lines_dic.keys()):  # iterate over all line segments
        if lines_dic[line_name].get_minmaxy()[0] < core[1] < lines_dic[line_name].get_minmaxy()[1] and \
                lines_dic[line_name].get_minmaxx()[0] < core[0] < lines_dic[line_name].get_minmaxx()[1]:  # inside detection range
            #print(f'ID: {car_id}条件符合,压线检测!')
            if core[0] > lines_dic[line_name].fy_to_x(core[1]):  # which side of the line?
                position = 'R'
            else:
                position = 'L'
            if line_name in cars_dic[car_id]['violations']:  # existing record
                if position != cars_dic[car_id]['violations'][line_name][1]:  # side changed since last record
                    cars_dic[car_id]['violations'][line_name][0] = 1  # set violation flag
                    zi = f'YaXian, ID={car_id}'
                    cv2.putText(img, zi, (int(cars_dic[car_id]['bbox'][0][0]+30), int(cars_dic[car_id]['bbox'][0][1])),
                                4, .8, (0, 0, 255), 1)  # visualise
            else:
                cars_dic[car_id]['violations'][line_name] = [0, position]  # remember the side
#violations = [crash_line]  # alternative: line check only
#violations = [crash_area]  # alternative: area check only
violations = [crash_line, crash_area]  # all violation checks executed per frame
def detect_violation(car_ids, img):
    """Run every registered violation check on each confirmed vehicle.

    Args:
        car_ids: ids of the vehicles detected in the current frame.
        img: frame image the checks may draw annotations on.
    """
    global cars_dic, violations
    for violation in violations:  # each registered check in turn
        for car_id in car_ids:  # every currently detected vehicle
            if car_id in cars_dic:  # only vehicles already promoted to Car
                violation(car_id, img)  # run the check
# def pre_creat(img):
# creat_lines(img) #实例化线
# creat_area(img) #实例化区域
def detect_violation_run(car_ids, bboxs, img):
    """Per-frame driver: set up lines/areas, update vehicle tracking, run all
    violation checks and annotate confirmed vehicle ids onto ``img``."""
    creat_lines(img)  # instantiate lines
    creat_area(img)  # instantiate areas
    # licence plates (TODO)
    # Register each newly detected id in new_car_dic; after enough consecutive
    # detections a Car is created and takes part in violation checks.  Cars
    # that go undetected repeatedly are cleaned up.
    is_new_car(car_ids, bboxs, plates='NULL')
    # direction detection (TODO)
    # violation prediction
    detect_violation(car_ids, img)  # pass in all vehicles detected this frame
    # violation output (TODO)
    ###
    if len(cars_dic):
        for car_key in cars_dic.keys():
            zi = f'creatID:{car_key}'
            cv2.putText(img, zi, (int(cars_dic[car_key]['bbox'][0][0]), int(cars_dic[car_key]['bbox'][0][1])-15),
                        4, .5, (0, 0, 255), 1)  # visualise confirmed ids
    # if len(cars_dic) != 0:
    #     for iid in list(cars_dic.keys()):
    #         print(f'\n{cars_dic[iid]["violations"]}')
if __name__ == '__main__':
    pass  # module is intended to be imported; no standalone entry point yet
|
{"hexsha": "5b1cea7e7f172637607a1cd337f3bd93e61e39b3", "size": 9693, "ext": "py", "lang": "Python", "max_stars_repo_path": "python/prediction/Violation.py", "max_stars_repo_name": "csd2022fuchuang/yolov5-opencv-cpp-python", "max_stars_repo_head_hexsha": "5b52dbffed6733a1353bd27a0001c09821ee0714", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "python/prediction/Violation.py", "max_issues_repo_name": "csd2022fuchuang/yolov5-opencv-cpp-python", "max_issues_repo_head_hexsha": "5b52dbffed6733a1353bd27a0001c09821ee0714", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "python/prediction/Violation.py", "max_forks_repo_name": "csd2022fuchuang/yolov5-opencv-cpp-python", "max_forks_repo_head_hexsha": "5b52dbffed6733a1353bd27a0001c09821ee0714", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2022-03-24T09:01:45.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-24T09:01:45.000Z", "avg_line_length": 28.2594752187, "max_line_length": 119, "alphanum_fraction": 0.5572062313, "include": true, "reason": "import numpy", "num_tokens": 3737}
|
#
# This file is a part of MolecularGraph.jl
# Licensed under the MIT License http://opensource.org/licenses/MIT
#
# Tests for DAG utilities: ancestors, descendants and topological sorting.
@testset "graph.dag" begin
    graph = plaindigraph(10, [
        (1, 4), (2, 4), (3, 7), (4, 5), (4, 6),
        (4, 7), (6, 9), (7, 8), (7, 9), (7, 10)
    ])
    @test issetequal(ancestors(graph, 7), [1, 2, 3, 4])
    @test issetequal(descendants(graph, 4), [5, 6, 7, 8, 9, 10])
    nodes = topologicalsort(graph)
    # Node 4 has all of 1-3 before it and 5-10 after it, so it must sit at
    # position 4; the last node must be one of the sinks.
    @test nodes[4] == 4
    @test nodes[end] in [5, 8, 9, 10]
end # graph.dag
|
{"hexsha": "01d46885f4a30ee2a05208996da7f71810c1c0ad", "size": 520, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "test/graph/dag.jl", "max_stars_repo_name": "hhaensel/MolecularGraph.jl", "max_stars_repo_head_hexsha": "c54ccdf09274e36ed3d866604f99b497a39bfaf5", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 126, "max_stars_repo_stars_event_min_datetime": "2019-01-28T06:54:27.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-22T08:45:46.000Z", "max_issues_repo_path": "test/graph/dag.jl", "max_issues_repo_name": "timholy/MolecularGraph.jl", "max_issues_repo_head_hexsha": "90d6f6175f30023ffce92d9bbc386f8659866508", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 64, "max_issues_repo_issues_event_min_datetime": "2019-04-19T03:33:52.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-22T23:34:44.000Z", "max_forks_repo_path": "test/graph/dag.jl", "max_forks_repo_name": "timholy/MolecularGraph.jl", "max_forks_repo_head_hexsha": "90d6f6175f30023ffce92d9bbc386f8659866508", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 30, "max_forks_repo_forks_event_min_datetime": "2019-02-07T04:08:52.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-22T03:33:20.000Z", "avg_line_length": 30.5882352941, "max_line_length": 67, "alphanum_fraction": 0.5634615385, "num_tokens": 211}
|
################################################################################
#
# Abstract types
#
################################################################################
# abstract spaces: common supertype of the quadratic and hermitian space
# types below; S is the type of the base field.
abstract type AbsSpace{S} end
# abstract lattices; S parametrizes the space type the lattice lives in.
abstract type AbsLat{S} end
################################################################################
#
# Quadratic spaces
#
################################################################################
# A quadratic space over the field `K` with Gram matrix `gram`.  The matrix
# type `T` is always the dense matrix type over `K`; the inner constructor
# coerces other matrix types when possible.
@attributes mutable struct QuadSpace{S, T} <: AbsSpace{S}
  K::S      # base field
  gram::T   # Gram matrix of the form
  function QuadSpace(K::S, G::T) where {S, T}
    # I also need to check if the gram matrix is Hermitian
    if dense_matrix_type(elem_type(S)) === T
      # G already has the canonical dense matrix type over K.
      z = new{S, T}(K, G)
    else
      try
        # Coerce the entries into K and verify the result has the expected type.
        Gc = change_base_ring(K, G)
        if typeof(Gc) !== dense_matrix_type(elem_type(S))
          error("Cannot convert entries of the matrix to the number field")
        end
        @assert base_ring(Gc) === K
        z = new{S, dense_matrix_type(elem_type(S))}(K, Gc)
        return z
      catch e
        rethrow(e)
      end
    end
  end
end
################################################################################
#
# Hermitian spaces
#
################################################################################
# A hermitian space over the degree-2 extension `E` of `K`, with Gram matrix
# `gram` and the non-trivial automorphism of `E` as `involution`.
@attributes mutable struct HermSpace{S, T, U, W} <: AbsSpace{S}
  E::S            # extension field (degree 2 over K)
  K::T            # base field of E
  gram::U         # Gram matrix over E
  involution::W   # the non-identity automorphism of E
  function HermSpace(E::S, gram::U) where {S, U}
    # I also need to check if the gram matrix is Hermitian
    if dense_matrix_type(elem_type(S)) === U
      gramc = gram
    else
      try
        # Coerce the entries into E and verify the result has the expected type.
        gramc = change_base_ring(E, gram)
        if typeof(gramc) !== dense_matrix_type(elem_type(S))
          error("Cannot convert entries of the matrix to the number field")
        end
      catch e
        rethrow(e)
      end
    end
    @assert degree(E) == 2
    # Pick the automorphism that moves the generator: that is the involution.
    A = automorphisms(E)
    a = gen(E)
    if A[1](a) == a
      involution = A[2]
    else
      involution = A[1]
    end
    K = base_field(E)
    z = new{S, typeof(K), dense_matrix_type(elem_type(S)), typeof(involution)}(E, K, gramc, involution)
    return z
  end
end
|
{"hexsha": "6762d0ccf5a1e1a0c8aa13073044fbc459a17aa8", "size": 2126, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/QuadForm/Types.jl", "max_stars_repo_name": "albinahlback/Hecke.jl", "max_stars_repo_head_hexsha": "728be6098a8dbfe2589fca77e57f10950cefe9e7", "max_stars_repo_licenses": ["BSD-2-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/QuadForm/Types.jl", "max_issues_repo_name": "albinahlback/Hecke.jl", "max_issues_repo_head_hexsha": "728be6098a8dbfe2589fca77e57f10950cefe9e7", "max_issues_repo_licenses": ["BSD-2-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/QuadForm/Types.jl", "max_forks_repo_name": "albinahlback/Hecke.jl", "max_forks_repo_head_hexsha": "728be6098a8dbfe2589fca77e57f10950cefe9e7", "max_forks_repo_licenses": ["BSD-2-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 25.0117647059, "max_line_length": 103, "alphanum_fraction": 0.458607714, "num_tokens": 513}
|
module FIGlet
using Pkg
# Resolve the bundled font directory: prefer the Pkg artifact (Julia >= 1.3),
# otherwise fall back to the deps directory next to the package.
if isdefined(Pkg, :Artifacts)
    using Pkg.Artifacts
    @eval fontsroot = artifact"fonts"
else
    fontsroot = normpath(@__DIR__, "..", "deps")
end
# Absolute path to the directory containing the .flf font files.
const FONTSDIR = abspath(normpath(joinpath(fontsroot, "FIGletFonts-0.5.0", "fonts")))
# Font files that the parser cannot currently handle; excluded from listings.
const UNPARSEABLES = [
              "nvscript.flf",
             ]
# Root type of all errors raised by this module.
abstract type FIGletError <: Exception end
"""
Width is not sufficient to print a character
"""
struct CharNotPrinted <: FIGletError end
"""
Font can't be located
"""
struct FontNotFoundError <: FIGletError
    msg::String
end
Base.showerror(io::IO, e::FontNotFoundError) = print(io, "FontNotFoundError: $(e.msg)")
"""
Problem parsing a font file
"""
struct FontError <: FIGletError
    msg::String
end
Base.showerror(io::IO, e::FontError) = print(io, "FontError: $(e.msg)")
"""
Color is invalid
"""
struct InvalidColorError <: FIGletError end
# Bit flags of the FLF "full layout" header field: each value selects one
# horizontal/vertical smushing rule or fitting mode.
Base.@enum(Layout,
    FullWidth = -1,
    HorizontalSmushingRule1 = 1,
    HorizontalSmushingRule2 = 2,
    HorizontalSmushingRule3 = 4,
    HorizontalSmushingRule4 = 8,
    HorizontalSmushingRule5 = 16,
    HorizontalSmushingRule6 = 32,
    HorizontalFitting = 64,
    HorizontalSmushing = 128,
    VerticalSmushingRule1 = 256,
    VerticalSmushingRule2 = 512,
    VerticalSmushingRule3 = 1024,
    VerticalSmushingRule4 = 2048,
    VerticalSmushingRule5 = 4096,
    VerticalFitting = 8192,
    VerticalSmushing = 16384,
)
# Parsed FLF header line.  Field meanings follow the FIGfont format:
struct FIGletHeader
    hardblank::Char        # character used in glyphs for a non-smushable blank
    height::Int            # rows per glyph
    baseline::Int          # rows from glyph top to the baseline
    max_length::Int        # maximum glyph line length
    old_layout::Int        # legacy layout field
    comment_lines::Int     # number of comment lines following the header
    print_direction::Int   # 0 = left-to-right, 1 = right-to-left
    full_layout::Int       # bit mask of Layout flags
    codetag_count::Int     # number of code-tagged characters
    function FIGletHeader(
                          hardblank,
                          height,
                          baseline,
                          max_length,
                          old_layout,
                          comment_lines,
                          print_direction=0,
                          full_layout=Int(HorizontalSmushingRule2),
                          codetag_count=0,
                          args...,
                         )
        # Extra header fields are tolerated but reported.
        length(args) >0 && @warn "Received unknown header attributes: `$args`."
        # Clamp obviously invalid values to sane minimums.
        height < 1 && ( height = 1 )
        max_length < 1 && ( max_length = 1 )
        print_direction < 0 && ( print_direction = 0 )
        # max_length += 100 # Give ourselves some extra room
        new(hardblank, height, baseline, max_length, old_layout, comment_lines, print_direction, full_layout, codetag_count)
    end
end
# Convenience constructor used when the header fields are still the raw
# strings split from the FLF header line: parses each numeric field.
function FIGletHeader(
                      hardblank,
                      height::AbstractString,
                      baseline::AbstractString,
                      max_length::AbstractString,
                      old_layout::AbstractString,
                      comment_lines::AbstractString,
                      print_direction::AbstractString="0",
                      full_layout::AbstractString="2",
                      codetag_count::AbstractString="0",
                      args...,
                     )
    return FIGletHeader(
                        hardblank,
                        parse(Int, height),
                        parse(Int, baseline),
                        parse(Int, max_length),
                        parse(Int, old_layout),
                        parse(Int, comment_lines),
                        parse(Int, print_direction),
                        parse(Int, full_layout),
                        parse(Int, codetag_count),
                        args...,
                       )
end
# One glyph: the character it renders plus its height x width grid of chars.
struct FIGletChar
    ord::Char
    thechar::Matrix{Char}
end
# A fully parsed font: header, glyph table and the FLF format version.
struct FIGletFont
    header::FIGletHeader
    font_characters::Dict{Char,FIGletChar}
    version::VersionNumber
end
Base.show(io::IO, ff::FIGletFont) = print(io, "FIGletFont(n=$(length(ff.font_characters)))")
"""
    readmagic(io)

Read and validate the five-byte FLF magic header from `io`.

Throws a [`FontError`](@ref) unless the stream starts with `"flf2"`;
warns when the fifth byte is not `'a'`.  Returns the bytes read.
"""
function readmagic(io)
    header = read(io, 5)
    if header[1:4] != UInt8['f', 'l', 'f', '2']
        throw(FontError("File is not a valid FIGlet Lettering Font format. Magic header values must start with `flf2`."))
    end
    header[5] == UInt8('a') || @warn "File may be a FLF format but not flf2a."
    return header
end
"""
    readfontchar(io, ord, height)

Read one `height`-row glyph for the character `ord` from `io` and return it
as a [`FIGletChar`](@ref).

The glyph width is the length of the first row minus one (the trailing
end-mark); characters beyond that width on any row are ignored.
"""
function readfontchar(io, ord, height)
    row = readline(io)
    width = length(row) - 1
    width == -1 && throw(FontError("Unable to find character `$ord` in FIGlet Font."))
    glyph = Matrix{Char}(undef, height, width)
    for h in 1:height
        h > 1 && (row = readline(io))
        for (w, c) in enumerate(row)
            w > width && break
            glyph[h, w] = c
        end
    end
    return FIGletChar(ord, glyph)
end
Base.show(io::IO, fc::FIGletChar) = print(io, "FIGletChar(ord='$(fc.ord)')")
# Resolve a font by name or path and parse it.  `s` may be: an existing file
# path, a file name inside FONTSDIR, or a font name without the .flf suffix.
function readfont(s::AbstractString)
    name = s
    if !isfile(name)
        # Not a direct path: look inside the bundled fonts directory.
        name = abspath(normpath(joinpath(FONTSDIR, name)))
        if !isfile(name)
            # Last attempt: append the .flf extension.
            name = "$name.flf"
            !isfile(name) && throw(FontNotFoundError("Cannot find font `$s`."))
        end
    end
    font = open(name) do f
        readfont(f)
    end
    return font
end
# Parse a full FLF font from the stream `io`: magic bytes, header line,
# comment lines, the required ASCII glyphs, the optional German glyphs,
# and finally any explicitly code-tagged glyphs.
function readfont(io)
    magic = readmagic(io)
    header = split(readline(io))
    fig_header = FIGletHeader(
                              header[1][1],
                              header[2:end]...,
                             )
    # Skip the free-form comment block announced by the header.
    for i in 1:fig_header.comment_lines
        discard = readline(io)
    end
    fig_font = FIGletFont(
                          fig_header,
                          Dict{Char, FIGletChar}(),
                          v"2.0.0",
                         )
    # The printable ASCII range is mandatory in every FLF font.
    for c in ' ':'~'
        fig_font.font_characters[c] = readfontchar(io, c, fig_header.height)
    end
    # Seven German characters follow by convention, when present.
    for c in ['Ä', 'Ö', 'Ü', 'ä', 'ö', 'ü', 'ß']
        if bytesavailable(io) > 1
            fig_font.font_characters[c] = readfontchar(io, c, fig_header.height)
        end
    end
    # Remaining glyphs are preceded by a code-tag line giving the code point;
    # a leading '-' denotes a negative (two's-complement UInt16) code.
    while bytesavailable(io) > 1
        s = readline(io)
        strip(s) == "" && continue
        s = split(s)[1]
        c = if '-' in s
            Char(-(parse(UInt16, strip(s, '-'))))
        else
            Char(parse(Int, s))
        end
        fig_font.font_characters[c] = readfontchar(io, c, fig_header.height)
    end
    return fig_font
end
"""
    availablefonts(substring)

Walk `FONTSDIR` and return the sorted names (without the `.flf` suffix) of
every parseable font whose file name contains `substring`
(case-insensitively); an empty `substring` matches everything.
"""
function availablefonts(substring)
    needle = lowercase(substring)
    found = String[]
    for (root, dirs, files) in walkdir(FONTSDIR)
        for file in files
            file in UNPARSEABLES && continue  # skip known-broken fonts
            if substring == "" || occursin(needle, lowercase(file))
                push!(found, replace(file, ".flf"=>""))
            end
        end
    end
    sort!(found)
    return found
end
"""
availablefonts() -> Vector{String}
availablefonts(substring::AbstractString) -> Vector{String}
Returns all available fonts.
If `substring` is passed, returns available fonts that contain the case insensitive `substring`.
Example:
julia> availablefonts()
680-element Array{String,1}:
"1943____"
"1row"
⋮
"zig_zag_"
"zone7___"
julia> FIGlet.availablefonts("3d")
5-element Array{String,1}:
"3D Diagonal"
"3D-ASCII"
"3d"
"Henry 3D"
"Larry 3D"
julia>
"""
availablefonts() = availablefonts("")
"""
    render(io, text::AbstractString, font::FIGletFont)

Write `text` rendered with `font` to `io`, row by row, replacing the font's
hardblank character with a space in the final output.
"""
function render(io, text::AbstractString, font::FIGletFont)
    buffer = IOBuffer()
    glyphs = FIGletChar[font.font_characters[c] for c in text]
    for row in 1:font.header.height
        for glyph in glyphs
            print(buffer, join(glyph.thechar[row, :]))
        end
        print(buffer, '\n')
    end
    print(io, replace(String(take!(buffer)), font.header.hardblank=>' '))
end
# Convenience overload: look the font up by name/path first.
render(io, text::AbstractString, font::AbstractString) = render(io, text, readfont(font))
"""
    render(text::AbstractString, font::Union{AbstractString, FIGletFont})

Renders `text` using `font` to `stdout`

Example:

    render("hello world", "standard")
    render("hello world", readfont("standard"))
"""
render(text::AbstractString, font) = render(stdout, text, font)
end # module
|
{"hexsha": "513b32a3d2fe8a5ecf4ff6670b03cfee226cb2f6", "size": 8298, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/FIGlet.jl", "max_stars_repo_name": "wookay/TestFIGlet", "max_stars_repo_head_hexsha": "44ecf528acc28b925a1523093f4ccf4372973a42", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/FIGlet.jl", "max_issues_repo_name": "wookay/TestFIGlet", "max_issues_repo_head_hexsha": "44ecf528acc28b925a1523093f4ccf4372973a42", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/FIGlet.jl", "max_forks_repo_name": "wookay/TestFIGlet", "max_forks_repo_head_hexsha": "44ecf528acc28b925a1523093f4ccf4372973a42", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 26.6816720257, "max_line_length": 160, "alphanum_fraction": 0.5491684743, "num_tokens": 2045}
|
@testset "Forcings" begin
    # Scratch arrays for the raw-array API (2D force components and a 1D force)
    fx = zeros(5,5)
    fy = zeros(5,5)
    f1 = zeros(30)
    # For structs
    sys = Swalbe.SysConst(Lx=5, Ly=5)
    state = Swalbe.Sys(sys, "CPU")
    state.height .= 1.0
    # One dim model
    sys1D = Swalbe.SysConst_1D(L=30)
    state1D = Swalbe.Sys(sys1D)
    state1D.height .= 1.0
    @testset "Slippage" begin
        # No velocities
        Swalbe.slippage!(fx, fy, ones(5,5), zeros(5,5), zeros(5,5), 1.0, 1/6)
        Swalbe.slippage!(state, sys)
        @test all(fx .== 0.0)
        @test all(fy .== 0.0)
        @test all(state.slipx .== 0.0)
        @test all(state.slipy .== 0.0)
        # Velocity in x
        Swalbe.slippage!(fx, fy, ones(5,5), fill(0.1,5,5), zeros(5,5), 1.0, 1/6)
        state.velx .= 0.1
        Swalbe.slippage!(state, sys)
        @test all(fx .== 0.1/11)
        @test all(fy .== 0.0)
        # At some point I have to find out how to use all() with atol
        # Thanks to Tilman for the tip!
        @test all(isapprox.(state.slipx, 0.1/11; atol=1e-8))
        @test all(state.slipy .== 0.0)
        # Velocity in y
        Swalbe.slippage!(fx, fy, ones(5,5), zeros(5,5), fill(0.1,5,5), 1.0, 1/6)
        state.velx .= 0.0
        state.vely .= 0.1
        Swalbe.slippage!(state, sys)
        @test all(fx .== 0.0)
        @test all(fy .== 0.1/11)
        @test all(state.slipx .== 0.0)
        # BUGFIX: this line was missing its @test macro, so the expression was
        # evaluated and its result silently discarded.
        @test all(isapprox.(state.slipy, 0.1/11; atol=1e-8))
        # Velocity
        Swalbe.slippage!(fx, fy, ones(5,5), fill(-0.1,5,5), fill(0.1,5,5), 1.0, 1/6)
        state.velx .= -0.1
        state.vely .= 0.1
        Swalbe.slippage!(state, sys)
        @test all(fx .== -0.1/11)
        @test all(fy .== 0.1/11)
        @test all(isapprox.(state.slipx, -0.1/11; atol=1e-8))
        @test all(isapprox.(state.slipy, 0.1/11; atol=1e-8))
        # No slip
        Swalbe.slippage!(fx, fy, ones(5,5), fill(-0.1,5,5), fill(0.1,5,5), 0.0, 1/6)
        state.velx .= -0.1
        state.vely .= 0.1
        sys2 = Swalbe.SysConst(Lx=5,Ly=5,δ=0.0)
        Swalbe.slippage!(state, sys2)
        @test all(fx .== -0.1/2)
        @test all(fy .== 0.1/2)
        @test all(isapprox.(state.slipx, -0.1/2; atol=1e-8))
        @test all(isapprox.(state.slipy, 0.1/2; atol=1e-8))
    end
    @testset "Slippage 1D" begin
        # No velocities
        Swalbe.slippage!(f1, ones(30), zeros(30), 1.0, 1/6)
        Swalbe.slippage!(state1D, sys1D)
        @test all(f1 .== 0.0)
        @test all(state1D.slip .== 0.0)
        # Velocity in x
        Swalbe.slippage!(f1, ones(30), fill(0.1,30), 1.0, 1/6)
        state1D.vel .= 0.1
        state1D.slip .= 0.0
        Swalbe.slippage!(state1D, sys1D)
        @test all(f1 .== 0.1/11)
        @test all(isapprox.(state1D.slip, 0.1/11; atol=1e-8))
        # No slip
        Swalbe.slippage!(f1, ones(30), fill(-0.1,30), 0.0, 1/6)
        state1D.vel .= -0.1
        state1D.slip .= 0.0
        sys1D2 = Swalbe.SysConst_1D(L=30, δ=0.0)
        Swalbe.slippage!(state1D, sys1D2)
        @test all(f1 .== -0.1/2)
        @test all(isapprox.(state1D.slip, -0.1/2; atol=1e-8))
    end
    @testset "Pressure gradient" begin
        sys = Swalbe.SysConst(Lx=5, Ly=5)
        state = Swalbe.Sys(sys, "CPU")
        state.pressure .= reshape(collect(1.0:25),5,5)
        state.height .= 1.0
        Swalbe.h∇p!(state)
        solx = [-1.5 -1.5 -1.5 -1.5 -1.5;
                 1.0  1.0  1.0  1.0  1.0;
                 1.0  1.0  1.0  1.0  1.0;
                 1.0  1.0  1.0  1.0  1.0;
                -1.5 -1.5 -1.5 -1.5 -1.5]
        soly = [-7.5 5.0 5.0 5.0 -7.5;
                -7.5 5.0 5.0 5.0 -7.5;
                -7.5 5.0 5.0 5.0 -7.5;
                -7.5 5.0 5.0 5.0 -7.5;
                -7.5 5.0 5.0 5.0 -7.5]
        @test all(state.h∇px .== solx)
        @test all(state.h∇py .== soly)
    end
    @testset "Thermal" begin
        f1 = ones(50,50)
        f2 = ones(50,50)
        vartest = 2*0.01/11
        Swalbe.thermal!(f1, f2, ones(50,50), 0.01, 1/6, 1.0)
        @test mean(f1) ≈ 0.0 atol=1e-2
        @test mean(f2) ≈ 0.0 atol=1e-2
        @test var(f1) ≈ vartest atol=vartest/10
        @test var(f2) ≈ vartest atol=vartest/10
        # Using structs
        sys = Swalbe.SysConst(Lx=50, Ly=50, kbt=0.01)
        state = Swalbe.Sys(sys, "CPU", kind="thermal")
        Swalbe.thermal!(state, sys)
        @test mean(state.kbtx) ≈ 0.0 atol=1e-2
        @test mean(state.kbty) ≈ 0.0 atol=1e-2
        @test var(state.kbtx) ≈ vartest atol=vartest/10
        # BUGFIX: was `state.kbtx` twice — the y-component was never checked.
        @test var(state.kbty) ≈ vartest atol=vartest/10
        # More thermal energy
        vartest = 0.2/11
        Swalbe.thermal!(f1, f2, ones(50,50), 0.1, 1/6, 1.0)
        @test mean(f1) ≈ 0.0 atol=1e-2
        @test mean(f2) ≈ 0.0 atol=1e-2
        @test var(f1) ≈ vartest atol=vartest/10
        @test var(f2) ≈ vartest atol=vartest/10
        # Using structs
        sys = Swalbe.SysConst(Lx=50, Ly=50, kbt=0.1)
        state = Swalbe.Sys(sys, "CPU", kind="thermal")
        Swalbe.thermal!(state, sys)
        @test mean(state.kbtx) ≈ 0.0 atol=1e-2
        @test mean(state.kbty) ≈ 0.0 atol=1e-2
        @test var(state.kbtx) ≈ vartest atol=vartest/10
        # BUGFIX: was `state.kbtx` twice — the y-component was never checked.
        @test var(state.kbty) ≈ vartest atol=vartest/10
    end
    @testset "Thermal 1D" begin
        f1D = ones(100000)
        vartest = 2*0.01/11
        Swalbe.thermal!(f1D, ones(100000), 0.01, 1/6, 1.0)
        @test mean(f1D) ≈ 0.0 atol=1e-2
        @test var(f1D) ≈ vartest atol=vartest/10
        # Using structs
        sys = Swalbe.SysConst_1D(L=100000,δ=1.0,kbt=0.01)
        state = Swalbe.Sys(sys, kind="thermal")
        Swalbe.thermal!(state, sys)
        @test mean(state.kbt) ≈ 0.0 atol=1e-2
        @test var(state.kbt) ≈ vartest atol=vartest/10
        vartest = 0.2/11
        Swalbe.thermal!(f1D, ones(100000), 0.1, 1/6, 1.0)
        @test mean(f1D) ≈ 0.0 atol=1e-2
        @test var(f1D) ≈ vartest atol=vartest/10
        # Using structs
        sys = Swalbe.SysConst_1D(L=100000,δ=1.0,kbt=0.1)
        state = Swalbe.Sys(sys, kind="thermal")
        Swalbe.thermal!(state, sys)
        @test mean(state.kbt) ≈ 0.0 atol=1e-2
        @test var(state.kbt) ≈ vartest atol=vartest/10
    end
    @testset "rho update" begin
        @testset "Constant fields" begin
            rho = ones(25)
            height = ones(25)
            output = zeros(25)
            Swalbe.update_rho!(rho, output, height, zeros(25,2), zeros(25,4))
            @test all(rho .== 1)
        end
    end
    @testset "view four" begin
        dummy = reshape(collect(1:20),5,4)
        d1, d2, d3, d4 = Swalbe.view_four(dummy)
        @test all(d1 .== dummy[:,1])
        @test all(d2 .== dummy[:,2])
        @test all(d3 .== dummy[:,3])
        @test all(d4 .== dummy[:,4])
    end
end
|
{"hexsha": "350b00b5f1e3a67f3a4de2c416c9575f4617ee69", "size": 6734, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "test/forcing.jl", "max_stars_repo_name": "aedolfi/Swalbe.jl", "max_stars_repo_head_hexsha": "8722b1e2f881b2917d8e2faca1ea58b0e9485ce5", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "test/forcing.jl", "max_issues_repo_name": "aedolfi/Swalbe.jl", "max_issues_repo_head_hexsha": "8722b1e2f881b2917d8e2faca1ea58b0e9485ce5", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "test/forcing.jl", "max_forks_repo_name": "aedolfi/Swalbe.jl", "max_forks_repo_head_hexsha": "8722b1e2f881b2917d8e2faca1ea58b0e9485ce5", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.4, "max_line_length": 84, "alphanum_fraction": 0.5164835165, "num_tokens": 2725}
|
import sys
if sys.path[0] != '/mnt/home/landerson/.local/lib/python3.6/site-packages':
sys.path.insert(0, '/mnt/home/landerson/.local/lib/python3.6/site-packages/astroML-0.3-py3.6.egg')
sys.path.insert(0, '/mnt/home/landerson/.local/lib/python3.6/site-packages/xdgmm-1.0.9-py3.6.egg')
sys.path.insert(0, '/mnt/home/landerson/.local/lib/python3.6/site-packages')
import matplotlib.pyplot as plt
#get_ipython().magic('matplotlib inline')
import numpy as np
import matplotlib as mpl
from scipy import stats
from dustmaps.sfd import SFDQuery
from dustmaps.bayestar import BayestarQuery
import astropy.coordinates as coord
import astropy.units as u
import scipy.stats
from astropy.table import Table, unique, Column, hstack, vstack
import healpy as hp
from xdgmm import XDGMM
import scipy.optimize as op
import emcee
import corner
def dust(ra, dec, distance, max_samples=10, mode='median'):
    """Query the SFD and Bayestar dust maps at the given sky positions.

    Returns a tuple ``(sfd_reddening, bayestar_reddening)`` for the
    coordinates built from *ra*, *dec* and *distance*.
    """
    target = coord.SkyCoord(ra, dec, distance=distance)
    sfd_map = SFDQuery()
    bayestar_map = BayestarQuery(max_samples=max_samples)
    sfd_val = sfd_map(target)
    bayes_val = bayestar_map(target, mode=mode, return_flags=False)
    return sfd_val, bayes_val  # , iphas(c, mode=mode), marshall(c), chen(c)
def getDust(G, bp, rp, ebv, maxnit=100):
    """ Compute the Gaia extinctions assuming relations from Babusieux

    Iteratively corrects the bp-rp colour for its own extinction until the
    per-band coefficients converge (fixed number of iterations).

    Arguments: G, bp, rp, E(B-V) -- Gaia magnitudes and reddening (arrays)
    maxnit -- number of iterations
    Returns extinction in G, bp, rp
    Author: Sergey Koposov skoposov@cmu.edu
    """
    # Polynomial + A0 cross-term coefficients for the G, bp and rp bands.
    c1, c2, c3, c4, c5, c6, c7 = [0.9761, -0.1704,
                                  0.0086, 0.0011, -0.0438, 0.0013, 0.0099]
    d1, d2, d3, d4, d5, d6, d7 = [
        1.1517, -0.0871, -0.0333, 0.0173, -0.0230, 0.0006, 0.0043]
    e1, e2, e3, e4, e5, e6, e7 = [
        0.6104, -0.0170, -0.0026, -0.0017, -0.0078, 0.00005, 0.0006]
    A0 = 3.1*ebv

    # A_X / A0 as a function of the (current) intrinsic bp-rp colour.
    def F1(bprp): return np.poly1d(
        [c1, c2, c3, c4][::-1])(bprp)+c5*A0+c6*A0**2+c7*bprp*A0

    def F2(bprp): return np.poly1d(
        [d1, d2, d3, d4][::-1])(bprp)+d5*A0+d6*A0**2+d7*bprp*A0

    def F3(bprp): return np.poly1d(
        [e1, e2, e3, e4][::-1])(bprp)+e5*A0+e6*A0**2+e7*bprp*A0

    # Mask of sources with all three magnitudes finite (used for the
    # convergence diagnostic only).
    xind = np.isfinite(bp+rp+G)
    curbp = bp-rp
    for i in range(maxnit):
        AG = F1(curbp)*A0
        Abp = F2(curbp)*A0
        Arp = F3(curbp)*A0
        # Update the dereddened colour with the latest bp/rp extinctions.
        curbp1 = bp-rp-Abp+Arp
        delta = np.abs(curbp1-curbp)[xind]
        curbp = curbp1
        # Convergence diagnostic: 99th percentile of the colour change.
        print(scipy.stats.scoreatpercentile(delta[np.isfinite(delta)], 99))
    # Final extinctions from the converged colour.
    AG = F1(curbp)*A0
    Abp = F2(curbp)*A0
    Arp = F3(curbp)*A0
    return AG, Abp, Arp
def matrixize(data1, data2, err1, err2):
    """Combine two 1-D quantities into XDGMM-ready inputs.

    Returns ``(X, Xerr)`` where ``X`` is the (N, 2) stack of the two data
    columns and ``Xerr`` is the (N, 2, 2) per-point covariance with the
    squared errors on the diagonal.
    """
    X = np.column_stack([data1, data2])
    n_features = X.shape[-1]
    Xerr = np.zeros(X.shape + (n_features,))
    idx = np.arange(n_features)
    # Only the diagonal is populated: the two errors are uncorrelated.
    Xerr[:, idx, idx] = np.vstack([err1**2., err2**2.]).T
    return X, Xerr
def plotXdgmm(xdgmm, ax, c='k', lw=1, label='prior', step=0.001):
    """Draw the 1-sigma ellipse of every mixture component on *ax*.

    Each ellipse comes from the eigendecomposition of the component's
    covariance; line opacity scales with the component weight.  Only the
    heaviest component carries *label* so the legend gets one entry.

    BUGFIX: the original rebound the *label* parameter to None once any
    lighter component had been drawn, so if the heaviest component came
    later it was plotted with label=None and the legend entry was lost.
    """
    ts = np.arange(0, 2. * np.pi, step)  # ellipse parametrisation angles
    amps = xdgmm.weights
    mus = xdgmm.mu
    Vs = xdgmm.V
    max_amp = np.max(amps)
    for gg in range(xdgmm.n_components):
        # Label only the dominant component.
        comp_label = label if amps[gg] == max_amp else None
        w, v = np.linalg.eigh(Vs[gg])
        points = (np.sqrt(w[0]) * (v[:, 0])[:, None] * (np.cos(ts))[None, :]
                  + np.sqrt(w[1]) * (v[:, 1])[:, None] * (np.sin(ts))[None, :]
                  + mus[gg][:, None])
        ax.plot(points[0, :], points[1, :], c, lw=lw,
                alpha=amps[gg] / max_amp, rasterized=True, label=comp_label)
def lnprior(theta, xdgmm, I=None):
    """Log-prior: XDGMM density of the model point, with a flat bound on Ak.

    Returns -inf when the extinction Ak lies outside [-5, 5]; otherwise the
    XDGMM log-density of (hw2_model, mw2_model) evaluated with zero
    measurement covariance.
    """
    # A numpy array is mutable; never use one as a default argument.
    if I is None:
        I = np.zeros((1, 2, 2))
    hw2_model, mw2_model, Ak = theta
    if (-5 <= Ak) & (Ak <= 5):
        return xdgmm.score_samples(np.vstack([hw2_model, mw2_model]).T, I)[0]
    return -np.inf
def lnprob(theta, Y, Y_err, xdgmm):
    """Log-posterior: lnprior + lnlike, or -inf when the prior excludes theta."""
    prior = lnprior(theta, xdgmm)
    if np.isfinite(prior):
        return prior + lnlike(theta, Y, Y_err)
    return -np.inf
def lnlike(theta, Y, Y_err):
    """Gaussian log-likelihood of the extinction model given data Y, Y_err.

    The model predicts the observed (colour, magnitude) pair by reddening
    (hw2_model, mw2_model) with extinction Ak; Y_err is the (1, 2, 2)
    covariance of the single observed point.
    """
    Aw2_Ak = 0.43  # extinction ratio A_W2 / A_K
    hw2_model, mw2_model, Ak = theta
    model = np.vstack([hw2_model + Ak / 0.918, mw2_model + Ak * Aw2_Ak]).T
    resid = Y - model
    inv_cov = np.linalg.inv(Y_err**2)
    quad = np.sum(np.dot(np.dot(resid, inv_cov[0]), np.transpose(resid)))
    log_det = np.log(np.linalg.det(Y_err[0]**2))
    return -0.5 * (quad + log_det)
# Named functions instead of lambda-assignments (PEP 8 E731): same callables,
# but with docstrings and useful tracebacks.
def nll(*args):
    """Negative log-likelihood wrapper for scipy.optimize minimizers."""
    return -lnlike(*args)


def nlp(*args):
    """Negative log-posterior wrapper for scipy.optimize minimizers."""
    return -lnprob(*args)
def get_arrays():
    """Return (color, absmag, colorErr, absmagErr, ra, dec, parallax) for the
    low-dust, high-latitude star sample.

    Loads the arrays from the 'highlatStars.npz' cache when present;
    otherwise builds the sample from the Gaia/dust FITS tables and writes
    the cache.  BUGFIX: the cached path originally returned only 4 arrays
    while the fresh path returned 7, so the 7-value unpack at the call site
    crashed on every run after the first; the cache also omitted
    ra/dec/parallax.  Both paths now cache and return the same 7-tuple.
    """
    try:
        data = np.load('highlatStars.npz')
        return (data['color'], data['absmag'], data['colorErr'],
                data['absmagErr'], data['ra'], data['dec'], data['parallax'])
    except IOError:
        datahigh = Table.read('dustHighLat-result.fits.gz')
        datalow = Table.read('dustLowLat-result.fits.gz')
        data = vstack((datahigh, datalow))
        c = coord.SkyCoord(data['ra'], data['dec'],
                           distance=1./data['parallax']/u.mas*u.kpc)
        galc = c.transform_to(coord.Galactic)
        highlat = np.abs(galc.b) > 45*u.deg
        absmag = data['w2mpro'] - 5.*np.log10(1./(data['parallax']/1e2))
        color = data['h_m'] - data['w2mpro']
        sfddust, bayesdust = dust(data['ra'], data['dec'],
                                  1./data['parallax']/u.mas*u.kpc,
                                  max_samples=10, mode='median')
        colorErr = np.sqrt(data['h_msigcom']**2 + data['w2mpro_error']**2.)
        absmagErr = data['w2mpro_error']
        # Keep bright, high-latitude stars behind little foreground dust.
        indices = highlat & (absmag < 2) & (sfddust < 0.05)
        print(np.sum(indices))
        np.savez('highlatStars', color=color[indices],
                 absmag=absmag[indices],
                 colorErr=colorErr[indices],
                 absmagErr=absmagErr[indices],
                 ra=data['ra'][indices],
                 dec=data['dec'][indices],
                 parallax=data['parallax'][indices])
        return (color[indices], absmag[indices], colorErr[indices],
                absmagErr[indices], data['ra'][indices],
                data['dec'][indices], data['parallax'][indices])
color, absmag, colorErr, absmagErr, ra, dec, parallax = get_arrays()
X, Xerr = matrixize(color, absmag, colorErr, absmagErr)

ncomp = 256
try:
    # Reuse a previously fitted mixture when the model file exists.
    xdgmm = XDGMM(filename='rjce_lowdust_{0}G.fits'.format(ncomp))
except IOError:
    xdgmm = XDGMM(method='Bovy')
    xdgmm.n_components = ncomp
    xdgmm = xdgmm.fit(X, Xerr)
    xdgmm.save_model('rjce_lowdust_{0}G.fits'.format(ncomp))

# Command-line arguments: the [m, n) slice of stars to process and the emcee
# thread count.  BUGFIX: sys.argv entries are strings; the original used them
# directly, which raised TypeError at `n - m` / `np.arange(m, n)`.
m = int(sys.argv[1])
n = int(sys.argv[2])
nthreads = int(sys.argv[3])

hw2 = np.zeros((n - m, 3))
mw2 = np.zeros((n - m, 3))
Ak = np.zeros((n - m, 3))
for i in np.arange(m, n):
    print(i)
    Y, Y_err = matrixize(color[i],
                         absmag[i],
                         colorErr[i],
                         absmagErr[i])
    # Initial guess: observed colour/magnitude and almost no extinction.
    Ak_guess = 0.01
    hw2_guess = color[i]
    mw2_guess = absmag[i]
    result = op.minimize(nlp, [hw2_guess, mw2_guess, Ak_guess],
                         args=(Y, Y_err, xdgmm))
    ndim, nwalkers = 3, 50
    # Walkers start in a tiny ball around the optimizer solution.
    pos = [result["x"] + 1e-4*np.random.randn(ndim) for _ in range(nwalkers)]
    sampler = emcee.EnsembleSampler(nwalkers, ndim, lnprob,
                                    args=(Y, Y_err, xdgmm), threads=nthreads)
    sampler.run_mcmc(pos, 200)
    # Discard the first 50 steps as burn-in.
    samples = sampler.chain[:, 50:, :].reshape((-1, ndim))
    # Median and +/- 1-sigma (16/50/84 percentiles) for each parameter.
    hw2_mcmc, mw2_mcmc, ak_mcmc = map(lambda v: (v[1], v[2]-v[1], v[1]-v[0]),
                                      zip(*np.percentile(samples, [16, 50, 84],
                                                         axis=0)))
    hw2[i - m] = hw2_mcmc
    mw2[i - m] = mw2_mcmc
    Ak[i - m] = ak_mcmc

np.savez('posteriorSamples_{0}_{1}'.format(n, m),
         hw2=hw2, mw2=mw2, Ak=Ak, ra=ra, dec=dec, parallax=parallax)
print(hw2, mw2, Ak)
|
{"hexsha": "a8e08c874e9b933b61e12b4dc019ca654de35b67", "size": 7361, "ext": "py", "lang": "Python", "max_stars_repo_path": "GPdust.py", "max_stars_repo_name": "andersdot/dust", "max_stars_repo_head_hexsha": "44460ac2e8bf46db5ab27862bf775cf93531742e", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "GPdust.py", "max_issues_repo_name": "andersdot/dust", "max_issues_repo_head_hexsha": "44460ac2e8bf46db5ab27862bf775cf93531742e", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "GPdust.py", "max_forks_repo_name": "andersdot/dust", "max_forks_repo_head_hexsha": "44460ac2e8bf46db5ab27862bf775cf93531742e", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.0833333333, "max_line_length": 187, "alphanum_fraction": 0.5900013585, "include": true, "reason": "import numpy,import scipy,from scipy,import astropy,from astropy", "num_tokens": 2566}
|
import cv2
import numpy as np
from random import randint
from copy import deepcopy
# Default BGR colours (OpenCV channel order) — (255, 255, 0) is cyan.
color = (255, 255, 0)
# Font face used for the on-frame counter text.
font = cv2.FONT_HERSHEY_DUPLEX
fontColor = (255, 255, 0)
def draw_count(frame, crowd_count, ignore_polys=None, gt_count=None, alpha=0.5):
    """Overlay the crowd count (and optional ground truth) on *frame* in place.

    :param frame: BGR image (numpy array); modified in place.
    :param crowd_count: predicted people count to display.
    :param ignore_polys: list of polygons, each polygon being a list of tuples of (x,y) containing at least 3 points in one polygon.
    :param gt_count: optional ground-truth count shown next to the prediction.
    :param alpha: blending weight of the darkened ignore-region overlay.
    """
    # Avoid the shared mutable-default-argument pitfall.
    if ignore_polys is None:
        ignore_polys = []
    frame_h = frame.shape[0]
    frame_w = frame.shape[1]
    # Scale the font with the frame width, but never below 1.
    fontScale = max(int(frame_w / 525), 1)
    fontThickness = int(fontScale * 2)
    # `is not None` so a ground truth of 0 is still displayed
    # (the original truthiness test hid it).
    if gt_count is not None:
        text = "NDPeeps Counted: {} (GT:{})".format(crowd_count, gt_count)
    else:
        text = "NDPeeps Counted: {}".format(crowd_count)
    # Darken the ignore regions on a copy, then alpha-blend it back.
    overlay = deepcopy(frame)
    for poly in ignore_polys:
        cv2.fillPoly(overlay, [np.array(poly, dtype=np.int32)], (0, 0, 0))
    cv2.addWeighted(overlay, alpha, frame, 1 - alpha, 0, frame)
    cv2.putText(frame, text, (20, frame_h - 20), font, fontScale,
                (255, 255, 255), fontThickness)
|
{"hexsha": "47af1240f6410827cfb6b5867f2a6cc5012103e3", "size": 1255, "ext": "py", "lang": "Python", "max_stars_repo_path": "drawer.py", "max_stars_repo_name": "asanakoy/lsc-cnn", "max_stars_repo_head_hexsha": "d389e3598c5ec34254200160c0c4a1904e61eb1b", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "drawer.py", "max_issues_repo_name": "asanakoy/lsc-cnn", "max_issues_repo_head_hexsha": "d389e3598c5ec34254200160c0c4a1904e61eb1b", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "drawer.py", "max_forks_repo_name": "asanakoy/lsc-cnn", "max_forks_repo_head_hexsha": "d389e3598c5ec34254200160c0c4a1904e61eb1b", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.9189189189, "max_line_length": 133, "alphanum_fraction": 0.6501992032, "include": true, "reason": "import numpy", "num_tokens": 399}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.