Dataset schema (column: type, observed range):

- blob_id: string, length 40
- directory_id: string, length 40
- path: string, length 4 to 721
- content_id: string, length 40
- detected_licenses: list, length 0 to 57
- license_type: string, 2 classes
- repo_name: string, length 5 to 91
- snapshot_id: string, length 40
- revision_id: string, length 40
- branch_name: string, 321 classes
- visit_date: timestamp[ns], 2016-08-12 09:31:09 to 2023-09-06 10:45:07
- revision_date: timestamp[ns], 2010-09-28 14:01:40 to 2023-09-06 06:22:19
- committer_date: timestamp[ns], 2010-09-28 14:01:40 to 2023-09-06 06:22:19
- github_id: int64, 426 to 681M
- star_events_count: int64, 101 to 243k
- fork_events_count: int64, 0 to 110k
- gha_license_id: string, 23 classes
- gha_event_created_at: timestamp[ns], 2012-06-28 18:51:49 to 2023-09-14 21:59:16 (nullable)
- gha_created_at: timestamp[ns], 2008-02-11 22:55:26 to 2023-08-10 11:14:58 (nullable)
- gha_language: string, 147 classes
- src_encoding: string, 26 classes
- language: string, 2 classes
- is_vendor: bool
- is_generated: bool
- length_bytes: int64, 6 to 10.2M
- extension: string, 115 classes
- filename: string, length 3 to 113
- content: string, length 6 to 10.2M

The sample rows below list each field in this column order, one value at a time, separated by "|".
05b12ddff71d66f009d85c792d36e5ddec5eebde
|
8de79ab1818c535dcd8ad6e0c92b5c9642ffb82a
|
/doc/development/tutorials/examples/helloworld.py
|
d6d81fd4f765afd5f212cc41339d0180c3e89e8b
|
[
"BSD-3-Clause",
"BSD-2-Clause"
] |
permissive
|
sphinx-doc/sphinx
|
632d75bfc7bef14904f3d847e6de6d37594a13a5
|
eab54533a56119c5badd5aac647c595a9adae720
|
refs/heads/master
| 2023-08-16T18:21:54.073511
| 2023-08-15T17:36:47
| 2023-08-15T17:36:47
| 28,710,753
| 6,138
| 2,587
|
NOASSERTION
| 2023-09-14T14:22:28
| 2015-01-02T10:53:28
|
Python
|
UTF-8
|
Python
| false
| false
| 400
|
py
|
helloworld.py
|
from docutils import nodes
from docutils.parsers.rst import Directive


class HelloWorld(Directive):
    def run(self):
        paragraph_node = nodes.paragraph(text='Hello World!')
        return [paragraph_node]


def setup(app):
    app.add_directive("helloworld", HelloWorld)

    return {
        'version': '0.1',
        'parallel_read_safe': True,
        'parallel_write_safe': True,
    }
|
c0f9ee319ff63ed6626d28a828b9882816e6146f
|
75fab6855981ebb90264f8a81b044295606d67d2
|
/flask/ssti/src/app.py
|
ff72d1f5fe4cd10def292c4f151fe80242e8eea2
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
vulhub/vulhub
|
395262f51db7d6ca61f8f3cfa46ae1953bf5a775
|
6a142caa19620bffa4cda9989697afd5b4136c87
|
refs/heads/master
| 2023-08-10T07:01:35.605206
| 2023-08-08T09:59:24
| 2023-08-08T09:59:24
| 87,699,760
| 14,539
| 4,414
|
MIT
| 2023-09-14T03:05:28
| 2017-04-09T10:13:13
|
Dockerfile
|
UTF-8
|
Python
| false
| false
| 257
|
py
|
app.py
|
from flask import Flask, request
from jinja2 import Template

app = Flask(__name__)


@app.route("/")
def index():
    name = request.args.get('name', 'guest')
    t = Template("Hello " + name)
    return t.render()


if __name__ == "__main__":
    app.run()
|
d9899afe9334a86517992c90020b6cf8a4e9ded5
|
c56f0964f5e42e7cc6bfba6b0e305fa3936f395e
|
/robots/LoCoBot/locobot_control/nodes/pointrobot3factor_ros_server.py
|
03feab130b29438061358efee8e9889f3134ee8f
|
[
"MIT"
] |
permissive
|
facebookresearch/pyrobot
|
4c702d1d01d1e11e8ff5b3a8d5783713ba29291a
|
b334b60842271d9d8f4ed7a97bc4e5efe8bb72d6
|
refs/heads/main
| 2023-07-30T04:35:22.664168
| 2022-03-08T18:09:11
| 2022-03-08T18:09:11
| 168,630,156
| 2,314
| 360
|
MIT
| 2023-07-11T01:00:04
| 2019-02-01T02:28:00
|
Python
|
UTF-8
|
Python
| false
| false
| 14,763
|
py
|
pointrobot3factor_ros_server.py
|
#!/usr/bin/env python
import numpy as np
from gtsam import *
from gpmp2 import *
import threading
import copy
import actionlib
import sys
import math
from scipy import ndimage
import rospy
import tf
from geometry_msgs.msg import Twist, Pose, PoseStamped
from sensor_msgs.msg import JointState
from nav_msgs.msg import Odometry, OccupancyGrid
from control_msgs.msg import (
FollowJointTrajectoryAction,
FollowJointTrajectoryGoal,
)
from trajectory_msgs.msg import JointTrajectoryPoint
# TOPIC NAMES
odom_topic = "/odom"
cmd_vel_topic = "/cmd_vel_mux/input/navi"
action_topic = "/gpmp_ctrl"
occ_grid_topic = "/move_base/local_costmap/costmap"
class RobotState(object):
"""A simple robot state object to keep track of locobot's base state"""
def __init__(self):
self.pose = None
self.vel = None
class Robot(object):
"""
A simple locobot interface object which is PyRobot independent.
Functions:
Subscribes to robot state,
Subscribe to occupancy grid and builds GPMP SDF
Publishes gpmp trajectory to the trajectory action server.
"""
def __init__(self):
self.ctrl_pub = rospy.Publisher(cmd_vel_topic, Twist, queue_size=1)
self.state = RobotState()
self.sdf = None
self.sdf_lock = threading.RLock()
rospy.Subscriber(odom_topic, Odometry, self._odometry_callback)
rospy.Subscriber(occ_grid_topic, OccupancyGrid, self._occ_grid_callback)
self.traj_client_ = actionlib.SimpleActionClient(
"/turtle/base_controller/trajectory", FollowJointTrajectoryAction,
)
server_up = self.traj_client_.wait_for_server(timeout=rospy.Duration(10.0))
if not server_up:
rospy.logerr(
"Timed out waiting for Joint Trajectory"
" Action Server to connect. Start the action server"
" before running example."
)
rospy.signal_shutdown("Timed out waiting for Action Server")
sys.exit(1)
def _occ_grid_callback(self, msg):
cols = msg.info.width
rows = msg.info.height
origin_x = msg.info.origin.position.x
origin_y = msg.info.origin.position.y
cell_size = msg.info.resolution
occ_map = np.zeros((rows, cols))
for i in range(rows):
for j in range(cols):
k = i * cols + j
if msg.data[k] > 0:
occ_map[i][j] = 1
else:
occ_map[i][j] = 0
# Signed Distance field
origin_point2 = Point2(origin_x, origin_y)
field = signedDistanceField2D(occ_map, cell_size)
sdf = PlanarSDF(origin_point2, cell_size, field)
self.sdf_lock.acquire()
self.sdf = sdf
self.sdf_lock.release()
def _odometry_callback(self, msg):
x = msg.pose.pose.position.x
y = msg.pose.pose.position.y
orientation_q = msg.pose.pose.orientation
orientation_list = [
orientation_q.x,
orientation_q.y,
orientation_q.z,
orientation_q.w,
]
(roll, pitch, yaw) = tf.transformations.euler_from_quaternion(orientation_list)
x_vel = msg.twist.twist.linear.x
y_vel = 0.0
ang_vel = msg.twist.twist.angular.z
self.state.pose = np.asarray([x, y, yaw])
self.state.vel = np.asarray([x_vel, y_vel, ang_vel])
def stop(self):
rospy.loginfo("Stopping base!")
msg = Twist()
msg.linear.x = 0
msg.angular.z = 0
self.ctrl_pub.publish(msg)
def get_robot_state(self):
return self.state.pose, self.state.vel
def get_local_frame_vels(self, temp_vel, yaw):
# temp_vel is velocity in global frame.
vel = np.asarray([1.0,1.0,1.0])
vel[0] = temp_vel[0] * np.cos(yaw) + temp_vel[1]* np.sin(yaw)
vel[1] = -1.0 * temp_vel[0] * np.sin(yaw) + temp_vel[1] * np.cos(yaw)
vel[2] = temp_vel[2]
return vel
def executeTrajectory(self, result, params):
DOF = 3
traj_ = FollowJointTrajectoryGoal()
point = JointTrajectoryPoint()
for i in range(params.total_time_step):
pose = result.atVector(symbol(ord("x"), i))
vel = self.get_local_frame_vels(result.atVector(symbol(ord("v"), i)),
pose[2])
point = JointTrajectoryPoint()
for j in range(3):
point.positions.append(pose[j])
point.velocities.append(vel[j])
point.time_from_start = rospy.Duration(
i * params.delta_t
) # /(check_inter+1))
traj_.trajectory.points.append(point)
traj_.trajectory.header.stamp = rospy.Time.now()
self.traj_client_.cancel_goal()
self.traj_client_.send_goal(traj_)
# self.traj_client_.wait_for_result()
def signedDistanceField2D(ground_truth_map, cell_size):
# SIGNEDDISTANCEFIELD2D 2D signed distance field
# Given a ground truth 2D map defined in Matrix in 0-1,
# calculate 2D signed distance field, which is defined as a matrix
# map matrix and signed distance field matrix have the same resolution.
#
# Usage: field = SIGNEDDISTANCEFIELD2D(ground_truth_map, cell_siz)
# @map evidence grid from dataset, map use 0 show open area, 1 show objects.
# @cell_size cell sizeto given metric information
#
# Output:
# @field sdf, row is Y, col is X
# regularize unknow area to open area
cur_map = ground_truth_map > 0.75
cur_map = cur_map.astype(int)
if np.amax(cur_map) == 0:
return np.ones(ground_truth_map.shape) * 1000
# inverse map
inv_map = 1 - cur_map
# get signed distance from map and inverse map
# since bwdist(foo) = ndimage.distance_transform_edt(1-foo)
map_dist = ndimage.distance_transform_edt(inv_map)
inv_map_dist = ndimage.distance_transform_edt(cur_map)
field = map_dist - inv_map_dist
# metric
field = field * cell_size
field = field.astype(float)
return field
def get_plan(start_conf_val, start_vel, end_conf_val, end_vel, sdf, params):
start_conf = start_conf_val
end_conf = end_conf_val
avg_vel = (end_conf_val - start_conf_val / params.total_time_step) / params.delta_t
# plot param
# init optimization
graph = NonlinearFactorGraph()
init_values = Values()
for i in range(0, params.total_time_step + 1):
key_pos = symbol(ord("x"), i)
key_vel = symbol(ord("v"), i)
#% initialize as straight line in conf space
pose = start_conf_val
vel = avg_vel
init_values.insert(key_pos, pose)
init_values.insert(key_vel, vel)
#% start/end priors
if i == 0:
graph.push_back(PriorFactorVector(key_pos, start_conf, params.pose_fix))
graph.push_back(PriorFactorVector(key_vel, start_vel, params.vel_fix))
elif i == params.total_time_step:
graph.push_back(PriorFactorVector(key_pos, end_conf, params.pose_fix_goal))
graph.push_back(PriorFactorVector(key_vel, end_vel, params.vel_fix_goal))
graph.add(VehicleDynamicsFactorVector(key_pos, key_vel, params.cost_sigma))
# GP priors and cost factor
if i > 0:
# graph.push_back(PriorFactorVector(key_pos, end_conf, params.pose_fix_goal))
# graph.push_back(PriorFactorVector(key_vel, end_vel, params.vel_fix_goal))
key_pos1 = symbol(ord("x"), i - 1)
key_pos2 = symbol(ord("x"), i)
key_vel1 = symbol(ord("v"), i - 1)
key_vel2 = symbol(ord("v"), i)
temp = GaussianProcessPriorLinear(
key_pos1, key_vel1, key_pos2, key_vel2, params.delta_t, params.Qc_model
)
graph.push_back(temp)
#% cost factor
graph.push_back(
ObstaclePlanarSDFFactorPointRobot(
key_pos,
params.pR_model,
sdf,
params.cost_sigma,
params.epsilon_dist,
)
)
#% GP cost factor
if params.use_GP_inter and params.check_inter > 0:
for j in range(1, params.check_inter + 1):
tau = j * (params.total_time_sec / params.total_check_step)
graph.add(
ObstaclePlanarSDFFactorGPPointRobot(
key_pos1,
key_vel1,
key_pos2,
key_vel2,
params.pR_model,
sdf,
params.cost_sigma,
params.epsilon_dist,
params.Qc_model,
params.delta_t,
tau,
)
)
if params.use_trustregion_opt:
parameters = DoglegParams()
optimizer = DoglegOptimizer(graph, init_values, parameters)
else:
parameters = GaussNewtonParams()
optimizer = GaussNewtonOptimizer(graph, init_values, parameters)
print("Initial Error = %d\n", graph.error(init_values))
optimizer.optimizeSafely()
result = optimizer.values()
print("Final Error = %d\n", graph.error(result))
res_flag = True
if graph.error(result) > params.acceptable_error_threshold:
res_flag = False
return result, res_flag
class Parameters(object): # TODO: read from yaml file or rosparams
# settings
total_time_sec = 5.0
total_time_step = 50
total_check_step = 50.0
delta_t = total_time_sec / total_time_step
check_inter = int(total_check_step / total_time_step - 1)
use_GP_inter = True
# point robot model
pR = PointRobot(3, 1)
spheres_data = np.asarray([0.0, 0.0, 0.0, 0.0, 1.5])
nr_body = spheres_data.shape[0]
sphere_vec = BodySphereVector()
sphere_vec.push_back(
BodySphere(spheres_data[0], spheres_data[4], Point3(spheres_data[1:4]))
)
pR_model = PointRobotModel(pR, sphere_vec)
# GP
Qc = np.identity(pR_model.dof())
Qc_model = noiseModel_Gaussian.Covariance(Qc)
# Obstacle avoid settings
cost_sigma = 0.005
epsilon_dist = 0.1
# prior to start/goal
pose_fix = pose_fix_goal = noiseModel_Isotropic.Sigma(pR_model.dof(), 0.0001)
vel_fix = vel_fix_goal = noiseModel_Isotropic.Sigma(pR_model.dof(), 0.0001)
use_trustregion_opt = True
pause_time = total_time_sec / total_time_step
# Fixed window params
goal_region_threshold = 0.1
acceptable_error_threshold = 400
sigma_goal = 4
opt_timeout = 0.2
class GPMPController(object):
"""docstring for GPMPController"""
def __init__(self, robot, params, action_name):
self.robot = robot
self.params = params
self._action_name = action_name
# Action server for the pyrobot client
self._as = actionlib.SimpleActionServer(
self._action_name,
FollowJointTrajectoryAction,
execute_cb=self.execute_cb,
auto_start=False,
)
self._as.start()
def execute_cb(self, goal):
# start and end conf
end_conf_val = np.asarray(goal.trajectory.points[0].positions)
end_vel = np.asarray(goal.trajectory.points[0].velocities)
goal_region_threshold = goal.goal_tolerance[0].position
duration = goal.goal_time_tolerance.to_sec()
print("Received goal", end_conf_val, end_vel)
start_time = rospy.get_time()
curstate_val, curstate_vel = self.robot.get_robot_state()
init_distance = np.linalg.norm(curstate_val - end_conf_val)
while np.linalg.norm(curstate_val - end_conf_val) > goal_region_threshold:
# Timeout
if rospy.get_time() - start_time > duration:
rospy.logerr(
"The controller timedout trying to reach the goal."
" Consider increasing the time"
)
self.robot.traj_client_.cancel_goal()
self.robot.stop()
self._as.set_aborted()
return
if self._as.is_preempt_requested():
rospy.logwarn(
"############## %s: Preempted ####################"
% self._action_name
)
self._as.set_preempted()
# Note: The trajectory is not cancelled for preempt as updated trajectory would be given
return
# Goal prior factors
self.params.pose_fix_goal = noiseModel_Isotropic.Sigma(
3,
self.params.sigma_goal
* np.linalg.norm(curstate_val - end_conf_val)
/ init_distance,
)
self.params.vel_fix_goal = noiseModel_Isotropic.Sigma(
3,
self.params.sigma_goal
* np.linalg.norm(curstate_val - end_conf_val)
/ init_distance,
)
self.robot.sdf_lock.acquire()
sdf = self.robot.sdf
self.robot.sdf_lock.release()
result, res_flag = get_plan(
curstate_val, curstate_vel, end_conf_val, end_vel, sdf, self.params
)
if not res_flag:
rospy.logerr("GPMP optimizer failed to produce an acceptable plan")
self.robot.traj_client_.cancel_goal()
self.robot.stop()
self._as.set_aborted()
return
self.robot.executeTrajectory(result, self.params)
rospy.sleep(0.5)
curstate_val, curstate_vel = self.robot.get_robot_state()
print("Current State: ", curstate_val, curstate_vel)
print("Error", np.linalg.norm(curstate_val - end_conf_val))
self.robot.traj_client_.wait_for_result() # TODO: Absorb this into the treshold
self._as.set_succeeded()
def main():
try:
rospy.init_node("gpmp_controller_server", anonymous=True)
except rospy.exceptions.ROSException:
rospy.logwarn("ROS node [gpmp_controller] has already been initialized")
robot = Robot()
while robot.sdf is None:
rospy.logwarn("Waiting for robot SDF!!")
rospy.sleep(0.2)
params = Parameters()
gpmp_controller = GPMPController(robot, params, "/gpmp_controller")
rospy.spin()
if __name__ == "__main__":
main()
|
276e8dd07a1cb6b8cde84c248a408304215e7bce
|
642ba1746fed0b722a127b8426eca987df6efc61
|
/docs/courses/code/initbatser.py
|
d48046a616c1b788f23e34243ca9bb04f5ef8a53
|
[
"BSD-3-Clause"
] |
permissive
|
neuronsimulator/nrn
|
23781d978fe9253b0e3543f41e27252532b35459
|
b786c36d715ba0f6da1ba8bdf5d2338c939ecf51
|
refs/heads/master
| 2023-08-09T00:13:11.123525
| 2023-08-04T13:11:02
| 2023-08-04T13:11:02
| 71,627,569
| 313
| 171
|
NOASSERTION
| 2023-09-14T17:48:03
| 2016-10-22T08:47:37
|
C++
|
UTF-8
|
Python
| false
| false
| 2,464
|
py
|
initbatser.py
|
"""
Serial batch implementation
for stimulus current i iterating over a range of values
run a simulation and report spike frequency
save i & corresponding f to a file
optionally plot fi curve
"""
from neuron import h, gui
import time
# Simulation parameters
PLOTRESULTS = True # if True, generates a graph that shows f-i curve
h.tstop = 500 # ms, more than long enough for 15 spikes at ISI = 25 ms
AMP0 = 0.1 # nA -- minimum stimulus
D_AMP = 0.02 # nA -- stim increment between runs
NRUNS = 30
# model specification
from cell import Cell
cell = Cell()
# instrumentation
# experimental manipulations
stim = h.IClamp(cell.soma(0.5))
stim.delay = 1 # ms
stim.dur = 1e9
stim.amp = 0.1 # nA
def fstimamp(run_id):
"""takes run_id, returns corresponding stimulus amplitude"""
return AMP0 + run_id * D_AMP
def set_params(run_id):
"""sets stimulus amplitude to appropriate value for this run"""
stim.amp = fstimamp(run_id)
# data recording and analysis
# count only those spikes that get to distal end of dend
nc = h.NetCon(cell.dend(1)._ref_v, None, sec=cell.dend)
nc.threshold = -10 # mV
spvec = h.Vector()
nc.record(spvec)
NSETTLE = 5 # ignore the first NSETTLE ISI (allow freq to stablize)
NINVL = 10 # num ISI from which frequency will be calculated
NMIN = NSETTLE + NINVL # ignore recordings with fewer than this num of ISIs
def get_frequency(spvec):
nspikes = spvec.size()
if nspikes > NMIN:
t2 = spvec[-1] # last spike
t1 = spvec[-(1 + NINVL)] # NINVL prior to last spike
return NINVL * 1.0e3 / (t2 - t1)
else:
return 0
#
# Simulation control
#
# batch control
t_start = time.time() # keep track of execution time past this point
def batchrun(n):
stims = []
freqs = []
for run_id in range(n):
set_params(run_id)
h.run()
stims.append(stim.amp)
freqs.append(get_frequency(spvec))
print("Finished %d of %d." % (run_id + 1, n))
return stims, freqs
stims, freqs = batchrun(NRUNS)
#
# Reporting of results
#
def save_results(filename, stims, freqs):
with open(filename, "w") as f:
f.write("label:%s\n%d\n" % ("f", len(freqs)))
for stim, freq in zip(stims, freqs):
f.write("%g\t%g\n" % (stim, freq))
save_results("fi.dat", stims, freqs)
print("%g seconds" % (time.time() - t_start)) # report run time
if PLOTRESULTS:
from plotfi import plotfi
fig = plotfi(stims, freqs)
|
3d84029d093e2a01bb2449d166499eb945b02086
|
d4239425234eacb647c4cc4f2f4c8537b618fca0
|
/onadata/apps/main/tests/test_user_id_string_unique_together.py
|
aec4c9ed8e595d2ae39f24ac80d69a632c630512
|
[
"BSD-2-Clause",
"BSD-3-Clause"
] |
permissive
|
onaio/onadata
|
58762d6a606870bd13d43fd27fdaa61720a745c2
|
e5bdec91cb47179172b515bbcb91701262ff3377
|
refs/heads/main
| 2023-09-04T03:12:43.388668
| 2023-08-24T07:27:08
| 2023-08-24T07:27:08
| 12,888,897
| 177
| 149
|
NOASSERTION
| 2023-09-13T14:19:05
| 2013-09-17T07:25:01
|
Python
|
UTF-8
|
Python
| false
| false
| 1,063
|
py
|
test_user_id_string_unique_together.py
|
import os

from django.db import IntegrityError

from onadata.apps.main.tests.test_base import TestBase
from onadata.apps.logger.models import XForm


class TestUserIdStringUniqueTogether(TestBase):
    def test_unique_together(self):
        """
        Multiple users can have the same survey, but id_strings of
        surveys must be unique for a single user.
        """
        self._create_user_and_login()
        self.this_directory = os.path.dirname(__file__)
        xls_path = os.path.join(self.this_directory,
                                "fixtures", "gps", "gps.xlsx")

        # first time
        self._publish_xls_file(xls_path)
        self.assertEqual(XForm.objects.count(), 1)

        # second time
        self.assertRaises(IntegrityError, self._publish_xls_file, xls_path)
        self.assertEqual(XForm.objects.count(), 1)

        self.client.logout()

        # first time
        self._create_user_and_login(username="carl", password="carl")
        self._publish_xls_file(xls_path)
        self.assertEqual(XForm.objects.count(), 2)
|
d92c648a0a66bf8716e1ddc7908dd1b94689300a
|
88dda5e76cef286c7db3ae7e5d1a32d28f7815a3
|
/reviewboard/webapi/resources/base_watched_object.py
|
e2f7766f4a5ce84b2cb926010bee99c906422e1c
|
[
"MIT"
] |
permissive
|
reviewboard/reviewboard
|
f4d3bada08ba9d6ef53add2d1fdb82bd6cc63a1e
|
c3a991f1e9d7682239a1ab0e8661cee6da01d537
|
refs/heads/master
| 2023-08-31T09:03:14.170335
| 2023-08-30T08:22:43
| 2023-08-30T08:22:43
| 285,304
| 1,141
| 353
|
MIT
| 2023-06-07T16:51:02
| 2009-08-22T21:39:49
|
Python
|
UTF-8
|
Python
| false
| false
| 5,231
|
py
|
base_watched_object.py
|
from django.contrib.auth.models import User
from django.core.exceptions import ObjectDoesNotExist
from django.http import HttpResponseRedirect
from djblets.webapi.decorators import (webapi_login_required,
webapi_response_errors,
webapi_request_fields)
from djblets.webapi.errors import (DOES_NOT_EXIST, NOT_LOGGED_IN,
PERMISSION_DENIED)
from djblets.webapi.fields import StringFieldType
from reviewboard.accounts.models import Profile
from reviewboard.webapi.base import WebAPIResource
from reviewboard.webapi.decorators import (webapi_check_local_site,
webapi_check_login_required)
from reviewboard.webapi.resources import resources
class BaseWatchedObjectResource(WebAPIResource):
"""A base resource for objects watched by a user."""
watched_resource = None
uri_object_key = 'watched_obj_id'
profile_field = None
star_function = None
unstar_function = None
allowed_methods = ('GET', 'POST', 'DELETE')
@property
def uri_object_key_regex(self):
return self.watched_resource.uri_object_key_regex
def get_queryset(self, request, username, local_site_name=None,
*args, **kwargs):
try:
local_site = self._get_local_site(local_site_name)
if local_site:
user = local_site.users.get(username=username)
profile = user.get_profile()
else:
profile = Profile.objects.get(user__username=username)
q = self.watched_resource.get_queryset(
request, local_site_name=local_site_name, *args, **kwargs)
q = q.filter(starred_by=profile)
return q
except Profile.DoesNotExist:
return self.watched_resource.model.objects.none()
@webapi_check_login_required
def get(self, request, watched_obj_id, *args, **kwargs):
try:
q = self.get_queryset(request, *args, **kwargs)
obj = self.get_watched_object(q, watched_obj_id, *args, **kwargs)
except ObjectDoesNotExist:
return DOES_NOT_EXIST
return HttpResponseRedirect(
self.watched_resource.get_href(obj, request, *args, **kwargs))
@webapi_check_login_required
@webapi_response_errors(DOES_NOT_EXIST)
def get_list(self, request, *args, **kwargs):
# TODO: Handle pagination and ?counts-only=1
try:
objects = [
self.serialize_object(obj)
for obj in self.get_queryset(request, is_list=True,
*args, **kwargs)
]
return 200, {
self.list_result_key: objects,
}
except User.DoesNotExist:
return DOES_NOT_EXIST
@webapi_check_local_site
@webapi_login_required
@webapi_response_errors(DOES_NOT_EXIST, NOT_LOGGED_IN, PERMISSION_DENIED)
@webapi_request_fields(required={
'object_id': {
'type': StringFieldType,
'description': 'The ID of the object to watch.',
},
})
def create(self, request, object_id, *args, **kwargs):
try:
obj_kwargs = kwargs.copy()
obj_kwargs[self.watched_resource.uri_object_key] = object_id
obj = self.watched_resource.get_object(request, *args,
**obj_kwargs)
user = resources.user.get_object(request, *args, **kwargs)
except ObjectDoesNotExist:
return DOES_NOT_EXIST
if not resources.user.has_modify_permissions(request, user,
*args, **kwargs):
return self.get_no_access_error(request)
profile = request.user.get_profile()
star = getattr(profile, self.star_function)
star(obj)
return 201, {
self.item_result_key: obj,
}
@webapi_check_local_site
@webapi_login_required
def delete(self, request, watched_obj_id, *args, **kwargs):
try:
obj_kwargs = kwargs.copy()
obj_kwargs[self.watched_resource.uri_object_key] = watched_obj_id
obj = self.watched_resource.get_object(request, *args,
**obj_kwargs)
user = resources.user.get_object(request, *args, **kwargs)
except ObjectDoesNotExist:
return DOES_NOT_EXIST
if not resources.user.has_modify_permissions(request, user,
*args, **kwargs):
return self.get_no_access_error(request)
profile, profile_is_new = request.user.get_profile(return_is_new=True)
if not profile_is_new:
unstar = getattr(profile, self.unstar_function)
unstar(obj)
return 204, {}
def serialize_object(self, obj, *args, **kwargs):
return {
'id': obj.pk,
self.item_result_key: obj,
}
def get_watched_object(self, queryset, obj_id, *args, **kwargs):
return queryset.get(pk=obj_id)
|
a818c8d04092f7a578ee1ff533f03d4a6a8bfa48
|
eb9f655206c43c12b497c667ba56a0d358b6bc3a
|
/python/testData/formatter/spaceBetweenParenthesesInEmptyArgumentList_after.py
|
b980535f24596fa7e20981ec486be24cc37afda9
|
[
"Apache-2.0"
] |
permissive
|
JetBrains/intellij-community
|
2ed226e200ecc17c037dcddd4a006de56cd43941
|
05dbd4575d01a213f3f4d69aa4968473f2536142
|
refs/heads/master
| 2023-09-03T17:06:37.560889
| 2023-09-03T11:51:00
| 2023-09-03T12:12:27
| 2,489,216
| 16,288
| 6,635
|
Apache-2.0
| 2023-09-12T07:41:58
| 2011-09-30T13:33:05
| null |
UTF-8
|
Python
| false
| false
| 21
|
py
|
spaceBetweenParenthesesInEmptyArgumentList_after.py
|
func(1, 2)
empty( )
|
eac547f196a988796ea49501515622446fa469f1
|
9240c573719755b67f030b5491ebfeb4d1f352bc
|
/pluggable-architecture/slides/git-stats-code/c_base_plugin.py
|
a6738c9811aa4b7d2e401f562eb222135167b1e8
|
[] |
no_license
|
alysivji/talks
|
46f6586e99af65de9b5d1dc34e2791e885a14d90
|
db63f50d27ab312e832a4314f0fcf874c324d1b8
|
refs/heads/main
| 2023-04-29T22:10:48.315626
| 2023-04-18T17:16:05
| 2023-04-18T17:16:05
| 89,402,889
| 115
| 54
| null | 2022-03-27T15:33:39
| 2017-04-25T20:19:12
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 355
|
py
|
c_base_plugin.py
|
from git_stats.download import RepoStatistics


class BasePlugin:
    def __init__(self, repo):
        self.repo = repo

    def __repr__(self):
        return f"<{self.__class__.__name__}>"

    @staticmethod
    def check(domain) -> bool:
        raise NotImplementedError

    def repo_stats(self) -> RepoStatistics:
        raise NotImplementedError
|
640220c1c07c229bda73a7f380c7d9fe89b51819
|
54292bb222c6525217458e92ddacfc4e2635b83e
|
/python/phonenumbers/shortdata/region_EG.py
|
a863023e6f9cc84ffd26265624ed1378e1b54be1
|
[
"Apache-2.0"
] |
permissive
|
daviddrysdale/python-phonenumbers
|
0d69b48033d1464c0a6c358274062f1db2ee8c4a
|
2f06ef6db2ca83f3856fbb8019a0c665f5971b13
|
refs/heads/dev
| 2023-08-31T09:37:20.570690
| 2023-08-22T05:18:22
| 2023-08-22T05:18:22
| 1,643,611
| 2,944
| 406
|
Apache-2.0
| 2023-08-08T06:49:07
| 2011-04-21T03:06:38
|
Python
|
UTF-8
|
Python
| false
| false
| 957
|
py
|
region_EG.py
|
"""Auto-generated file, do not edit by hand. EG metadata"""
from ..phonemetadata import NumberFormat, PhoneNumberDesc, PhoneMetadata
PHONE_METADATA_EG = PhoneMetadata(id='EG', country_code=None, international_prefix=None,
general_desc=PhoneNumberDesc(national_number_pattern='[13]\\d\\d(?:\\d{2})?', possible_length=(3, 5)),
toll_free=PhoneNumberDesc(national_number_pattern='1(?:2[23]|80)', example_number='122', possible_length=(3,)),
emergency=PhoneNumberDesc(national_number_pattern='1(?:2[23]|80)', example_number='122', possible_length=(3,)),
short_code=PhoneNumberDesc(national_number_pattern='1(?:2[23]|[69]\\d{3}|80)|34400', example_number='122', possible_length=(3, 5)),
carrier_specific=PhoneNumberDesc(national_number_pattern='344\\d\\d', example_number='34400', possible_length=(5,)),
sms_services=PhoneNumberDesc(national_number_pattern='344\\d\\d', example_number='34400', possible_length=(5,)),
short_data=True)
|
e12c1e8d27d7a693954abd3d2d63c870ddf28996
|
b60686a2e351a756f249e0d9faab8fe154a08f11
|
/examples/shouldi/shouldi/java/dependency_check.py
|
bc068bf9bbe898754069decefd3c19fc69916c82
|
[
"MIT",
"Apache-2.0",
"Python-2.0",
"LicenseRef-scancode-generic-export-compliance"
] |
permissive
|
intel/dffml
|
86483b47229b9b62c9f8dfef51491aa02563347e
|
7d381bf67a72fe1ecb1012393d5726085564cb0e
|
refs/heads/main
| 2023-08-28T00:35:04.219193
| 2023-06-06T18:29:16
| 2023-06-06T18:29:16
| 149,512,216
| 237
| 204
|
MIT
| 2023-05-05T15:39:35
| 2018-09-19T21:06:34
|
Python
|
UTF-8
|
Python
| false
| false
| 2,485
|
py
|
dependency_check.py
|
import json
import os
import tempfile
import urllib.parse
from pathlib import Path
from typing import Dict, Any

from dffml import op, Definition, run_command

package_src_dir = Definition(name="package_src_dir", primitive="str")
dependency_check_output = Definition(
    name="dependency_check_output", primitive="Dict[str, Any]"
)


class DependencyCheckError(Exception):
    """
    Raised when dependency-check fails
    """


@op(
    inputs={"pkg": package_src_dir},
    outputs={"report": dependency_check_output},
)
async def run_dependency_check(self, pkg: str) -> Dict[str, Any]:
    """
    CLI usage: dffml service dev run -log debug shouldi.dependency_check:run_dependency_check -pkg .
    """
    with tempfile.TemporaryDirectory() as tempdir:
        # Define command
        cmd = [
            "dependency-check.sh",
            "-f",
            "JSON",
            "--out",
            os.path.abspath(tempdir),
        ]
        kwargs = {}
        # Dependency check version 6 requires proxy be set explicitly
        for env_var in ["HTTPS_PROXY", "https_proxy"]:
            if env_var in os.environ:
                parse_result = urllib.parse.urlparse(os.environ[env_var])
                cmd += [
                    "--proxyserver",
                    parse_result.hostname,
                    "--proxyport",
                    str(parse_result.port)
                    if parse_result.port is not None
                    else "8080",
                ]
                break
        # Directory or file to scan
        cmd.append("-s")
        if Path(pkg).is_file():
            cmd.append(os.path.basename(pkg))
            kwargs["cwd"] = os.path.dirname(pkg)
        else:
            cmd.append(".")
            kwargs["cwd"] = pkg
        # Run command
        try:
            await run_command(cmd, **kwargs)
        except RuntimeError as e:
            raise DependencyCheckError from e
        with open(
            os.path.join(
                os.path.abspath(tempdir), "dependency-check-report.json"
            )
        ) as f:
            dependency_check_op = json.loads(f.read())
        for items in dependency_check_op["dependencies"]:
            t_result = items["vulnerabilities"]
            final_report = {}
            score = 0
            for item in t_result:
                final_report["name"] = item["name"]
                final_report["severity"] = item["severity"]
                score += 1
            final_report["total_CVE"] = score
        return {"report": final_report}
|
fea6c20542f5bae5eba4de98736f2e7ac82b4e85
|
53a83642c01a8828e3d7bd0b18e33c3b694c2b84
|
/Python/GeeksforGeeks/linked-list-insertion.py
|
ce26d82d437c923494cde831256f9f91e69ec452
|
[] |
no_license
|
anantkaushik/Competitive_Programming
|
1dcd60a28b5b951c23024d6090942be081ad249f
|
6dba38fd7aa4e71b5196d01d64e81f9336d08b13
|
refs/heads/master
| 2022-03-06T15:36:23.797340
| 2022-02-21T12:00:37
| 2022-02-21T12:00:37
| 82,700,948
| 271
| 95
| null | 2020-10-27T17:34:39
| 2017-02-21T16:18:16
|
Python
|
UTF-8
|
Python
| false
| false
| 3,209
|
py
|
linked-list-insertion.py
|
"""
Problem Link: https://practice.geeksforgeeks.org/problems/linked-list-insertion/1
Given a key (or data) to be inserted into the linked list of size N. The task is to insert the element at head or tail of the linked
list depending on the input just before it p. If p is 0, then insert the element at beginning else insert at end.
Hint : When inserting at the end, make sure that you handle NULL explicitly.
Input Format:
First line of input contains number of testcases T. For each testcase, first line of input contains length of linked list N and next
line contains 2*N integers, each element to be inserted into the list is preceded by a 0 or 1 which decide the place to be inserted.
Output Format:
For each testcase, there will be a single line of output which contains the linked list elements.
Your Task:
This is a function problem. You only need to complete the functions insertAtBeginning and insertAtEnd that returns head after successful
insertion. The printing is done automatically by the driver code.
Constraints:
1 <= T <= 100
1 <= N <= 103
Example:
Input:
3
5
9 0 5 1 6 1 2 0 5 0
3
5 1 6 1 9 1
4
15 0 36 0 95 0 14 0
Output:
5 2 9 5 6
5 6 9
14 95 36 15
Explanation:
Testcase 1: After inserting the elements at required position, we have linked list as 5, 2, 9, 5, 6.
"""
{
#Contributed by : Nagendra Jha
import atexit
import io
import sys
_INPUT_LINES = sys.stdin.read().splitlines()
input = iter(_INPUT_LINES).__next__
_OUTPUT_BUFFER = io.StringIO()
sys.stdout = _OUTPUT_BUFFER
@atexit.register
def write():
sys.__stdout__.write(_OUTPUT_BUFFER.getvalue())
#node class
class Node:
def __init__(self,data):
self.data=data
self.next=None
#linked list class
class LinkedList:
def __init__(self):
self.head=None
def printList(self):
if self.head is None:
print(' ')
return
curr_node = self.head
while curr_node:
print(curr_node.data, end=" ")
curr_node = curr_node.next
print(' ')
if __name__ == '__main__':
t=int(input())
for cases in range(t):
n=int(input())
a=LinkedList()
nodes_info=list(map(int,input().strip().split()))
for i in range(0,len(nodes_info)-1,2):
if(nodes_info[i+1]==0):
insertAtBegining(a,nodes_info[i])
else:
insertAtEnd(a,nodes_info[i])
a.printList()
}
''' This is a function problem.You only need to complete the function given below '''
'''
Your task is to complete both
these functions given below.
Function Arguments: a (linked list) and value (value to be inserted)
Return Type: None
'''
'''
class Node:
def __init__(self,data):
self.data=data
self.next=None
'''
# function should insert new node at the
# beigning of the list
def insertAtBegining(a,value):
#code here
node = Node(value)
node.next = a.head
a.head = node
# function should insert new node at the
# end of the list
def insertAtEnd(a,value):
#code here too :)
cur = a.head
if not cur:
a.head = Node(value)
else:
while cur.next:
cur = cur.next
cur.next = Node(value)
|
f6b7ff19e8706f223c46d7381eed78e838b78231
|
c26483bc1399e7879471a9e53d0288cb2c756088
|
/onnxmltools/convert/sparkml/operator_converters/__init__.py
|
db4b8aa2b87f73214fe856d988adad8da33ec353
|
[
"Apache-2.0"
] |
permissive
|
onnx/onnxmltools
|
6782d9e1d2c75be7618b1378405d31198a310027
|
024a62f6915e6c3b9e040befaf058c7e60c271de
|
refs/heads/main
| 2023-09-04T04:57:10.943548
| 2023-08-28T16:43:37
| 2023-08-28T16:43:37
| 121,798,175
| 827
| 189
|
Apache-2.0
| 2023-09-13T16:07:20
| 2018-02-16T20:37:33
|
Python
|
UTF-8
|
Python
| false
| false
| 1,117
|
py
|
__init__.py
|
# SPDX-License-Identifier: Apache-2.0
# To register converter for sparkml operators, import associated modules here.
from . import bucketed_random_projection_lsh
from . import aft_survival_regression
from . import element_wise_product
from . import min_hash_lsh
from . import word2vec
from . import index_to_string
from . import chi_sq_selector
from . import one_vs_rest
from . import gbt_classifier
from . import dct
from . import pca
from . import polynomial_expansion
from . import tokenizer
from . import naive_bayes
from . import vector_slicer
from . import stop_words_remover
from . import bucketizer
from . import random_forest_classifier
from . import random_forest_regressor
from . import decision_tree_regressor
from . import decision_tree_classifier
from . import vector_indexer
from . import linear_regressor
from . import imputer
from . import scaler
from . import normalizer
from . import binarizer
from . import string_indexer
from . import linear_classifier
from . import onehot_encoder
from . import vector_assembler
from . import k_means
from . import count_vectorizer
from . import mlp_classifier
|
2ed511d7341930a13e94cc4146d7c8bd2a8e7034
|
bbd69601912a3361d788efd03a47f9d4e3bac09e
|
/docs/sphinx/rest_substitutions/snippets/python/converted/wx.LogFormatter.1.py
|
58de7a6b308be0c7864e3f844c1a708e9f380b37
|
[] |
no_license
|
wxWidgets/Phoenix
|
56929484460a0399a8f1d9582bc77c20aa14748d
|
a1184286703cf24c4b88e5bc14cf2979c1b1ea00
|
refs/heads/master
| 2023-09-01T07:10:17.437093
| 2023-08-31T05:38:01
| 2023-08-31T05:38:01
| 5,078,061
| 2,268
| 677
| null | 2023-09-09T17:06:59
| 2012-07-17T06:22:25
|
Python
|
UTF-8
|
Python
| false
| false
| 232
|
py
|
wx.LogFormatter.1.py
|
class LogFormatterWithThread(wx.LogFormatter):
    def Format(level, msg, info):
        return "[%d] %s(%d) : %s" % \
               (info.threadId, info.filename, info.line, msg)
|
9b26e12360d00d28eb2f98e7f0ba492e5117511c
|
f1190adf3f20c17508d9791fd79cb59470ee5d8e
|
/tests/test_renders.py
|
14949cbd5359e6277faebdb971033646f9cc4d8c
|
[
"MIT"
] |
permissive
|
kinegratii/django-echarts
|
38c42d6cd2078d01220b981fd7322b59d54f4fc5
|
12de6cf4faab1ecbbc06bd487727cc16791ee556
|
refs/heads/master
| 2023-03-18T02:26:11.916961
| 2023-03-17T02:33:34
| 2023-03-17T02:33:34
| 98,728,999
| 264
| 73
|
MIT
| 2023-03-17T02:33:35
| 2017-07-29T11:45:17
|
Python
|
UTF-8
|
Python
| false
| false
| 4,526
|
py
|
test_renders.py
|
import os
import unittest
from borax.htmls import HTMLString
from django import setup
from django_echarts.entities import (
Title, LinkItem, ChartInfo, bootstrap_table_class, material_table_class, Message, ValueItem, RowContainer,
ElementEntity
)
from django_echarts.renders import render_widget
from pyecharts.charts import Bar
from pyecharts.components import Table
import htmlgenerator as hg
class TableCssTestCase(unittest.TestCase):
def test_table_css(self):
self.assertEqual(
'table table-responsive table-bordered table-striped table-md',
bootstrap_table_class(border=True, striped=True, size='md'))
self.assertEqual(
'table table-responsive table-borderless table-striped table-md',
bootstrap_table_class(borderless=True, striped=True, size='md'))
self.assertEqual('responsive-table striped centered', material_table_class(striped=True, center=True))
class RenderTestCase(unittest.TestCase):
@classmethod
def setUpClass(cls) -> None:
os.environ['DJANGO_SETTINGS_MODULE'] = 'tests.settings_mock'
setup(set_prefix=False)
def test_settings(self):
from django_echarts.conf import DJANGO_ECHARTS_SETTINGS
self.assertEqual('bootstrap5', DJANGO_ECHARTS_SETTINGS.theme.name)
def test_render_default(self):
# with self.assertRaises(TypeError):
# render_widget(object)
info = ChartInfo('DemoInfo')
self.assertIn('DemoInfo', render_widget(info))
self.assertEqual('xxx', render_widget(HTMLString('xxx')))
def test_render_chart(self):
bar = Bar()
bar.width = 200
self.assertIn('200px', render_widget(bar))
table = Table()
self.assertIn('</div>', render_widget(table))
def test_basic_render(self):
self.assertEqual('', render_widget(None))
tw = Title('DemoTitle')
self.assertTrue('DemoTitle' in render_widget(tw))
link = LinkItem('demo', url='https://www.baidu.com', new_page=True)
html = render_widget(link)
self.assertIn('target="_blank"', html)
def test_render_container(self):
rc = RowContainer()
rc.add_widget(ValueItem(45, 'Today Value'))
rc.add_widget(Message('xxF'))
rc.set_spans(6)
self.assertTupleEqual((6, 6), rc.get_spans())
def test_render_element_entity(self):
ee = ElementEntity('div', style_width='200px', style_height='100px')
html_str = render_widget(ee)
self.assertIn('width:200px', html_str)
self.assertIn('height:100px', html_str)
class HTMLGeneratorTestCase(unittest.TestCase):
def test_html(self):
widget = hg.DIV('Div text', _class='ss')
self.assertIn('ss', render_widget(widget))
class TemplateTagsTestCase(unittest.TestCase):
@classmethod
def setUpClass(cls) -> None:
os.environ['DJANGO_SETTINGS_MODULE'] = 'tests.settings_mock'
setup(set_prefix=False)
from django.template import engines
cls.django_engine = engines['django']
def test_tags(self):
tw = Title('DemoTitle')
template_obj = self.django_engine.from_string('{% load echarts %}{% dw_widget widget %}')
result = template_obj.render({'widget': tw})
self.assertIn('DemoTitle', result)
def test_html_generator_widget(self):
widget = hg.H1('The First Title')
template_obj = self.django_engine.from_string('{% load echarts %}{% dw_widget widget %}')
result = template_obj.render({'widget': widget})
self.assertIn('The First Title', result)
def test_theme(self):
template_obj = self.django_engine.from_string('{% load echarts %}{% theme_js %} {% theme_css %}')
result = template_obj.render()
self.assertIn('link', result)
self.assertIn('script', result)
def test_init_echarts(self):
bar = Bar()
template_obj = self.django_engine.from_string(' '.join([
'{% load echarts %}{% echarts_js_content widget %}',
'{% echarts_container widget %}',
'{% echarts_js_dependencies widget %}'
]))
result = template_obj.render({'widget': bar})
self.assertIn('getElementById', result)
self.assertIn('echarts.min.js', result)
template_obj2 = self.django_engine.from_string('{% load echarts %}{% echarts_js_content_wrap widget %}')
result2 = template_obj2.render({'widget': bar})
self.assertIn('getElementById', result2)
|
d7a9bb59ac95b9738fd030b6e65bb8a3c80a0aff
|
bb1977851e40ee4c0ff124437515c5f21148b820
|
/tests/test_ops.py
|
0a753baf9ffa3b103617609dc3748aad836542f8
|
[
"MIT"
] |
permissive
|
open2c/bioframe
|
135df8f94599817f7f5434850d6108651268a80a
|
f208fe63e0d93c9e35a233bd02d07b1fc3545954
|
refs/heads/main
| 2023-07-06T09:57:54.209112
| 2023-07-03T18:12:13
| 2023-07-03T18:12:13
| 69,901,992
| 108
| 10
|
MIT
| 2023-06-30T23:48:44
| 2016-10-03T19:09:54
|
Python
|
UTF-8
|
Python
| false
| false
| 54,112
|
py
|
test_ops.py
|
from io import StringIO
import numpy as np
import pandas as pd
import pytest
import bioframe
import bioframe.core.checks as checks
from bioframe.core.construction import make_viewframe
# import pyranges as pr
# def bioframe_to_pyranges(df):
# pydf = df.copy()
# pydf.rename(
# {"chrom": "Chromosome", "start": "Start", "end": "End"},
# axis="columns",
# inplace=True,
# )
# return pr.PyRanges(pydf)
# def pyranges_to_bioframe(pydf):
# df = pydf.df
# df.rename(
# {"Chromosome": "chrom", "Start": "start", "End": "end",
# "Count": "n_intervals"},
# axis="columns",
# inplace=True,
# )
# return df
# def pyranges_overlap_to_bioframe(pydf):
# ## convert the df output by pyranges join into a bioframe-compatible format
# df = pydf.df.copy()
# df.rename(
# {
# "Chromosome": "chrom_1",
# "Start": "start_1",
# "End": "end_1",
# "Start_b": "start_2",
# "End_b": "end_2",
# },
# axis="columns",
# inplace=True,
# )
# df["chrom_1"] = df["chrom_1"].values.astype("object") # to remove categories
# df["chrom_2"] = df["chrom_1"].values
# return df
chroms = ["chr12", "chrX"]
def mock_bioframe(num_entries=100):
pos = np.random.randint(1, 1e7, size=(num_entries, 2))
df = pd.DataFrame()
df["chrom"] = np.random.choice(chroms, num_entries)
df["start"] = np.min(pos, axis=1)
df["end"] = np.max(pos, axis=1)
df.sort_values(["chrom", "start"], inplace=True)
return df
############# tests #####################
def test_trim():
### trim with view_df
view_df = pd.DataFrame(
[
["chr1", 0, 12, "chr1p"],
["chr1", 13, 26, "chr1q"],
["chrX", 1, 8, "chrX_0"],
],
columns=["chrom", "start", "end", "name"],
)
df = pd.DataFrame(
[
["chr1", -6, 12, "chr1p"],
["chr1", 0, 12, "chr1p"],
["chr1", 32, 36, "chr1q"],
["chrX", 1, 8, "chrX_0"],
],
columns=["chrom", "start", "end", "view_region"],
)
df_trimmed = pd.DataFrame(
[
["chr1", 0, 12, "chr1p"],
["chr1", 0, 12, "chr1p"],
["chr1", 26, 26, "chr1q"],
["chrX", 1, 8, "chrX_0"],
],
columns=["chrom", "start", "end", "view_region"],
)
with pytest.raises(ValueError):
bioframe.trim(df, view_df=view_df)
# df_view_col already exists, so need to specify it:
pd.testing.assert_frame_equal(
df_trimmed, bioframe.trim(df, view_df=view_df, df_view_col="view_region")
)
# non-default columns in view
funky_view = view_df.rename(columns={"chrom": "chr"})
pd.testing.assert_frame_equal(
df_trimmed,
bioframe.trim(
df,
view_df=funky_view,
df_view_col="view_region",
cols_view=["chr", "start", "end"],
),
)
### trim with view_df interpreted from dictionary for chromsizes
chromsizes = {"chr1": 20, "chrX_0": 5}
df = pd.DataFrame(
[
["chr1", 0, 12],
["chr1", 13, 26],
["chrX_0", 1, 8],
],
columns=["chrom", "startFunky", "end"],
)
df_trimmed = pd.DataFrame(
[
["chr1", 0, 12],
["chr1", 13, 20],
["chrX_0", 1, 5],
],
columns=["chrom", "startFunky", "end"],
).astype({"startFunky": pd.Int64Dtype(), "end": pd.Int64Dtype()})
pd.testing.assert_frame_equal(
df_trimmed,
bioframe.trim(
df,
view_df=chromsizes,
cols=["chrom", "startFunky", "end"],
return_view_columns=False,
),
)
### trim with default limits=None and negative values
df = pd.DataFrame(
[
["chr1", -4, 12],
["chr1", 13, 26],
["chrX", -5, -1],
],
columns=["chrom", "start", "end"],
)
df_trimmed = pd.DataFrame(
[
["chr1", 0, 12],
["chr1", 13, 26],
["chrX", 0, 0],
],
columns=["chrom", "start", "end"],
)
pd.testing.assert_frame_equal(df_trimmed, bioframe.trim(df))
### trim when there are NaN intervals
df = pd.DataFrame(
[
["chr1", -4, 12, "chr1p"],
[pd.NA, pd.NA, pd.NA, "chr1q"],
["chrX", -5, -1, "chrX_0"],
],
columns=["chrom", "start", "end", "region"],
).astype({"start": pd.Int64Dtype(), "end": pd.Int64Dtype()})
df_trimmed = pd.DataFrame(
[
["chr1", 0, 12, "chr1p"],
[pd.NA, pd.NA, pd.NA, "chr1q"],
["chrX", 0, 0, "chrX_0"],
],
columns=["chrom", "start", "end", "region"],
).astype({"start": pd.Int64Dtype(), "end": pd.Int64Dtype()})
pd.testing.assert_frame_equal(df_trimmed, bioframe.trim(df))
### trim with view_df and NA intervals
view_df = pd.DataFrame(
[
["chr1", 0, 12, "chr1p"],
["chr1", 13, 26, "chr1q"],
["chrX", 1, 12, "chrX_0"],
],
columns=["chrom", "start", "end", "name"],
)
df = pd.DataFrame(
[
["chr1", -6, 12],
["chr1", 0, 12],
[pd.NA, pd.NA, pd.NA],
["chrX", 1, 20],
],
columns=["chrom", "start", "end"],
).astype({"start": pd.Int64Dtype(), "end": pd.Int64Dtype()})
df_trimmed = pd.DataFrame(
[
["chr1", 0, 12, "chr1p"],
["chr1", 0, 12, "chr1p"],
[pd.NA, pd.NA, pd.NA, pd.NA],
["chrX", 1, 12, "chrX_0"],
],
columns=["chrom", "start", "end", "view_region"],
).astype({"start": pd.Int64Dtype(), "end": pd.Int64Dtype()})
# infer df_view_col with assign_view and ignore NAs
pd.testing.assert_frame_equal(
df_trimmed,
bioframe.trim(df, view_df=view_df, df_view_col=None, return_view_columns=True)[
["chrom", "start", "end", "view_region"]
],
)
def test_expand():
d = """chrom start end
0 chr1 1 5
1 chr1 50 55
2 chr2 100 200"""
fake_bioframe = pd.read_csv(StringIO(d), sep=r"\s+")
expand_bp = 10
fake_expanded = bioframe.expand(fake_bioframe, expand_bp)
d = """chrom start end
0 chr1 -9 15
1 chr1 40 65
2 chr2 90 210"""
df = pd.read_csv(StringIO(d), sep=r"\s+")
pd.testing.assert_frame_equal(df, fake_expanded)
# expand with negative pad
expand_bp = -10
fake_expanded = bioframe.expand(fake_bioframe, expand_bp)
d = """chrom start end
0 chr1 3 3
1 chr1 52 52
2 chr2 110 190"""
df = pd.read_csv(StringIO(d), sep=r"\s+")
pd.testing.assert_frame_equal(df, fake_expanded)
expand_bp = -10
fake_expanded = bioframe.expand(fake_bioframe, expand_bp, side="left")
d = """chrom start end
0 chr1 3 5
1 chr1 52 55
2 chr2 110 200"""
df = pd.read_csv(StringIO(d), sep=r"\s+")
pd.testing.assert_frame_equal(df, fake_expanded)
# expand with multiplicative pad
mult = 0
fake_expanded = bioframe.expand(fake_bioframe, pad=None, scale=mult)
d = """chrom start end
0 chr1 3 3
1 chr1 52 52
2 chr2 150 150"""
df = pd.read_csv(StringIO(d), sep=r"\s+")
pd.testing.assert_frame_equal(df, fake_expanded)
mult = 2.0
fake_expanded = bioframe.expand(fake_bioframe, pad=None, scale=mult)
d = """chrom start end
0 chr1 -1 7
1 chr1 48 58
2 chr2 50 250"""
df = pd.read_csv(StringIO(d), sep=r"\s+")
pd.testing.assert_frame_equal(df, fake_expanded)
# expand with NA and non-integer multiplicative pad
d = """chrom start end
0 chr1 1 5
1 NA NA NA
2 chr2 100 200"""
fake_bioframe = pd.read_csv(StringIO(d), sep=r"\s+").astype(
{"start": pd.Int64Dtype(), "end": pd.Int64Dtype()}
)
mult = 1.10
fake_expanded = bioframe.expand(fake_bioframe, pad=None, scale=mult)
d = """chrom start end
0 chr1 1 5
1 NA NA NA
2 chr2 95 205"""
df = pd.read_csv(StringIO(d), sep=r"\s+").astype(
{"start": pd.Int64Dtype(), "end": pd.Int64Dtype()}
)
pd.testing.assert_frame_equal(df, fake_expanded)
def test_expand_amount_args():
d = """chrom start end
0 chr1 3 5
1 chr1 52 55
2 chr2 110 200"""
df = pd.read_csv(StringIO(d), sep=r"\s+")
with pytest.raises(ValueError):
bioframe.expand(df, pad=10, scale=2.0)
def test_overlap():
### test consistency of overlap(how='inner') with pyranges.join ###
### note does not test overlap_start or overlap_end columns of bioframe.overlap
df1 = mock_bioframe()
df2 = mock_bioframe()
assert not df1.equals(df2)
# p1 = bioframe_to_pyranges(df1)
# p2 = bioframe_to_pyranges(df2)
# pp = pyranges_overlap_to_bioframe(p1.join(p2, how=None))[
# ["chrom_1", "start_1", "end_1", "chrom_2", "start_2", "end_2"]
# ]
# bb = bioframe.overlap(df1, df2, how="inner")[
# ["chrom_1", "start_1", "end_1", "chrom_2", "start_2", "end_2"]
# ]
# pp = pp.sort_values(
# ["chrom_1", "start_1", "end_1", "chrom_2", "start_2", "end_2"],
# ignore_index=True,
# )
# bb = bb.sort_values(
# ["chrom_1", "start_1", "end_1", "chrom_2", "start_2", "end_2"],
# ignore_index=True,
# )
# pd.testing.assert_frame_equal(bb, pp, check_dtype=False, check_exact=False)
# print("overlap elements agree")
### test overlap on= [] ###
df1 = pd.DataFrame(
[
["chr1", 8, 12, "+", "cat"],
["chr1", 8, 12, "-", "cat"],
["chrX", 1, 8, "+", "cat"],
],
columns=["chrom1", "start", "end", "strand", "animal"],
)
df2 = pd.DataFrame(
[["chr1", 6, 10, "+", "dog"], ["chrX", 7, 10, "-", "dog"]],
columns=["chrom2", "start2", "end2", "strand", "animal"],
)
b = bioframe.overlap(
df1,
df2,
on=["animal"],
how="left",
cols1=("chrom1", "start", "end"),
cols2=("chrom2", "start2", "end2"),
return_index=True,
return_input=False,
)
assert np.sum(pd.isna(b["index_"].values)) == 3
b = bioframe.overlap(
df1,
df2,
on=["strand"],
how="left",
cols1=("chrom1", "start", "end"),
cols2=("chrom2", "start2", "end2"),
return_index=True,
return_input=False,
)
assert np.sum(pd.isna(b["index_"].values)) == 2
b = bioframe.overlap(
df1,
df2,
on=None,
how="left",
cols1=("chrom1", "start", "end"),
cols2=("chrom2", "start2", "end2"),
return_index=True,
return_input=False,
)
assert np.sum(pd.isna(b["index_"].values)) == 0
### test overlap 'left', 'outer', and 'right'
b = bioframe.overlap(
df1,
df2,
on=None,
how="outer",
cols1=("chrom1", "start", "end"),
cols2=("chrom2", "start2", "end2"),
)
assert len(b) == 3
b = bioframe.overlap(
df1,
df2,
on=["animal"],
how="outer",
cols1=("chrom1", "start", "end"),
cols2=("chrom2", "start2", "end2"),
)
assert len(b) == 5
b = bioframe.overlap(
df1,
df2,
on=["animal"],
how="inner",
cols1=("chrom1", "start", "end"),
cols2=("chrom2", "start2", "end2"),
)
assert len(b) == 0
b = bioframe.overlap(
df1,
df2,
on=["animal"],
how="right",
cols1=("chrom1", "start", "end"),
cols2=("chrom2", "start2", "end2"),
)
assert len(b) == 2
b = bioframe.overlap(
df1,
df2,
on=["animal"],
how="left",
cols1=("chrom1", "start", "end"),
cols2=("chrom2", "start2", "end2"),
)
assert len(b) == 3
### test keep_order and NA handling
df1 = pd.DataFrame(
[
["chr1", 8, 12, "+"],
[pd.NA, pd.NA, pd.NA, "-"],
["chrX", 1, 8, "+"],
],
columns=["chrom", "start", "end", "strand"],
).astype({"start": pd.Int64Dtype(), "end": pd.Int64Dtype()})
df2 = pd.DataFrame(
[["chr1", 6, 10, "+"], [pd.NA, pd.NA, pd.NA, "-"], ["chrX", 7, 10, "-"]],
columns=["chrom2", "start2", "end2", "strand"],
).astype({"start2": pd.Int64Dtype(), "end2": pd.Int64Dtype()})
assert df1.equals(
bioframe.overlap(
df1, df2, how="left", keep_order=True, cols2=["chrom2", "start2", "end2"]
)[["chrom", "start", "end", "strand"]]
)
assert ~df1.equals(
bioframe.overlap(
df1, df2, how="left", keep_order=False, cols2=["chrom2", "start2", "end2"]
)[["chrom", "start", "end", "strand"]]
)
df1 = pd.DataFrame(
[
["chr1", 8, 12, "+", pd.NA],
[pd.NA, pd.NA, pd.NA, "-", pd.NA],
["chrX", 1, 8, pd.NA, pd.NA],
],
columns=["chrom", "start", "end", "strand", "animal"],
).astype({"start": pd.Int64Dtype(), "end": pd.Int64Dtype()})
df2 = pd.DataFrame(
[["chr1", 6, 10, pd.NA, "tiger"]],
columns=["chrom2", "start2", "end2", "strand", "animal"],
).astype({"start2": pd.Int64Dtype(), "end2": pd.Int64Dtype()})
assert (
bioframe.overlap(
df1,
df2,
how="outer",
cols2=["chrom2", "start2", "end2"],
return_index=True,
keep_order=False,
).shape
== (3, 12)
)
### result of overlap should still have bedframe-like properties
overlap_df = bioframe.overlap(
df1,
df2,
how="outer",
cols2=["chrom2", "start2", "end2"],
return_index=True,
suffixes=("", ""),
)
assert checks.is_bedframe(
overlap_df[df1.columns],
)
assert checks.is_bedframe(
overlap_df[df2.columns], cols=["chrom2", "start2", "end2"]
)
overlap_df = bioframe.overlap(
df1,
df2,
how="innter",
cols2=["chrom2", "start2", "end2"],
return_index=True,
suffixes=("", ""),
)
assert checks.is_bedframe(
overlap_df[df1.columns],
)
assert checks.is_bedframe(
overlap_df[df2.columns], cols=["chrom2", "start2", "end2"]
)
# test keep_order incompatible if how!= 'left'
with pytest.raises(ValueError):
bioframe.overlap(
df1,
df2,
how="outer",
on=["animal"],
cols2=["chrom2", "start2", "end2"],
keep_order=True,
)
def test_cluster():
df1 = pd.DataFrame(
[
["chr1", 1, 5],
["chr1", 3, 8],
["chr1", 8, 10],
["chr1", 12, 14],
],
columns=["chrom", "start", "end"],
)
df_annotated = bioframe.cluster(df1)
assert (
df_annotated["cluster"].values == np.array([0, 0, 0, 1])
).all() # the last interval does not overlap the first three
df_annotated = bioframe.cluster(df1, min_dist=2)
assert (
df_annotated["cluster"].values == np.array([0, 0, 0, 0])
).all() # all intervals part of the same cluster
df_annotated = bioframe.cluster(df1, min_dist=None)
assert (
df_annotated["cluster"].values == np.array([0, 0, 1, 2])
).all() # adjacent intervals not clustered
df1.iloc[0, 0] = "chrX"
df_annotated = bioframe.cluster(df1)
assert (
df_annotated["cluster"].values == np.array([2, 0, 0, 1])
).all() # do not cluster intervals across chromosomes
# test consistency with pyranges (which automatically sorts df upon
# creation and uses 1-based indexing for clusters)
# assert (
# (bioframe_to_pyranges(df1).cluster(count=True).df["Cluster"].values - 1)
# == bioframe.cluster(df1.sort_values(["chrom", "start"]))["cluster"].values
# ).all()
# test on=[] argument
df1 = pd.DataFrame(
[
["chr1", 3, 8, "+", "cat", 5.5],
["chr1", 3, 8, "-", "dog", 6.5],
["chr1", 6, 10, "-", "cat", 6.5],
["chrX", 6, 10, "-", "cat", 6.5],
],
columns=["chrom", "start", "end", "strand", "animal", "location"],
)
assert (
bioframe.cluster(df1, on=["animal"])["cluster"].values == np.array([0, 1, 0, 2])
).all()
assert (
bioframe.cluster(df1, on=["strand"])["cluster"].values == np.array([0, 1, 1, 2])
).all()
assert (
bioframe.cluster(df1, on=["location", "animal"])["cluster"].values
== np.array([0, 2, 1, 3])
).all()
### test cluster with NAs
df1 = pd.DataFrame(
[
["chrX", 1, 8, pd.NA, pd.NA],
[pd.NA, pd.NA, pd.NA, "-", pd.NA],
["chr1", 8, 12, "+", pd.NA],
["chr1", 1, 8, np.nan, pd.NA],
[pd.NA, np.nan, pd.NA, "-", pd.NA],
],
columns=["chrom", "start", "end", "strand", "animal"],
).astype({"start": pd.Int64Dtype(), "end": pd.Int64Dtype()})
assert bioframe.cluster(df1)["cluster"].max() == 3
assert bioframe.cluster(df1, on=["strand"])["cluster"].max() == 4
pd.testing.assert_frame_equal(df1, bioframe.cluster(df1)[df1.columns])
assert checks.is_bedframe(
bioframe.cluster(df1, on=["strand"]),
cols=["chrom", "cluster_start", "cluster_end"],
)
assert checks.is_bedframe(
bioframe.cluster(df1), cols=["chrom", "cluster_start", "cluster_end"]
)
assert checks.is_bedframe(bioframe.cluster(df1))
def test_merge():
df1 = pd.DataFrame(
[
["chr1", 1, 5],
["chr1", 3, 8],
["chr1", 8, 10],
["chr1", 12, 14],
],
columns=["chrom", "start", "end"],
)
# the last interval does not overlap the first three with default min_dist=0
assert (bioframe.merge(df1)["n_intervals"].values == np.array([3, 1])).all()
# adjacent intervals are not clustered with min_dist=none
assert (
bioframe.merge(df1, min_dist=None)["n_intervals"].values == np.array([2, 1, 1])
).all()
# all intervals part of one cluster
assert (
bioframe.merge(df1, min_dist=2)["n_intervals"].values == np.array([4])
).all()
df1.iloc[0, 0] = "chrX"
assert (
bioframe.merge(df1, min_dist=None)["n_intervals"].values
== np.array([1, 1, 1, 1])
).all()
assert (
bioframe.merge(df1, min_dist=0)["n_intervals"].values == np.array([2, 1, 1])
).all()
# total number of intervals should equal length of original dataframe
mock_df = mock_bioframe()
assert np.sum(bioframe.merge(mock_df, min_dist=0)["n_intervals"].values) == len(
mock_df
)
# # test consistency with pyranges
# pd.testing.assert_frame_equal(
# pyranges_to_bioframe(bioframe_to_pyranges(df1).merge(count=True)),
# bioframe.merge(df1),
# check_dtype=False,
# check_exact=False,
# )
# test on=['chrom',...] argument
df1 = pd.DataFrame(
[
["chr1", 3, 8, "+", "cat", 5.5],
["chr1", 3, 8, "-", "dog", 6.5],
["chr1", 6, 10, "-", "cat", 6.5],
["chrX", 6, 10, "-", "cat", 6.5],
],
columns=["chrom", "start", "end", "strand", "animal", "location"],
)
assert len(bioframe.merge(df1, on=None)) == 2
assert len(bioframe.merge(df1, on=["strand"])) == 3
assert len(bioframe.merge(df1, on=["strand", "location"])) == 3
assert len(bioframe.merge(df1, on=["strand", "location", "animal"])) == 4
d = """ chrom start end animal n_intervals
0 chr1 3 10 cat 2
1 chr1 3 8 dog 1
2 chrX 6 10 cat 1"""
df = pd.read_csv(StringIO(d), sep=r"\s+")
pd.testing.assert_frame_equal(
df,
bioframe.merge(df1, on=["animal"]),
check_dtype=False,
)
# merge with repeated indices
df = pd.DataFrame(
{"chrom": ["chr1", "chr2"], "start": [100, 400], "end": [110, 410]}
)
df.index = [0, 0]
pd.testing.assert_frame_equal(
df.reset_index(drop=True), bioframe.merge(df)[["chrom", "start", "end"]]
)
# test merge with NAs
df1 = pd.DataFrame(
[
["chrX", 1, 8, pd.NA, pd.NA],
[pd.NA, pd.NA, pd.NA, "-", pd.NA],
["chr1", 8, 12, "+", pd.NA],
["chr1", 1, 8, np.nan, pd.NA],
[pd.NA, np.nan, pd.NA, "-", pd.NA],
],
columns=["chrom", "start", "end", "strand", "animal"],
).astype({"start": pd.Int64Dtype(), "end": pd.Int64Dtype()})
assert bioframe.merge(df1).shape[0] == 4
assert bioframe.merge(df1)["start"].iloc[0] == 1
assert bioframe.merge(df1)["end"].iloc[0] == 12
assert bioframe.merge(df1, on=["strand"]).shape[0] == df1.shape[0]
assert bioframe.merge(df1, on=["animal"]).shape[0] == df1.shape[0]
assert bioframe.merge(df1, on=["animal"]).shape[1] == df1.shape[1] + 1
assert checks.is_bedframe(bioframe.merge(df1, on=["strand", "animal"]))
def test_complement():
### complementing a df with no intervals in chrX by a view with chrX
# should return entire chrX region
df1 = pd.DataFrame(
[["chr1", 1, 5], ["chr1", 3, 8], ["chr1", 8, 10], ["chr1", 12, 14]],
columns=["chrom", "start", "end"],
)
df1_chromsizes = {"chr1": 100, "chrX": 100}
df1_complement = pd.DataFrame(
[
["chr1", 0, 1, "chr1"],
["chr1", 10, 12, "chr1"],
["chr1", 14, 100, "chr1"],
["chrX", 0, 100, "chrX"],
],
columns=["chrom", "start", "end", "view_region"],
)
pd.testing.assert_frame_equal(
bioframe.complement(df1, view_df=df1_chromsizes), df1_complement
)
# non-default columns in view
funky_view = make_viewframe(df1_chromsizes, cols=["chr", "start", "end"])
pd.testing.assert_frame_equal(
bioframe.complement(
df1,
view_df=funky_view,
cols_view=["chr", "start", "end"],
),
df1_complement,
)
### test complement with two chromosomes ###
df1.iloc[0, 0] = "chrX"
df1_complement = pd.DataFrame(
[
["chr1", 0, 3, "chr1"],
["chr1", 10, 12, "chr1"],
["chr1", 14, 100, "chr1"],
["chrX", 0, 1, "chrX"],
["chrX", 5, 100, "chrX"],
],
columns=["chrom", "start", "end", "view_region"],
)
pd.testing.assert_frame_equal(
bioframe.complement(df1, view_df=df1_chromsizes), df1_complement
)
### test complement with no view_df and a negative interval
df1 = pd.DataFrame(
[["chr1", -5, 5], ["chr1", 10, 20]], columns=["chrom", "start", "end"]
)
df1_complement = pd.DataFrame(
[
["chr1", 5, 10, "chr1"],
["chr1", 20, np.iinfo(np.int64).max, "chr1"],
],
columns=["chrom", "start", "end", "view_region"],
)
pd.testing.assert_frame_equal(bioframe.complement(df1), df1_complement)
### test complement with an overhanging interval
df1 = pd.DataFrame(
[["chr1", -5, 5], ["chr1", 10, 20]], columns=["chrom", "start", "end"]
)
chromsizes = bioframe.make_viewframe(
{"chr1": 15}, name_style="ucsc", view_name_col="VR"
)
df1_complement = pd.DataFrame(
[
["chr1", 5, 10, "chr1:0-15"],
],
columns=["chrom", "start", "end", "view_region"],
)
pd.testing.assert_frame_equal(
bioframe.complement(df1, view_df=chromsizes, view_name_col="VR"), df1_complement
)
### test complement where an interval from df overlaps two different
### regions from view
df1 = pd.DataFrame([["chr1", 5, 15]], columns=["chrom", "start", "end"])
chromsizes = [("chr1", 0, 9, "chr1p"), ("chr1", 11, 20, "chr1q")]
df1_complement = pd.DataFrame(
[["chr1", 0, 5, "chr1p"], ["chr1", 15, 20, "chr1q"]],
columns=["chrom", "start", "end", "view_region"],
)
pd.testing.assert_frame_equal(bioframe.complement(df1, chromsizes), df1_complement)
### test complement with NAs
df1 = pd.DataFrame(
[[pd.NA, pd.NA, pd.NA], ["chr1", 5, 15], [pd.NA, pd.NA, pd.NA]],
columns=["chrom", "start", "end"],
).astype(
{
"start": pd.Int64Dtype(),
"end": pd.Int64Dtype(),
}
)
pd.testing.assert_frame_equal(bioframe.complement(df1, chromsizes), df1_complement)
with pytest.raises(ValueError): # no NAs allowed in chromsizes
bioframe.complement(
df1, [("chr1", pd.NA, 9, "chr1p"), ("chr1", 11, 20, "chr1q")]
)
assert checks.is_bedframe(bioframe.complement(df1, chromsizes))
def test_closest():
df1 = pd.DataFrame(
[
["chr1", 1, 5],
],
columns=["chrom", "start", "end"],
)
df2 = pd.DataFrame(
[["chr1", 4, 8], ["chr1", 10, 11]], columns=["chrom", "start", "end"]
)
### closest(df1,df2,k=1) ###
d = """chrom start end chrom_ start_ end_ distance
0 chr1 1 5 chr1 4 8 0"""
df = pd.read_csv(StringIO(d), sep=r"\s+").astype(
{
"start_": pd.Int64Dtype(),
"end_": pd.Int64Dtype(),
"distance": pd.Int64Dtype(),
}
)
pd.testing.assert_frame_equal(df, bioframe.closest(df1, df2, k=1))
### closest(df1,df2, ignore_overlaps=True)) ###
d = """chrom_1 start_1 end_1 chrom_2 start_2 end_2 distance
0 chr1 1 5 chr1 10 11 5"""
df = pd.read_csv(StringIO(d), sep=r"\s+").astype(
{
"start_2": pd.Int64Dtype(),
"end_2": pd.Int64Dtype(),
"distance": pd.Int64Dtype(),
}
)
pd.testing.assert_frame_equal(
df, bioframe.closest(df1, df2, suffixes=("_1", "_2"), ignore_overlaps=True)
)
### closest(df1,df2,k=2) ###
d = """chrom_1 start_1 end_1 chrom_2 start_2 end_2 distance
0 chr1 1 5 chr1 4 8 0
1 chr1 1 5 chr1 10 11 5"""
df = pd.read_csv(StringIO(d), sep=r"\s+").astype(
{
"start_2": pd.Int64Dtype(),
"end_2": pd.Int64Dtype(),
"distance": pd.Int64Dtype(),
}
)
pd.testing.assert_frame_equal(
df, bioframe.closest(df1, df2, suffixes=("_1", "_2"), k=2)
)
### change first interval to new chrom ###
df2.iloc[0, 0] = "chrA"
d = """chrom start end chrom_ start_ end_ distance
0 chr1 1 5 chr1 10 11 5"""
df = pd.read_csv(StringIO(d), sep=r"\s+").astype(
{
"start_": pd.Int64Dtype(),
"end_": pd.Int64Dtype(),
"distance": pd.Int64Dtype(),
}
)
pd.testing.assert_frame_equal(df, bioframe.closest(df1, df2, k=1))
### test other return arguments ###
df2.iloc[0, 0] = "chr1"
d = """
index index_ have_overlap overlap_start overlap_end distance
0 0 0 True 4 5 0
1 0 1 False <NA> <NA> 5
"""
df = pd.read_csv(StringIO(d), sep=r"\s+")
pd.testing.assert_frame_equal(
df,
bioframe.closest(
df1,
df2,
k=2,
return_overlap=True,
return_index=True,
return_input=False,
return_distance=True,
),
check_dtype=False,
)
# closest should ignore empty groups (e.g. from categorical chrom)
df = pd.DataFrame(
[
["chrX", 1, 8],
["chrX", 2, 10],
],
columns=["chrom", "start", "end"],
)
d = """ chrom_1 start_1 end_1 chrom_2 start_2 end_2 distance
0 chrX 1 8 chrX 2 10 0
1 chrX 2 10 chrX 1 8 0"""
df_closest = pd.read_csv(StringIO(d), sep=r"\s+")
df_cat = pd.CategoricalDtype(categories=["chrX", "chr1"], ordered=True)
df = df.astype({"chrom": df_cat})
pd.testing.assert_frame_equal(
df_closest,
bioframe.closest(df, suffixes=("_1", "_2")),
check_dtype=False,
check_categorical=False,
)
# closest should ignore null rows: code will need to be modified
# as for overlap if an on=[] option is added
df1 = pd.DataFrame(
[
[pd.NA, pd.NA, pd.NA],
["chr1", 1, 5],
],
columns=["chrom", "start", "end"],
).astype({"start": pd.Int64Dtype(), "end": pd.Int64Dtype()})
df2 = pd.DataFrame(
[
[pd.NA, pd.NA, pd.NA],
["chr1", 4, 8],
[pd.NA, pd.NA, pd.NA],
["chr1", 10, 11],
],
columns=["chrom", "start", "end"],
).astype({"start": pd.Int64Dtype(), "end": pd.Int64Dtype()})
d = """chrom_1 start_1 end_1 chrom_2 start_2 end_2 distance
0 chr1 1 5 chr1 10 11 5"""
df = pd.read_csv(StringIO(d), sep=r"\s+").astype(
{
"start_1": pd.Int64Dtype(),
"end_1": pd.Int64Dtype(),
"start_2": pd.Int64Dtype(),
"end_2": pd.Int64Dtype(),
"distance": pd.Int64Dtype(),
}
)
pd.testing.assert_frame_equal(
df, bioframe.closest(df1, df2, suffixes=("_1", "_2"), ignore_overlaps=True, k=5)
)
with pytest.raises(ValueError): # inputs must be valid bedFrames
df1.iloc[0, 0] = "chr10"
bioframe.closest(df1, df2)
### closest with direction ###
df1 = pd.DataFrame(
[
["chr1", 3, 5, "+"],
["chr1", 3, 5, "-"],
],
columns=["chrom", "start", "end", "strand"],
)
df2 = pd.DataFrame(
[["chr1", 1, 2],
["chr1", 2, 8],
["chr1", 10, 11]],
columns=["chrom", "start", "end"]
)
### closest(df1, df2, k=1, direction_col="strand") ###
d = """chrom start end strand chrom_ start_ end_ distance
0 chr1 3 5 + chr1 2 8 0
1 chr1 3 5 - chr1 2 8 0
"""
df = pd.read_csv(StringIO(d), sep=r"\s+").astype(
{
"start_": pd.Int64Dtype(),
"end_": pd.Int64Dtype(),
"distance": pd.Int64Dtype(),
}
)
pd.testing.assert_frame_equal(
df, bioframe.closest(df1, df2, k=1, direction_col="strand")
)
### closest(df1, df2, k=1, ignore_upstream=False, ignore_downstream=True,
### ignore_overlaps=True, direction_col="strand") ###
d = """chrom start end strand chrom_ start_ end_ distance
0 chr1 3 5 + chr1 1 2 1
1 chr1 3 5 - chr1 10 11 5
"""
df = pd.read_csv(StringIO(d), sep=r"\s+").astype(
{
"start_": pd.Int64Dtype(),
"end_": pd.Int64Dtype(),
"distance": pd.Int64Dtype(),
}
)
pd.testing.assert_frame_equal(df,
bioframe.closest(df1, df2,
k=1,
ignore_upstream=False,
ignore_downstream=True,
ignore_overlaps=True,
direction_col="strand"))
### closest(df1, df2, k=1, ignore_upstream=True, ignore_downstream=False,
### ignore_overlaps=True, direction_col="strand") ###
d = """chrom start end strand chrom_ start_ end_ distance
0 chr1 3 5 + chr1 10 11 5
1 chr1 3 5 - chr1 1 2 1
"""
df = pd.read_csv(StringIO(d), sep=r"\s+").astype(
{
"start_": pd.Int64Dtype(),
"end_": pd.Int64Dtype(),
"distance": pd.Int64Dtype(),
}
)
pd.testing.assert_frame_equal(df,
bioframe.closest(df1, df2,
k=1,
ignore_upstream=True,
ignore_downstream=False,
ignore_overlaps=True,
direction_col="strand"))
def test_coverage():
#### coverage does not exceed length of original interval
df1 = pd.DataFrame([["chr1", 3, 8]], columns=["chrom", "start", "end"])
df2 = pd.DataFrame([["chr1", 2, 10]], columns=["chrom", "start", "end"])
d = """chrom start end coverage
0 chr1 3 8 5"""
df = pd.read_csv(StringIO(d), sep=r"\s+")
pd.testing.assert_frame_equal(df, bioframe.coverage(df1, df2))
### coverage of interval on different chrom returns zero for coverage and n_overlaps
df1 = pd.DataFrame([["chr1", 3, 8]], columns=["chrom", "start", "end"])
df2 = pd.DataFrame([["chrX", 3, 8]], columns=["chrom", "start", "end"])
d = """chrom start end coverage
0 chr1 3 8 0 """
df = pd.read_csv(StringIO(d), sep=r"\s+")
pd.testing.assert_frame_equal(df, bioframe.coverage(df1, df2))
### when a second overlap starts within the first
df1 = pd.DataFrame([["chr1", 3, 8]], columns=["chrom", "start", "end"])
df2 = pd.DataFrame(
[["chr1", 3, 6], ["chr1", 5, 8]], columns=["chrom", "start", "end"]
)
d = """chrom start end coverage
0 chr1 3 8 5"""
df = pd.read_csv(StringIO(d), sep=r"\s+")
pd.testing.assert_frame_equal(df, bioframe.coverage(df1, df2))
### coverage of NA interval returns zero for coverage
df1 = pd.DataFrame(
[
["chr1", 10, 20],
[pd.NA, pd.NA, pd.NA],
["chr1", 3, 8],
[pd.NA, pd.NA, pd.NA],
],
columns=["chrom", "start", "end"],
)
df2 = pd.DataFrame(
[["chr1", 3, 6], ["chr1", 5, 8], [pd.NA, pd.NA, pd.NA]],
columns=["chrom", "start", "end"],
)
df1 = bioframe.sanitize_bedframe(df1)
df2 = bioframe.sanitize_bedframe(df2)
df_coverage = pd.DataFrame(
[
["chr1", 10, 20, 0],
[pd.NA, pd.NA, pd.NA, 0],
["chr1", 3, 8, 5],
[pd.NA, pd.NA, pd.NA, 0],
],
columns=["chrom", "start", "end", "coverage"],
).astype(
{"start": pd.Int64Dtype(), "end": pd.Int64Dtype(), "coverage": pd.Int64Dtype()}
)
pd.testing.assert_frame_equal(df_coverage, bioframe.coverage(df1, df2))
### coverage without return_input returns a single column dataFrame
assert (
bioframe.coverage(df1, df2, return_input=False)["coverage"].values
== np.array([0, 0, 5, 0])
).all()
def test_subtract():
### no intervals should be left after self-subtraction
df1 = pd.DataFrame(
[["chrX", 3, 8], ["chr1", 4, 7], ["chrX", 1, 5]],
columns=["chrom", "start", "end"],
)
assert len(bioframe.subtract(df1, df1)) == 0
### no intervals on chrX should remain after subtracting a longer interval
### interval on chr1 should be split.
### additional column should be propagated to children.
df2 = pd.DataFrame(
[
["chrX", 0, 18],
["chr1", 5, 6],
],
columns=["chrom", "start", "end"],
)
df1["animal"] = "sea-creature"
df_result = pd.DataFrame(
[["chr1", 4, 5, "sea-creature"], ["chr1", 6, 7, "sea-creature"]],
columns=["chrom", "start", "end", "animal"],
).astype({"start": pd.Int64Dtype(), "end": pd.Int64Dtype()})
pd.testing.assert_frame_equal(
df_result,
bioframe.subtract(df1, df2)
.sort_values(["chrom", "start", "end"])
.reset_index(drop=True),
)
### no intervals on chrX should remain after subtracting a longer interval
df2 = pd.DataFrame(
[["chrX", 0, 4], ["chr1", 6, 6], ["chrX", 4, 9]],
columns=["chrom", "start", "end"],
)
df1["animal"] = "sea-creature"
df_result = pd.DataFrame(
[["chr1", 4, 6, "sea-creature"], ["chr1", 6, 7, "sea-creature"]],
columns=["chrom", "start", "end", "animal"],
)
pd.testing.assert_frame_equal(
df_result.astype({"start": pd.Int64Dtype(), "end": pd.Int64Dtype()}),
bioframe.subtract(df1, df2)
.sort_values(["chrom", "start", "end"])
.reset_index(drop=True),
)
### subtracting dataframes funny column names
funny_cols = ["C", "chromStart", "chromStop"]
df1 = pd.DataFrame(
[["chrX", 3, 8], ["chr1", 4, 7], ["chrX", 1, 5]],
columns=funny_cols,
)
df1["strand"] = "+"
assert len(bioframe.subtract(df1, df1, cols1=funny_cols, cols2=funny_cols)) == 0
funny_cols2 = ["chr", "st", "e"]
df2 = pd.DataFrame(
[
["chrX", 0, 18],
["chr1", 5, 6],
],
columns=funny_cols2,
)
df_result = pd.DataFrame(
[["chr1", 4, 5, "+"], ["chr1", 6, 7, "+"]],
columns=[*funny_cols, "strand"],
)
df_result = df_result.astype(
{funny_cols[1]: pd.Int64Dtype(), funny_cols[2]: pd.Int64Dtype()}
)
pd.testing.assert_frame_equal(
df_result,
bioframe.subtract(df1, df2, cols1=funny_cols, cols2=funny_cols2)
.sort_values(funny_cols)
.reset_index(drop=True),
)
# subtract should ignore empty groups
df1 = pd.DataFrame(
[
["chrX", 1, 8],
["chrX", 2, 10],
],
columns=["chrom", "start", "end"],
)
df2 = pd.DataFrame(
[
["chrX", 1, 8],
],
columns=["chrom", "start", "end"],
)
df_cat = pd.CategoricalDtype(categories=["chrX", "chr1"], ordered=True)
df1 = df1.astype({"chrom": df_cat})
df_subtracted = pd.DataFrame(
[
["chrX", 8, 10],
],
columns=["chrom", "start", "end"],
)
assert bioframe.subtract(df1, df1).empty
pd.testing.assert_frame_equal(
df_subtracted.astype({"start": pd.Int64Dtype(), "end": pd.Int64Dtype()}),
bioframe.subtract(df1, df2),
check_dtype=False,
check_categorical=False,
)
## test transferred from deprecated bioframe.split
df1 = pd.DataFrame(
[["chrX", 3, 8], ["chr1", 4, 7], ["chrX", 1, 5]],
columns=["chrom", "start", "end"],
)
df2 = pd.DataFrame(
[
["chrX", 4],
["chr1", 5],
],
columns=["chrom", "pos"],
)
df2["start"] = df2["pos"]
df2["end"] = df2["pos"]
df_result = (
pd.DataFrame(
[
["chrX", 1, 4],
["chrX", 3, 4],
["chrX", 4, 5],
["chrX", 4, 8],
["chr1", 5, 7],
["chr1", 4, 5],
],
columns=["chrom", "start", "end"],
)
.sort_values(["chrom", "start", "end"])
.reset_index(drop=True)
.astype({"start": pd.Int64Dtype(), "end": pd.Int64Dtype()})
)
pd.testing.assert_frame_equal(
df_result,
bioframe.subtract(df1, df2)
.sort_values(["chrom", "start", "end"])
.reset_index(drop=True),
)
# Test the case when a chromosome should not be split
# (now implemented with subtract)
df1 = pd.DataFrame(
[
["chrX", 3, 8],
["chr1", 4, 7],
],
columns=["chrom", "start", "end"],
)
df2 = pd.DataFrame([["chrX", 4]], columns=["chrom", "pos"])
df2["start"] = df2["pos"].values
df2["end"] = df2["pos"].values
df_result = (
pd.DataFrame(
[
["chrX", 3, 4],
["chrX", 4, 8],
["chr1", 4, 7],
],
columns=["chrom", "start", "end"],
)
.sort_values(["chrom", "start", "end"])
.reset_index(drop=True)
)
pd.testing.assert_frame_equal(
df_result.astype({"start": pd.Int64Dtype(), "end": pd.Int64Dtype()}),
bioframe.subtract(df1, df2)
.sort_values(["chrom", "start", "end"])
.reset_index(drop=True),
)
# subtract should ignore null rows
df1 = pd.DataFrame(
[[pd.NA, pd.NA, pd.NA], ["chr1", 1, 5]],
columns=["chrom", "start", "end"],
).astype({"start": pd.Int64Dtype(), "end": pd.Int64Dtype()})
df2 = pd.DataFrame(
[
["chrX", 1, 5],
[pd.NA, pd.NA, pd.NA],
["chr1", 4, 8],
[pd.NA, pd.NA, pd.NA],
["chr1", 10, 11],
],
columns=["chrom", "start", "end"],
).astype({"start": pd.Int64Dtype(), "end": pd.Int64Dtype()})
df_subtracted = pd.DataFrame(
[
["chr1", 1, 4],
],
columns=["chrom", "start", "end"],
).astype({"start": pd.Int64Dtype(), "end": pd.Int64Dtype()})
pd.testing.assert_frame_equal(df_subtracted, bioframe.subtract(df1, df2))
df1 = pd.DataFrame(
[
[pd.NA, pd.NA, pd.NA],
],
columns=["chrom", "start", "end"],
).astype({"start": pd.Int64Dtype(), "end": pd.Int64Dtype()})
assert len(bioframe.subtract(df1, df2)) == 0 # empty df1 but valid chroms in df2
with pytest.raises(ValueError): # no non-null chromosomes
bioframe.subtract(df1, df1)
df2 = pd.DataFrame(
[
[pd.NA, pd.NA, pd.NA],
[pd.NA, pd.NA, pd.NA],
],
columns=["chrom", "start", "end"],
).astype({"start": pd.Int64Dtype(), "end": pd.Int64Dtype()})
with pytest.raises(ValueError): # no non-null chromosomes
bioframe.subtract(df1, df2)
def test_setdiff():
cols1 = ["chrom1", "start", "end"]
cols2 = ["chrom2", "start", "end"]
df1 = pd.DataFrame(
[
["chr1", 8, 12, "+", "cat"],
["chr1", 8, 12, "-", "cat"],
["chrX", 1, 8, "+", "cat"],
],
columns=[*cols1, "strand", "animal"],
)
df2 = pd.DataFrame(
[
["chrX", 7, 10, "-", "dog"],
["chr1", 6, 10, "-", "cat"],
["chr1", 6, 10, "-", "cat"],
],
columns=[*cols2, "strand", "animal"],
)
assert (
len(
bioframe.setdiff(
df1,
df2,
cols1=cols1,
cols2=cols2,
on=None,
)
)
== 0
) # everything overlaps
assert (
len(
bioframe.setdiff(
df1,
df2,
cols1=cols1,
cols2=cols2,
on=["animal"],
)
)
== 1
) # two overlap, one remains
assert (
len(
bioframe.setdiff(
df1,
df2,
cols1=cols1,
cols2=cols2,
on=["strand"],
)
)
== 2
) # one overlaps, two remain
# setdiff should ignore nan rows
df1 = pd.concat([pd.DataFrame([pd.NA]), df1, pd.DataFrame([pd.NA])])[
["chrom1", "start", "end", "strand", "animal"]
]
df1 = df1.astype(
{
"start": pd.Int64Dtype(),
"end": pd.Int64Dtype(),
}
)
df2 = pd.concat([pd.DataFrame([pd.NA]), df2, pd.DataFrame([pd.NA])])[
["chrom2", "start", "end", "strand", "animal"]
]
df2 = df2.astype(
{
"start": pd.Int64Dtype(),
"end": pd.Int64Dtype(),
}
)
assert (2, 5) == np.shape(bioframe.setdiff(df1, df1, cols1=cols1, cols2=cols1))
assert (2, 5) == np.shape(bioframe.setdiff(df1, df2, cols1=cols1, cols2=cols2))
assert (4, 5) == np.shape(
bioframe.setdiff(df1, df2, on=["strand"], cols1=cols1, cols2=cols2)
)
def test_count_overlaps():
df1 = pd.DataFrame(
[
["chr1", 8, 12, "+", "cat"],
["chr1", 8, 12, "-", "cat"],
["chrX", 1, 8, "+", "cat"],
],
columns=["chrom1", "start", "end", "strand", "animal"],
)
df2 = pd.DataFrame(
[
["chr1", 6, 10, "+", "dog"],
["chr1", 6, 10, "+", "dog"],
["chrX", 7, 10, "+", "dog"],
["chrX", 7, 10, "+", "dog"],
],
columns=["chrom2", "start2", "end2", "strand", "animal"],
)
assert (
bioframe.count_overlaps(
df1,
df2,
on=None,
cols1=("chrom1", "start", "end"),
cols2=("chrom2", "start2", "end2"),
)["count"].values
== np.array([2, 2, 2])
).all()
assert (
bioframe.count_overlaps(
df1,
df2,
on=["strand"],
cols1=("chrom1", "start", "end"),
cols2=("chrom2", "start2", "end2"),
)["count"].values
== np.array([2, 0, 2])
).all()
assert (
bioframe.count_overlaps(
df1,
df2,
on=["strand", "animal"],
cols1=("chrom1", "start", "end"),
cols2=("chrom2", "start2", "end2"),
)["count"].values
== np.array([0, 0, 0])
).all()
# overlaps with pd.NA
counts_no_nans = bioframe.count_overlaps(
df1,
df2,
on=None,
cols1=("chrom1", "start", "end"),
cols2=("chrom2", "start2", "end2"),
)
df1_na = (pd.concat([pd.DataFrame([pd.NA]), df1, pd.DataFrame([pd.NA])])).astype(
{
"start": pd.Int64Dtype(),
"end": pd.Int64Dtype(),
}
)[["chrom1", "start", "end", "strand", "animal"]]
df2_na = (pd.concat([pd.DataFrame([pd.NA]), df2, pd.DataFrame([pd.NA])])).astype(
{
"start2": pd.Int64Dtype(),
"end2": pd.Int64Dtype(),
}
)[["chrom2", "start2", "end2", "strand", "animal"]]
counts_nans_inserted_after = (
pd.concat([pd.DataFrame([pd.NA]), counts_no_nans, pd.DataFrame([pd.NA])])
).astype({"start": pd.Int64Dtype(), "end": pd.Int64Dtype(),})[
["chrom1", "start", "end", "strand", "animal", "count"]
]
counts_nans = bioframe.count_overlaps(
df1_na,
df2_na,
on=None,
cols1=("chrom1", "start", "end"),
cols2=("chrom2", "start2", "end2"),
)
pd.testing.assert_frame_equal(
counts_nans,
bioframe.count_overlaps(
df1_na,
df2,
on=None,
cols1=("chrom1", "start", "end"),
cols2=("chrom2", "start2", "end2"),
),
)
assert (
counts_nans["count"].values
== counts_nans_inserted_after["count"].fillna(0).values
).all()
### coverage without return_input returns a single column dataFrame
pd.testing.assert_frame_equal(
bioframe.count_overlaps(
df1_na,
df2_na,
cols1=("chrom1", "start", "end"),
cols2=("chrom2", "start2", "end2"),
return_input=False,
),
pd.DataFrame(counts_nans["count"]),
)
def test_assign_view():
## default assignment case
view_df = pd.DataFrame(
[
["chr11", 1, 8, "chr11p"],
],
columns=["chrom", "start", "end", "name"],
)
df = pd.DataFrame(
[
["chr11", 0, 10, "+"],
],
columns=["chrom", "start", "end", "strand"],
)
df_assigned = pd.DataFrame(
[
["chr11", 0, 10, "+", "chr11p"],
],
columns=["chrom", "start", "end", "strand", "view_region"],
)
df_assigned = df_assigned.astype(
{"chrom": str, "start": pd.Int64Dtype(), "end": pd.Int64Dtype()}
)
pd.testing.assert_frame_equal(df_assigned, bioframe.assign_view(df, view_df))
# non-default columns in view
funky_view = view_df.rename(columns={"chrom": "chr"})
pd.testing.assert_frame_equal(
df_assigned,
bioframe.assign_view(
df,
view_df=funky_view,
cols_view=["chr", "start", "end"],
),
)
# assignment with funny view_name_col and an interval on chr2 not cataloged
# in the view_df
view_df = pd.DataFrame(
[
["chrX", 1, 8, "oranges"],
["chrX", 8, 20, "grapefruit"],
["chr1", 0, 10, "apples"],
],
columns=["chrom", "start", "end", "fruit"],
)
df = pd.DataFrame(
[
["chr1", 0, 10, "+"],
["chrX", 5, 10, "+"],
["chrX", 0, 5, "+"],
["chr2", 5, 10, "+"],
],
columns=["chrom", "start", "end", "strand"],
)
df_assigned = pd.DataFrame(
[
["chr1", 0, 10, "+", "apples"],
["chrX", 5, 10, "+", "oranges"],
["chrX", 0, 5, "+", "oranges"],
],
columns=["chrom", "start", "end", "strand", "funny_view_region"],
)
df_assigned = df_assigned.astype(
{"chrom": str, "start": pd.Int64Dtype(), "end": pd.Int64Dtype()}
)
pd.testing.assert_frame_equal(
df_assigned,
bioframe.assign_view(
df,
view_df,
view_name_col="fruit",
df_view_col="funny_view_region",
drop_unassigned=True,
),
)
### keep the interval with NA as its region if drop_unassigned is False
df_assigned = pd.DataFrame(
[
["chr1", 0, 10, "+", "apples"],
["chrX", 5, 10, "+", "oranges"],
["chrX", 0, 5, "+", "oranges"],
["chr2", 5, 10, "+", pd.NA],
],
columns=["chrom", "start", "end", "strand", "funny_view_region"],
)
df_assigned = df_assigned.astype(
{"chrom": str, "start": pd.Int64Dtype(), "end": pd.Int64Dtype()}
)
pd.testing.assert_frame_equal(
df_assigned,
bioframe.assign_view(
df,
view_df,
view_name_col="fruit",
df_view_col="funny_view_region",
drop_unassigned=False,
),
)
### assign_view with NA values assigns a view of none
df = pd.DataFrame(
[
["chr1", 0, 10, "+"],
["chrX", 5, 10, "+"],
[pd.NA, pd.NA, pd.NA, "+"],
["chrX", 0, 5, "+"],
["chr2", 5, 10, "+"],
],
columns=["chrom", "start", "end", "strand"],
).astype({"start": pd.Int64Dtype(), "end": pd.Int64Dtype()})
pd.testing.assert_frame_equal(
df, bioframe.assign_view(df, view_df, view_name_col="fruit").iloc[:, :-1]
)
assert (
bioframe.assign_view(df, view_df, view_name_col="fruit")["view_region"].values
== np.array(["apples", "oranges", None, "oranges", None], dtype=object)
).all()
def test_sort_bedframe():
view_df = pd.DataFrame(
[
["chrX", 1, 8, "oranges"],
["chrX", 8, 20, "grapefruit"],
["chr1", 0, 10, "apples"],
],
columns=["chrom", "start", "end", "fruit"],
)
df = pd.DataFrame(
[
["chr2", 5, 10, "+"],
["chr1", 0, 10, "+"],
["chrX", 5, 10, "+"],
["chrX", 0, 5, "+"],
],
columns=["chrom", "start", "end", "strand"],
)
# sorting just by chrom,start,end
df_sorted = pd.DataFrame(
[
["chr1", 0, 10, "+"],
["chr2", 5, 10, "+"],
["chrX", 0, 5, "+"],
["chrX", 5, 10, "+"],
],
columns=["chrom", "start", "end", "strand"],
)
pd.testing.assert_frame_equal(df_sorted, bioframe.sort_bedframe(df))
# when a view_df is provided, regions without assigned views
# are placed last
df_sorted = pd.DataFrame(
[
["chrX", 0, 5, "+"],
["chrX", 5, 10, "+"],
["chr1", 0, 10, "+"],
["chr2", 5, 10, "+"],
],
columns=["chrom", "start", "end", "strand"],
)
    # test if sorting after assigning view to df de novo works
pd.testing.assert_frame_equal(
df_sorted, bioframe.sort_bedframe(df, view_df, view_name_col="fruit")
)
# non-default columns in view
funky_view = view_df.rename(columns={"chrom": "chr"})
pd.testing.assert_frame_equal(
df_sorted,
bioframe.sort_bedframe(
df,
view_df=funky_view,
view_name_col="fruit",
cols_view=["chr", "start", "end"],
),
)
    # also test if sorting after assigning view to df de novo works with
# default view_name_col
pd.testing.assert_frame_equal(
df_sorted, bioframe.sort_bedframe(df, view_df.rename(columns={"fruit": "name"}))
)
    # also test if sorting after assigning view to df from chromsizes-like
# dictionary works:
pd.testing.assert_frame_equal(
df_sorted, bioframe.sort_bedframe(df, view_df={"chrX": 20, "chr1": 10})
)
### 'df' has no column 'view_region', so this should raise a ValueError
assert pytest.raises(
ValueError,
bioframe.sort_bedframe,
df,
view_df,
view_name_col="fruit",
df_view_col="view_region",
)
### sort_bedframe with NA entries:
df = pd.DataFrame(
[
["chr1", 0, 10, "+"],
["chrX", 5, 10, "+"],
[pd.NA, pd.NA, pd.NA, "+"],
["chrX", 0, 5, "+"],
["chr2", 5, 10, "+"],
],
columns=["chrom", "start", "end", "strand"],
).astype({"start": pd.Int64Dtype(), "end": pd.Int64Dtype()})
# NA put at end
assert pd.isna(bioframe.sort_bedframe(df)["chrom"].values[-1])
assert pd.isna(
bioframe.sort_bedframe(df, view_df, view_name_col="fruit")["chrom"].values[-1]
)
assert (
df.dtypes == bioframe.sort_bedframe(df, view_df, view_name_col="fruit").dtypes
).all()
|
1ed96fb4456af7633fd2d8ffa6fc40bc6059bf08
|
6defeaa9e3eff61cd861c855ed2f65db2a457564
|
/onmt/inputters/news_dataset.py
|
24f5445778e2d88f34a7c213a0d4fe4a24fa7590
|
[
"MIT"
] |
permissive
|
memray/OpenNMT-kpg-release
|
50439d2a58d4499b3a4b1d1fdb586d266c4367e7
|
d16bf09e21521a6854ff3c7fe6eb271412914960
|
refs/heads/master
| 2023-08-17T14:32:04.442881
| 2023-01-31T03:24:46
| 2023-01-31T03:24:46
| 213,238,221
| 222
| 34
|
MIT
| 2023-07-22T18:03:01
| 2019-10-06T20:23:17
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 47,907
|
py
|
news_dataset.py
|
# -*- coding: utf-8 -*-
import copy
import json
import logging
import random
import time
from collections import Counter
from functools import partial
from itertools import starmap
from multiprocessing import pool, Pool
import six
import torch
import torchtext
from transformers import AutoTokenizer, RobertaTokenizerFast, RobertaTokenizer, AddedToken
from torchtext.data import Field, RawField
from torchtext.data import Dataset as TorchtextDataset
from torchtext.data import Example
from tqdm import tqdm
from onmt.inputters.datareader_base import DataReaderBase
from onmt.inputters.dataset_base import _join_dicts, _dynamic_dict
class Token():
def __init__(self, token, field):
self.token = token
self.field = field
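# A Token pairs a (sub)word string with the part-of-document tag
# (e.g. [PART_TITLE], [PART_MAINBODY]) that is later emitted as the
# │-separated field feature attached to each source token.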
def process_news_example(ex_dict, tgt_fields, tokenizer=None, tgt_weights=None,
add_meta_label=True, add_field_label=True,
has_special_vocab=False,
max_src_len=None, max_tgt_len=None,
return_type='str'):
dataset = ex_dict['source']
if add_meta_label or add_field_label:
assert has_special_vocab, 'if add_meta_label or add_field_label, special_vocab must be given.'
    # if a tokenizer is given, tokenize text on-the-fly
if tokenizer:
text = ex_dict['text'].replace('\n', '[SEP_PAR]')
ex_dict['text_tokens'] = ['[SEP_PAR]'] + tokenizer.tokenize(text)
text_tokens = [Token(t, field='[PART_MAINBODY]') for t in ex_dict['text_tokens']]
title = ex_dict['title'].replace('\n', ' ')
ex_dict['title_tokens'] = tokenizer.tokenize(title)
title_tokens = [Token(t, field='[PART_TITLE]') for t in ex_dict['title_tokens']]
        summary = ex_dict['summary'].replace('\n', '[SEP_SUM]')
ex_dict['summary_tokens'] = tokenizer.tokenize(summary)
summary_tokens = [Token(t, field='[PART_SUMMARY]') for t in ex_dict['summary_tokens']]
if dataset == 'cnndm' or dataset == 'xsum':
desc = ex_dict['metadata']['description'].replace('\n', '[SEP_SUM]')
ex_dict['desc_tokens'] = tokenizer.tokenize(desc)
desc_tokens = [Token(t, field='[PART_DESCRIPTION]') for t in ex_dict['desc_tokens']]
else:
desc_tokens = None
else:
ex_dict['text_tokens'] = ['[SEP_PAR]']+[w if w != '\n' else '[SEP_PAR]' for w in ex_dict['text_tokens']]
text_tokens = [Token(t, field='[PART_MAINBODY]') for t in ex_dict['text_tokens']]
title_tokens = [w for w in ex_dict['title_tokens'] if w != '\n']
title_tokens = [Token(t, field='[PART_TITLE]') for t in title_tokens]
summary_tokens = [w if w != '\n' else '[SEP_SUM]' for w in ex_dict['summary_tokens']]
summary_tokens = [Token(t, field='[PART_SUMMARY]') for t in summary_tokens]
if 'desc_tokens' in ex_dict:
desc_tokens = [w if w != '\n' else '[SEP_SUM]' for w in ex_dict['desc_tokens']]
desc_tokens = [Token(t, field='[PART_DESCRIPTION]') for t in desc_tokens]
else:
desc_tokens = None
# randomly select a target and use the rest as source
copied_tgt_fields = copy.copy(tgt_fields)
if 'description' in copied_tgt_fields and not desc_tokens:
copied_tgt_fields.remove('description')
if 'title' in copied_tgt_fields and not title_tokens:
copied_tgt_fields.remove('title')
if 'summary' in copied_tgt_fields and not summary_tokens:
copied_tgt_fields.remove('summary')
    if len(copied_tgt_fields) > 0:
tgt_field = random.choices(copied_tgt_fields, weights=tgt_weights, k=1)[0]
else:
# for cases during testing, but no valid target field
tgt_field = tgt_fields[0]
infill_placeholder_token = Token('[PART_INFILL_PLACE]', field='[PART_INFILL_PLACE]')
if tgt_field == 'summary':
# remove title to avoid info-leaking during multi-dataset training
# changed from : src_tokens = title_tokens + sep_token + text_tokens
src_tokens = title_tokens + text_tokens
tgt_tokens = summary_tokens
elif tgt_field == 'title':
src_tokens = text_tokens
tgt_tokens = title_tokens
elif tgt_field == 'description' and desc_tokens:
src_tokens = title_tokens + text_tokens
tgt_tokens = desc_tokens
elif tgt_field == 'randomsent':
raise NotImplementedError
else:
raise NotImplementedError
# meta_label: prepend metadata labels to src tokens
if add_meta_label:
meta_tokens = get_meta_tokens(ex_dict, dataset, tgt_field)
src_tokens = meta_tokens + src_tokens
# RoBERTa model requires special tokens in order to work.
if tokenizer:
cls_token = Token(token=tokenizer.cls_token, field='[PART_METALABEL]')
src_tokens = [cls_token] + src_tokens
if max_src_len and len(src_tokens) > max_src_len:
src_tokens = src_tokens[: max_src_len - 1]
trunc_token = Token(token='[MAINBODY_TRUNCATED_END]', field='[PART_MAINBODY]')
src_tokens.append(trunc_token)
if max_tgt_len and len(tgt_tokens) > max_tgt_len:
tgt_tokens = tgt_tokens[: max_tgt_len - 1]
if return_type == 'str':
if add_field_label:
src = ' '.join([t.token + u'│' + t.field for t in src_tokens]) + '\n'
else:
src = ' '.join([t.token for t in src_tokens]) + '\n'
tgt = ' '.join([t.token for t in tgt_tokens]) + '\n'
# (OpenNMT will prepend a <s> to tgt, but tgt_mask is not used at all)
src_mask = torch.LongTensor([1] * len(src_tokens))
tgt_mask = torch.LongTensor([1] * (len(tgt_tokens) + 1))
else:
# TODO, directly use tokenizer to encode
src = torch.LongTensor([1] * len(src_tokens))
        tgt = torch.LongTensor([1] * len(tgt_tokens))
src_mask = torch.LongTensor([1] * len(src_tokens))
tgt_mask = torch.LongTensor([1] * len(tgt_tokens))
new_ex_dict = {'src': src, 'tgt': tgt,
'indices': ex_dict['indices'] if 'indices' in ex_dict else None,
# for pretrained model
'src_mask': src_mask,
'tgt_mask': tgt_mask,
'src_tokens': [t.token for t in src_tokens],
'tgt_tokens': [t.token for t in tgt_tokens],
}
return new_ex_dict
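# Example of the resulting format (token values here are hypothetical): with
# return_type='str', add_meta_label=True and add_field_label=True, `src` is a
# whitespace-joined string of token│field pairs such as
#   "[DATASET_CNNDM]│[PART_METALABEL] [SUMMARY]│[PART_METALABEL] [BIN_DENSITY_UNK]│[PART_METALABEL] Some│[PART_TITLE] headline│[PART_TITLE] ..."
# while `tgt` is the plain whitespace-joined token sequence of the selected target field.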
def process_tokenized_news_example(ex_dict, tokenizer_name, tokenizer,
tgt_fields, tgt_weights=None,
add_meta_label=True, add_field_label=False,
has_special_vocab=False,
max_src_len=None, max_tgt_len=None,
return_type='str'):
dataset = ex_dict['source']
if add_meta_label or add_field_label:
assert has_special_vocab, 'if add_meta_label or add_field_label, special_vocab must be given.'
if tokenizer_name not in ex_dict:
raise NotImplementedError('Tokenized data is not found in the json.')
tokenized_data = ex_dict[tokenizer_name]['token']
encoded_data = ex_dict[tokenizer_name]['code']
title_sents_tokens = tokenized_data['title_sents']
title_tokens = title_sents_tokens[0]
summary_tokens = tokenized_data['summary']
desc_tokens = tokenized_data['description']
title_sents_codes = encoded_data['title_sents']
title_codes = title_sents_codes[0]
summary_codes = encoded_data['summary']
desc_codes = encoded_data['description']
if len(title_sents_tokens) == 0 or len(title_sents_codes) == 0:
return None
masks = ex_dict[tokenizer_name]['oracle_mask']
oracle_sent_mask = ex_dict[tokenizer_name]['oracle_sent_mask']
# our model has special_vocab, released BART doesn't
if has_special_vocab:
        # replace [PAD] with [SEP_PAR] (fixes an earlier preprocessing mistake)
title_sents_tokens = [s if s!=['[PAD]'] else ['[SEP_PAR]'] for s in title_sents_tokens]
pad_token_id = tokenizer.convert_tokens_to_ids(['[PAD]'])
sep_token_id = tokenizer.convert_tokens_to_ids(['[SEP_PAR]'])
title_sents_codes = [s if s!=pad_token_id else sep_token_id for s in title_sents_codes]
# append a [SEP_PAR] to the beginning of title
title_sents_tokens = [['[SEP_PAR]']] + title_sents_tokens
title_sents_codes = [sep_token_id] + title_sents_codes
masks = {k: [[0]]+v for k,v in masks.items()}
oracle_sent_mask = [0] + oracle_sent_mask
# add a sentence head token `[HEAD_SENT]` to all sentences
title_sents_tokens = [s if len(s) == 1 else ['[HEAD_SENT]']+s for s in title_sents_tokens]
senthead_token_id = tokenizer.convert_tokens_to_ids('[HEAD_SENT]')
title_sents_codes = [s if len(s) == 1 else [senthead_token_id]+s for s in title_sents_codes]
masks = {k: [s if len(s) == 1 else [0]+s for s in v] for k,v in masks.items()}
else:
        # replace [PAD] with [SEP_PAR] (an earlier preprocessing mistake); tokens and codes may not match
dot_token_id = tokenizer.convert_tokens_to_ids(['.'])
if len(title_sents_tokens) > 1 and title_sents_tokens[1] == ['[PAD]']:
title_sents_tokens[1] = ['.']
title_sents_codes[1] = dot_token_id
new_title_sents_tokens = []
new_title_sents_codes = []
for sent_token, sent_code in zip(title_sents_tokens, title_sents_codes):
new_sent_token, new_sent_code = [], []
for t, c in zip(sent_token, sent_code):
if t!='[PAD]' and c < tokenizer.vocab_size:
new_sent_token.append(t)
new_sent_code.append(c)
assert len(new_sent_token) == len(new_sent_code), 'sentence lengths of token and code mismatch'
if len(new_sent_token) > 0 and len(new_sent_code) > 0:
new_title_sents_tokens.append(new_sent_token)
new_title_sents_codes.append(new_sent_code)
title_sents_tokens = new_title_sents_tokens
title_sents_codes = new_title_sents_codes
# randomly select a target and use the rest as source
copied_tgt_dict = {k:v for k,v in zip(tgt_fields, tgt_weights)}
if 'description' in copied_tgt_dict and not desc_tokens:
del copied_tgt_dict['description']
if 'title' in copied_tgt_dict and not title_tokens:
del copied_tgt_dict['title']
if 'summary' in copied_tgt_dict and not summary_tokens:
del copied_tgt_dict['summary']
if 'randomsent' in copied_tgt_dict and len(title_sents_tokens) < 4:
del copied_tgt_dict['randomsent']
if len(copied_tgt_dict) == 0:
return None
copied_tgt_fields = list(copied_tgt_dict.keys())
copied_tgt_weights = list(copied_tgt_dict.values())
if len(copied_tgt_fields) > 0:
tgt_field = random.choices(copied_tgt_fields, weights=copied_tgt_weights, k=1)[0]
else:
# for cases during testing, but no valid target field
tgt_field = tgt_fields[0]
# field token: sentence position tokens
field_tokens = []
sent_count = 0
for s in title_sents_tokens:
if len(s) == 1 and s[0] == '[SEP_PAR]':
field_tokens.append(['[SEP_PAR]'])
else:
sent_count = sent_count if sent_count <= 127 else 127
field_tokens.append(['[SENT_POS_%d]' % sent_count] * len(s))
sent_count += 1
# determine src/tgt
if tgt_field == 'summary':
src_tokens = title_sents_tokens
src_codes = title_sents_codes
tgt_tokens = summary_tokens
tgt_codes = summary_codes
elif tgt_field == 'description' and desc_tokens:
src_tokens = title_sents_tokens
src_codes = title_sents_codes
tgt_tokens = desc_tokens
tgt_codes = desc_codes
elif tgt_field == 'title':
tgt_tokens = title_sents_tokens[1]
tgt_codes = title_sents_codes[1]
src_tokens = title_sents_tokens
src_codes = title_sents_codes
src_tokens[1] = ['[HEAD_SENT]', tokenizer.mask_token]
src_codes[1] = [senthead_token_id, tokenizer.mask_token_id]
field_tokens[1] = [field_tokens[1][0]] * 2
masks = {k: [[0, 0] if si==1 else s for si, s in enumerate(v)] for k, v in masks.items()}
elif tgt_field == 'randomsent':
accum_len = 0
for sentid, sent in enumerate(title_sents_tokens):
if accum_len > max_src_len:
break
accum_len += len(sent)
max_sentid = sentid
sentid = random.randint(2, max_sentid)
iter_count = 0
while len(title_sents_tokens[sentid]) < 5 and iter_count < 5:
iter_count += 1
sentid = random.randint(2, max_sentid)
tgt_tokens = title_sents_tokens[sentid]
tgt_codes = title_sents_codes[sentid]
src_tokens = title_sents_tokens
src_codes = title_sents_codes
src_tokens[sentid] = ['[HEAD_SENT]', tokenizer.mask_token]
src_codes[sentid] = [senthead_token_id, tokenizer.mask_token_id]
field_tokens[sentid] = [field_tokens[sentid][0]] * 2
masks = {k: [[0, 0] if si==sentid else s for si, s in enumerate(v)] for k, v in masks.items()}
else:
raise NotImplementedError
# meta_label: prepend metadata labels to src tokens
if add_meta_label:
# RoBERTa model requires special tokens in order to work.
meta_tokens = get_meta_tokens(ex_dict, dataset, tgt_field,
cls_token=tokenizer.cls_token, token_only=False)
meta_texts = [t.token for t in meta_tokens]
meta_fields = [t.field for t in meta_tokens]
meta_codes = tokenizer.convert_tokens_to_ids(meta_texts)
        assert len(meta_tokens) == len(meta_fields) == len(meta_codes)
src_tokens = [meta_texts] + src_tokens
src_codes = [meta_codes] + src_codes
field_tokens = [meta_fields] + field_tokens
masks = {k: [[0] * len(meta_tokens)]+v for k,v in masks.items()}
oracle_sent_mask = [0] + oracle_sent_mask
else:
src_tokens = [[tokenizer.cls_token]] + src_tokens
src_codes = [[tokenizer.cls_token_id]] + src_codes
field_tokens = [[tokenizer.cls_token]] + field_tokens
masks = {k: [[0]]+v for k,v in masks.items()}
oracle_sent_mask = [0] + oracle_sent_mask
# add sent-level token pointing to the [SEP_PAR] prior to the oracle sentence
oracle_sent_head_mask = [[0] * len(s) for s in src_tokens]
if has_special_vocab:
for sent_id, sent_mask in enumerate(oracle_sent_mask):
if sent_mask == 1:
# 1st token must be a `[HEAD_SENT]`
                if src_tokens[sent_id][0] != '[HEAD_SENT]':
                    # the first token of an oracle sentence is expected to be [HEAD_SENT]
                    logging.warning('oracle sentence does not start with [HEAD_SENT]: %s'
                                    % str(src_tokens[sent_id]))
# assert len(oracle_sent_head_mask[sent_id-1]) == 1
oracle_sent_head_mask[sent_id][0] = 1
masks['sentence_head'] = oracle_sent_head_mask
# flatten all data before returning
src_tokens = [t for s in src_tokens for t in s]
src_codes = [t for s in src_codes for t in s]
field_tokens = [t for s in field_tokens for t in s]
field_codes = tokenizer.encode(field_tokens)
masks = {k: [t for m in v for t in m] for k,v in masks.items()}
len_src = len(src_tokens)
if has_special_vocab:
assert len_src == len(src_codes) == len(field_codes) == len(masks['word'])
else:
assert len_src == len(src_codes)
assert len(tgt_tokens) == len(tgt_codes)
# truncate scr/tgt sequences
if max_src_len and len(src_tokens) > max_src_len:
src_codes = src_codes[: max_src_len]
src_tokens = src_tokens[: max_src_len]
field_tokens = field_tokens[: max_src_len]
field_codes = field_codes[: max_src_len]
masks = {k: v[: max_src_len] for k,v in masks.items()}
if max_tgt_len and len(tgt_tokens) > max_tgt_len:
tgt_tokens = tgt_tokens[: max_tgt_len]
tgt_codes = tgt_codes[: max_tgt_len]
# add bos and eos to tgt_tokens/tgt_codes
tgt_tokens = [tokenizer.bos_token] + tgt_tokens + [tokenizer.eos_token]
tgt_codes = [tokenizer.bos_token_id] + tgt_codes + [tokenizer.eos_token_id]
# tensorize them
src_codes = torch.LongTensor(src_codes)
field_codes = torch.LongTensor(field_codes)
tgt_codes = torch.LongTensor(tgt_codes)
masks = {'ext_' + k: torch.LongTensor(v) for k,v in masks.items()}
src = src_codes
tgt = tgt_codes
src_mask = torch.LongTensor([1] * len(src_tokens))
tgt_mask = torch.LongTensor([1] * len(tgt_tokens))
new_ex_dict = {
'src': src, 'tgt': tgt, 'src_field': field_codes,
'src_length': len(src_tokens), 'tgt_length': len(tgt_tokens),
'src_mask': src_mask, 'tgt_mask': tgt_mask,
'src_tokens': src_tokens, 'tgt_tokens': tgt_tokens,
'indices': ex_dict['indices'] if 'indices' in ex_dict else None,
}
new_ex_dict.update(masks)
return new_ex_dict
DATASET_TOKEN_MAP = {'cnndm': '[DATASET_CNNDM]',
'nyt': '[DATASET_NYT]',
'newsroom': '[DATASET_NEWSROOM]',
'xsum': '[DATASET_XSUM]',
'gigaword5': '[DATASET_GIGAWORD5]',
'newscrawl': '[DATASET_NEWSCRAWL]'
}
DENSITY_BIN_MAP = {'extractive': '[BIN_DENSITY_EXT]',
'abstractive': '[BIN_DENSITY_ABS]',
'mixed': '[BIN_DENSITY_MIX]',
'unknown': '[BIN_DENSITY_UNK]'
}
def get_meta_tokens(doc, dataset_name, tgt_field, cls_token=None, token_only=False):
# dataset label
if dataset_name == 'cnn' or dataset_name == 'dailymail':
dataset_name = 'cnndm'
if dataset_name == 'newyorktimes':
dataset_name = 'nyt'
dataset_token = Token(DATASET_TOKEN_MAP[dataset_name], field='[PART_METALABEL]')
# target type label
target_token = tgt_field
target_token = Token('[%s]' % target_token.upper(), field='[PART_METALABEL]')
# density bin label, currently only newsroom has density_bin labels
if dataset_name == 'newsroom':
density_bin = doc['metadata']['density_bin']
density_bin_token = Token(DENSITY_BIN_MAP[density_bin], field='[PART_METALABEL]')
else:
density_bin_token = Token(DENSITY_BIN_MAP['unknown'], field='[PART_METALABEL]')
meta_tokens = [dataset_token, target_token, density_bin_token]
# required by models like RoBERTa
if cls_token:
cls_token = Token(cls_token, field='[PART_METALABEL]')
meta_tokens = [cls_token] + meta_tokens
if token_only:
meta_tokens = [t.token for t in meta_tokens]
return meta_tokens
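# Illustration (hypothetical document): for a cnndm article with
# tgt_field='summary' and no cls_token, get_meta_tokens returns three Tokens
# whose .token values are [DATASET_CNNDM], [SUMMARY] and [BIN_DENSITY_UNK];
# density bins other than 'unknown' are only available for newsroom documents.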
def process_news_examples_parallel(news_examples, tgt_fields, tgt_weights,
meta_label, field_label, has_special_vocab,
max_src_len=None, max_tgt_len=None,
tokenizer=None, multi_process=False):
# news_examples = list(news_examples)
# news_examples = list(news_examples)[:1000]
if multi_process:
"""pretrained_tokenizer seems not multi-processing safe
error out "RuntimeError: received 0 items of ancdata" after a few examples (484~490)
speed is slow as well ~20 it/s, single-processing is ~60 it/s
"""
processed_list = []
partial_fn = partial(process_news_example, tgt_fields=tgt_fields,
tokenizer=tokenizer, tgt_weights=tgt_weights,
add_meta_label=meta_label, add_field_label=field_label,
has_special_vocab=has_special_vocab,
max_src_len=max_src_len, max_tgt_len=max_tgt_len,
)
with Pool(processes=4) as pool:
for processed_ex in tqdm(pool.imap(partial_fn, news_examples),
desc='Preparing src and tgt w/ multi-processing (tokenizing and field tokens)'):
processed_list.append(processed_ex)
"""
print('Preparing src and tgt w/ multiple processing (tokenizing and field tokens)')
start_time = time.clock()
pool = Pool(1)
processed_list = pool.map(partial(process_news_example, tgt_fields=tgt_fields,
tokenizer=tokenizer, tgt_weights=tgt_weights,
add_meta_label=meta_label, add_field_label=field_label),
news_examples)
pool.close()
end_time = time.clock()
print("Process finished, elapsed time=%.4f, speed=%.2f it/s" % (end_time-start_time,
len(processed_list)/(end_time-start_time)))
"""
else:
processed_list = [process_news_example(ex, tgt_fields=tgt_fields,
tokenizer=tokenizer, tgt_weights=tgt_weights,
add_meta_label=meta_label, add_field_label=field_label,
max_src_len=max_src_len, max_tgt_len=max_tgt_len,)
for ex in tqdm(news_examples,
desc='Preparing src and tgt w/ single processing (tokenizing and field tokens)')]
new_processed_list = []
for didx, d in enumerate(processed_list):
# filter out None items
if not d:
print('Error when loading data point %d, skip for now' % didx)
continue
d['indices'] = didx
new_processed_list.append(d)
return new_processed_list
def load_tokenized_news_examples(news_examples, tokenizer_name, tokenizer,
tgt_fields, tgt_weights,
meta_label, field_label,
has_special_vocab,
max_src_len=None, max_tgt_len=None, ):
processed_list = []
for ex_id, ex_dict in tqdm(enumerate(news_examples), desc='Loading tensorized src and tgt'):
# if ex_id >= 500:
# break
try:
ex_data_dict = process_tokenized_news_example(ex_dict, tokenizer_name, tokenizer,
tgt_fields=tgt_fields, tgt_weights=tgt_weights,
add_meta_label=meta_label, add_field_label=field_label,
has_special_vocab=has_special_vocab,
max_src_len=max_src_len, max_tgt_len=max_tgt_len)
if ex_data_dict is None:
# logging.warning("No valid %s is found in data %d, "
# "or source text is faulty, title=`%s`, len(text)=%d"
# % (str(tgt_fields), ex_id, ex_dict['title'], len(ex_dict['text'])))
continue
ex_data_dict['indices'] = ex_id
processed_list.append(ex_data_dict)
except Exception as e:
logging.error("Error while processing %d data: %s" % (ex_id, ex_dict))
logging.getLogger().exception('Exception message: ' + str(e))
continue
return processed_list
def build_dynamic_dict_and_masks_parallel(read_iters, fields, boseos_added, alignment_loss, alignment_targets, multi_process=False):
src_vocabs = []
stemmed_src_vocabs = []
ex_dicts = []
if multi_process:
partial_fn = partial(_dynamic_dict,
src_field=fields['src'].base_field,
tgt_field=fields['tgt'].base_field,
boseos_added=boseos_added)
with Pool(processes=4) as pool:
for src_ex_vocab, example in tqdm(pool.imap(partial_fn, starmap(_join_dicts, zip(*read_iters))),
                    desc='Processing news examples w/ multi-processing (building dynamic_dict)'):
src_vocabs.append(src_ex_vocab)
ex_dicts.append(example)
"""
print('Processing news examples w/ multiple processing (building dynamic_dict)')
start_time = time.clock()
pool = Pool()
processed_list = pool.map(partial(_dynamic_dict,
src_field=fields['src'].base_field, tgt_field=fields['tgt'].base_field),
starmap(_join_dicts, zip(*read_iters)))
end_time = time.clock()
src_vocabs = [i[0] for i in processed_list]
ex_dicts = [i[1] for i in processed_list]
print("Process finished, elapsed time=%.4f, speed=%.2f it/s" % (end_time-start_time,
len(processed_list)/(end_time-start_time)))
"""
else:
for ex_dict in tqdm(starmap(_join_dicts, zip(*read_iters)), desc='Processing news examples w/ single processing (building dynamic_dict)'):
if hasattr(fields['src'], 'base_field'):
src_field = fields['src'].base_field
tgt_field = fields['tgt'].base_field
else:
src_field = fields['src']
tgt_field = fields['tgt']
# this assumes src_field and tgt_field are both text
ex_dict, src_ex_vocab, stemmed_src_ex_vocab = _dynamic_dict(
ex_dict, src_field, tgt_field,
boseos_added=boseos_added,
alignment_loss=alignment_loss,
alignment_targets=alignment_targets
)
src_vocabs.append(src_ex_vocab)
stemmed_src_vocabs.append(stemmed_src_ex_vocab)
ex_dicts.append(ex_dict)
return ex_dicts, src_vocabs, stemmed_src_vocabs
class NewsDataset(TorchtextDataset):
"""Contain data and process it.
A dataset is an object that accepts sequences of raw data (sentence pairs
in the case of machine translation) and fields which describe how this
raw data should be processed to produce tensors. When a dataset is
instantiated, it applies the fields' preprocessing pipeline (but not
the bit that numericalizes it or turns it into batch tensors) to the raw
data, producing a list of :class:`torchtext.data.Example` objects.
torchtext's iterators then know how to use these examples to make batches.
Args:
fields (dict[str, List[Tuple[str, Field]]]): a dict with the structure
returned by :func:`onmt.inputters.get_fields()`. Usually
that means the dataset side, ``"src"`` or ``"tgt"``. Keys match
the keys of items yielded by the ``readers``, while values
are lists of (name, Field) pairs. An attribute with this
name will be created for each :class:`torchtext.data.Example`
object and its value will be the result of applying the Field
to the data that matches the key. The advantage of having
sequences of fields for each piece of raw input is that it allows
the dataset to store multiple "views" of each input, which allows
for easy implementation of token-level features, mixed word-
and character-level models, and so on. (See also
:class:`onmt.inputters.TextMultiField`.)
readers (Iterable[onmt.inputters.DataReaderBase]): Reader objects
for disk-to-dict. The yielded dicts are then processed
according to ``fields``.
data (Iterable[Tuple[str, Any]]): (name, ``data_arg``) pairs
where ``data_arg`` is passed to the ``read()`` method of the
reader in ``readers`` at that position. (See the reader object for
details on the ``Any`` type.)
dirs (Iterable[str or NoneType]): A list of directories where
data is contained. See the reader object for more details.
sort_key (Callable[[torchtext.data.Example], Any]): A function
for determining the value on which data is sorted (i.e. length).
filter_pred (Callable[[torchtext.data.Example], bool]): A function
that accepts Example objects and returns a boolean value
indicating whether to include that example in the dataset.
Attributes:
src_vocabs (List[torchtext.data.Vocab]): Used with dynamic dict/copy
attention. There is a very short vocab for each src example.
It contains just the source words, e.g. so that the generator can
predict to copy them.
"""
def __init__(self, fields, readers, data, dirs, sort_key,
tokenizer=None, filter_pred=None, opt=None):
self.sort_key = sort_key
self.tokenizer = tokenizer
self.opt = opt
# build src_map/alignment no matter field is available
can_copy = True
boseos_added = False
if hasattr(opt, 'special_vocab_path'):
if opt.special_vocab_path is None or opt.special_vocab_path == 'none' or opt.special_vocab_path == 'None':
has_special_vocab = False
else:
has_special_vocab = True
else:
has_special_vocab = False
logging.getLogger("transformers.tokenization_utils").setLevel(logging.ERROR)
# data is directly given if it's called from translate.py
if opt.data_format == 'srctgt' or data:
read_iters = [r.read(dat[1], dat[0], dir_) for r, dat, dir_
in zip(readers, data, dirs)]
elif opt.data_format == 'jsonl':
# only for cases that directly load data from json files.
read_iters = [r.read_jsonl(dir_) for r, dir_
in zip(readers, dirs)]
read_iters = read_iters[0]
tokenizer = self.tokenizer if self.tokenizer else None
read_iters = process_news_examples_parallel(read_iters, opt.tgt_fields, opt.tgt_weights,
opt.meta_label, opt.field_label,
has_special_vocab=has_special_vocab,
max_src_len=opt.src_seq_length_trunc,
max_tgt_len=opt.tgt_seq_length_trunc,
tokenizer=tokenizer,
multi_process=False)
read_iters = [[d for d in read_iters if d is not None]]
elif opt.data_format == 'jsonl_tensor':
# text has been tokenized and tensorized in advance (but not completely)
boseos_added = True
read_iters = [r.read_jsonl(dir_) for r, dir_
in zip(readers, dirs)]
read_iters = read_iters[0]
tokenizer = self.tokenizer if self.tokenizer else None
read_iters = load_tokenized_news_examples(read_iters, opt.pretrained_tokenizer,
tokenizer,
opt.tgt_fields, opt.tgt_weights,
opt.meta_label, opt.field_label,
has_special_vocab=has_special_vocab,
max_src_len=opt.src_seq_length_trunc,
max_tgt_len=opt.tgt_seq_length_trunc)
read_iters = [[d for d in read_iters if d is not None]]
else:
raise NotImplementedError
# build dynamic_dict for copynet and masks for pretrained models
# self.src_vocabs is used in collapse_copy_scores and Translator.py
alignment_loss = opt.alignment_loss if hasattr(opt, 'alignment_loss') else None
alignment_targets = opt.alignment_targets if hasattr(opt, 'alignment_targets') else None
ex_dicts, self.src_vocabs, self.stemmed_src_vocabs = build_dynamic_dict_and_masks_parallel(read_iters, fields,
boseos_added=boseos_added,
alignment_loss=alignment_loss,
alignment_targets=alignment_targets
)
examples = []
for ex_dict in tqdm(ex_dicts, desc='Processing data examples'):
ex_fields = {k: [(k, v)] for k, v in fields.items() if
k in ex_dict}
ex = Example.fromdict(ex_dict, ex_fields)
examples.append(ex)
# fields needs to have only keys that examples have as attrs
fields = []
for _, nf_list in ex_fields.items():
assert len(nf_list) == 1
fields.append(nf_list[0])
logging.getLogger().info("Loaded %d data examples from %s" % (len(examples), str(dirs)))
super(NewsDataset, self).__init__(examples, fields, filter_pred)
def reload_fields(self):
pass
def __getattr__(self, attr):
# avoid infinite recursion when fields isn't defined
if 'fields' not in vars(self):
raise AttributeError
if attr in self.fields:
return (getattr(x, attr) for x in self.examples)
else:
raise AttributeError
def save(self, path, remove_fields=True):
if remove_fields:
self.fields = []
torch.save(self, path)
def load_config(self, opt):
self.opt = opt.opt
def load_dataset_from_jsonl(fields, paths, tokenizer, opt):
dataset = NewsDataset(
fields,
readers=[NewsDataReader()],
data=None,
dirs=paths,
sort_key=news_sort_key,
tokenizer=tokenizer,
opt=opt,
)
return dataset
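# A hedged usage sketch (all option values below are illustrative; `fields`
# must come from onmt.inputters.get_fields() or a saved vocab checkpoint):
#
#   from argparse import Namespace
#   from transformers import AutoTokenizer
#   opt = Namespace(data_format='jsonl', tgt_fields=['summary'], tgt_weights=[1.0],
#                   meta_label=False, field_label=False, special_vocab_path=None,
#                   src_seq_length_trunc=512, tgt_seq_length_trunc=128)
#   tokenizer = AutoTokenizer.from_pretrained('roberta-base')
#   dataset = load_dataset_from_jsonl(fields, ['train.jsonl'], tokenizer, opt)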
class NewsDataReader(DataReaderBase):
def read(self, sequences, side, _dir=None):
"""Read text data from disk.
Read from both src and tgt files.
Args:
sequences (str or Iterable[str]):
path to text file or iterable of the actual text data.
side (str): Prefix used in return dict. Usually
``"src"`` or ``"tgt"``.
_dir (NoneType): Leave as ``None``. This parameter exists to
conform with the :func:`DataReaderBase.read()` signature.
Yields:
dictionaries whose keys are the names of fields and whose
values are more or less the result of tokenizing with those
fields.
"""
assert _dir is None or _dir == "", \
"Cannot use _dir with TextDataReader."
if isinstance(sequences, str):
sequences = DataReaderBase._read_file(sequences)
for i, seq in enumerate(sequences):
if isinstance(seq, six.binary_type):
seq = seq.decode("utf-8")
yield {side: seq, "indices": i}
def read_jsonl(self, sequences, _dir=None):
"""Read keyphrase data from disk. Current supported data format is JSON only.
Args:
sequences (str or Iterable[str]):
path to text file or iterable of the actual text data.
_dir (NoneType): Leave as ``None``. This parameter exists to
conform with the :func:`DataReaderBase.read()` signature.
Yields:
dictionaries whose keys are the names of fields and whose
values are more or less the result of tokenizing with those
fields.
"""
assert _dir is None or _dir == "", \
"Cannot use _dir with KeyphraseDataReader."
if isinstance(sequences, str):
sequences = DataReaderBase._read_file(sequences)
# we need to make indices be the real index of the list, so replace it with a counter
count = 0
for i, line in enumerate(sequences):
            try:
                # default input is a jsonl line (possibly bytes)
                if isinstance(line, six.binary_type):
                    line = line.decode("utf-8")
                data = json.loads(line)
            except Exception:
                # skip lines that cannot be parsed into a dict
                continue
            if not data or not isinstance(data, dict):
                continue
# insert `indices`
count += 1
data['indices'] = count
yield data
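# Example of a record yielded by NewsDataReader.read_jsonl (all values
# hypothetical; the keys mirror what process_news_example reads):
#   {"source": "cnndm", "title": "...", "text": "...", "summary": "...",
#    "metadata": {"description": "...", "density_bin": "mixed"}, "indices": 1}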
def news_sort_key(ex):
"""Sort using the number of tokens in the sequence."""
src_len = ex.src.shape[0] if isinstance(ex.src, torch.Tensor) else len(ex.src[0]) + 2
    if hasattr(ex, "tgt"):
        tgt_len = ex.tgt.shape[0] if isinstance(ex.tgt, torch.Tensor) else len(ex.tgt[0]) + 1
        return src_len, tgt_len
return src_len
# mix this with partial
def _feature_tokenize(
string, layer=0, tok_delim=None, feat_delim=None, truncate=None):
"""Split apart word features (like POS/NER tags) from the tokens.
Args:
string (str): A string with ``tok_delim`` joining tokens and
features joined by ``feat_delim``. For example,
``"hello|NOUN|'' Earth|NOUN|PLANET"``.
layer (int): Which feature to extract. (Not used if there are no
features, indicated by ``feat_delim is None``). In the
example above, layer 2 is ``'' PLANET``.
truncate (int or NoneType): Restrict sequences to this length of
tokens.
Returns:
List[str] of tokens.
"""
tokens = string.split(tok_delim)
if truncate is not None:
tokens = tokens[:truncate]
if feat_delim is not None:
        # some weird bug appears in XSum (on│[PART_MAINBODY] 0800 555 111│[PART_MAINBODY])
tokens = [t.split(feat_delim) for t in tokens]
tokens = [t for t in tokens if len(t)>layer]
tokens = [t[layer] for t in tokens]
return tokens
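# Usage example (illustrative strings), using the u"│" feature delimiter that
# news_fields sets up below:
#   _feature_tokenize(u"Some│[PART_TITLE] headline│[PART_TITLE]", layer=0, feat_delim=u"│")
#     -> ['Some', 'headline']
#   _feature_tokenize(u"Some│[PART_TITLE] headline│[PART_TITLE]", layer=1, feat_delim=u"│")
#     -> ['[PART_TITLE]', '[PART_TITLE]']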
class NewsMultiField(RawField):
"""Container for subfields.
Text data might use POS/NER/etc labels in addition to tokens.
This class associates the "base" :class:`Field` with any subfields.
It also handles padding the data and stacking it.
Args:
base_name (str): Name for the base field.
base_field (Field): The token field.
feats_fields (Iterable[Tuple[str, Field]]): A list of name-field
pairs.
Attributes:
fields (Iterable[Tuple[str, Field]]): A list of name-field pairs.
The order is defined as the base field first, then
``feats_fields`` in alphabetical order.
"""
def __init__(self, base_name, base_field, feats_fields):
super(NewsMultiField, self).__init__()
self.fields = [(base_name, base_field)]
for name, ff in sorted(feats_fields, key=lambda kv: kv[0]):
self.fields.append((name, ff))
# added by @memray for post-feature control
self.meta_label = False
self.field_label = False
self.num_meta_label = None
@property
def base_field(self):
return self.fields[0][1]
def process(self, batch, device=None):
"""Convert outputs of preprocess into Tensors.
Args:
batch (List[List[List[str]]]): A list of length batch size.
Each element is a list of the preprocess results for each
field (which are lists of str "words" or feature tags.
device (torch.device or str): The device on which the tensor(s)
are built.
Returns:
torch.LongTensor or Tuple[LongTensor, LongTensor]:
A tensor of shape ``(seq_len, batch_size, len(self.fields))``
where the field features are ordered like ``self.fields``.
If the base field returns lengths, these are also returned
and have shape ``(batch_size,)``.
"""
# batch (list(list(list))): batch_size x len(self.fields) x seq_len
        # NOTE: overly long batches could be truncated here before tensorization
batch_by_feat = list(zip(*batch))
base_data = self.base_field.process(batch_by_feat[0], device=device)
if self.base_field.include_lengths:
# lengths: batch_size
base_data, lengths = base_data
feats = [ff.process(batch_by_feat[i], device=device)
for i, (_, ff) in enumerate(self.fields[1:], 1)]
levels = [base_data] + feats
# data: seq_len x batch_size x len(self.fields)
data = torch.stack(levels, 2)
if self.base_field.include_lengths:
return data, lengths
else:
return data
def preprocess(self, x):
"""Preprocess data.
Args:
x (str): A sentence string (words joined by whitespace).
Returns:
List[List[str]]: A list of length ``len(self.fields)`` containing
lists of tokens/feature tags for the sentence. The output
is ordered like ``self.fields``.
"""
return [f.preprocess(x) for _, f in self.fields]
def __getitem__(self, item):
return self.fields[item]
def news_fields(**kwargs):
"""Create text fields.
Args:
base_name (str): Name associated with the field.
n_feats (int): Number of word level feats (not counting the tokens)
include_lengths (bool): Optionally return the sequence lengths.
pad (str, optional): Defaults to ``"<blank>"``.
bos (str or NoneType, optional): Defaults to ``"<s>"``.
eos (str or NoneType, optional): Defaults to ``"</s>"``.
truncate (bool or NoneType, optional): Defaults to ``None``.
Returns:
NewsMultiField
"""
n_feats = kwargs["n_feats"]
include_lengths = kwargs["include_lengths"]
base_name = kwargs["base_name"]
# change from <pad> to [PAD] and <unk> to [UNK] to be compatible with BERT
pad = "[PAD]"
unk = "[UNK]"
bos = kwargs.get("bos", "<s>")
eos = kwargs.get("eos", "</s>")
truncate = kwargs.get("truncate", None)
fields_ = []
feat_delim = u"│" if n_feats > 0 else None
for i in range(n_feats + 1):
name = base_name + "_feat_" + str(i - 1) if i > 0 else base_name
tokenize = partial(
_feature_tokenize,
layer=i,
truncate=truncate,
feat_delim=feat_delim)
use_len = i == 0 and include_lengths
feat = Field(
init_token=bos, eos_token=eos,
pad_token=pad, unk_token=unk,
tokenize=tokenize,
include_lengths=use_len)
fields_.append((name, feat))
assert fields_[0][0] == base_name # sanity check
field = NewsMultiField(fields_[0][0], fields_[0][1], fields_[1:])
return field
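# Hedged usage sketch (not part of the original module), assuming the legacy
# torchtext Field behaviour used above: build a source field with one extra
# per-token feature layer and preprocess a made-up feature-annotated sentence.
#
#   src_field = news_fields(base_name="src", n_feats=1, include_lengths=True)
#   src_field.preprocess("cats│NOUN dogs│NOUN")
#   # -> [["cats", "dogs"], ["NOUN", "NOUN"]]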
def update_field_vocab(field, tokenizer):
new_field = copy.copy(field)
setattr(new_field, 'lower', False)
setattr(new_field, 'pretrained_tokenizer', tokenizer)
# setattr(new_field, 'tokenize', partial(tokenizer.tokenize, add_special_tokens=False)) # will be tokenized in transform
setattr(new_field, 'bos_token', tokenizer.bos_token)
setattr(new_field, 'init_token', tokenizer.bos_token)
setattr(new_field, 'eos_token', tokenizer.eos_token)
setattr(new_field, 'pad_token', tokenizer.pad_token)
setattr(new_field, 'unk_token', tokenizer.unk_token)
if hasattr(tokenizer, 'sep_token'):
setattr(new_field, 'sep_token', tokenizer.sep_token)
if not hasattr(new_field, 'vocab_cls'):
setattr(new_field, 'vocab_cls', torchtext.vocab.Vocab(counter=Counter()))
setattr(new_field.vocab_cls, 'UNK', tokenizer.unk_token)
# Update vocab given the external pretrained vocab
if not hasattr(new_field, 'vocab'):
setattr(new_field, 'vocab', torchtext.vocab.Vocab(counter=Counter()))
stoi = tokenizer.get_vocab()
itos = [s for s,_ in sorted(tokenizer.get_vocab().items(), key=lambda x:x[1])]
setattr(new_field.vocab, 'UNK', tokenizer.unk_token)
setattr(new_field.vocab, 'unk_index', tokenizer.unk_token_id)
    # used by the copy loss (copy_generator.py, collapse_copy_scores(), L29) for cases where pad_index is not 0
setattr(new_field.vocab, 'PAD', tokenizer.pad_token)
setattr(new_field.vocab, 'pad_index', tokenizer.pad_token_id)
setattr(new_field.vocab, 'stoi', stoi)
setattr(new_field.vocab, 'itos', itos)
setattr(new_field.vocab, 'freqs', Counter(itos))
return new_field
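# Hedged usage sketch (not part of the original module): mirror a HuggingFace
# tokenizer's vocabulary into an existing torchtext field; `some_field` and the
# model name are placeholders.
#
#   hf_tok = AutoTokenizer.from_pretrained("roberta-base")
#   wrapped = update_field_vocab(some_field, hf_tok)
#   assert wrapped.vocab.stoi == hf_tok.get_vocab()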
def load_pretrained_tokenizer(tokenizer_name, cache_dir, special_vocab_path=None,
bpe_vocab=None, bpe_merges=None, bpe_dropout=0.0):
assert tokenizer_name or bpe_vocab, "Either tokenizer_name or bpe_vocab must be set to load HF tokenizer."
print('Loading pretrained vocabulary, dumped to %s' % cache_dir)
if tokenizer_name:
tokenizer = AutoTokenizer.from_pretrained(tokenizer_name, cache_dir=cache_dir)
print('Vocab size=%d, base vocab size=%d' % (len(tokenizer), tokenizer.vocab_size))
if special_vocab_path is not None and special_vocab_path != 'none' and special_vocab_path != 'None':
special_tokens = [w.strip() for w in open(special_vocab_path, 'r').readlines()]
num_added_toks = tokenizer.add_tokens(special_tokens)
print('Added', num_added_toks, 'special tokens')
print('Vocab size=%d, base vocab size=%d' % (len(tokenizer), tokenizer.vocab_size))
else:
print('Special token vocab is not provided.')
else:
# initialize a slow tokenizer and convert it to fast, so that special tokens can be properly segmented
sep_token = '<sep>'
kp_special_tokens = ['<present>', '<absent>', '<category>']
tokenizer = RobertaTokenizer(vocab_file=bpe_vocab,
merges_file=bpe_merges,
sep=sep_token, # doesn't matter
additional_special_tokens=kp_special_tokens)
sep_token_id = tokenizer.convert_tokens_to_ids(sep_token)
added_sep_token = AddedToken(sep_token, lstrip=False, rstrip=False)
tokenizer.sep_token = sep_token
tokenizer._sep_token = added_sep_token
tokenizer.init_kwargs['sep_token'] = sep_token
tokenizer.all_special_ids.append(sep_token_id)
tokenizer.all_special_tokens.append(sep_token)
tokenizer.all_special_tokens_extended.append(added_sep_token)
tokenizer.special_tokens_map['sep_token'] = sep_token
tokenizer.special_tokens_map_extended['sep_token'] = added_sep_token
tokenizer.unique_no_split_tokens = tokenizer.all_special_tokens
tokenizer = RobertaTokenizerFast.from_pretrained("roberta-base",
__slow_tokenizer=tokenizer, tokenizer_file=None,
vocab_file=bpe_vocab,
merges_file=bpe_merges)
print('Vocab size=%d, base vocab size=%d' % (len(tokenizer), tokenizer.vocab_size))
if isinstance(tokenizer, RobertaTokenizerFast) and float(bpe_dropout) > 0.0:
workaround_files = tokenizer._tokenizer.model.save(cache_dir, 'workaround')
tokenizer._tokenizer.model = type(tokenizer._tokenizer.model)(*workaround_files, dropout=float(bpe_dropout))
print(tokenizer.tokenize('<s> what is wrong with <mask> <sep> I do not know either <present>, <absent>, <category> </s>'))
print(tokenizer.encode('<s> what is wrong with <mask> <sep> I do not know either <present>, <absent>, <category> </s>'))
return tokenizer
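# Hedged usage sketch (not part of the original module): load a pretrained tokenizer
# without extra special tokens; the cache directory is a placeholder.
#
#   tokenizer = load_pretrained_tokenizer("roberta-base", cache_dir="/tmp/hf_cache",
#                                         special_vocab_path=None)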
|
192bb0494fd3d46d34a162b83ba30602adcfce01
|
dfa77374ba7f3f8bf7965e646527dedc622085d7
|
/gcastle/example/daggnn/daggnn_demo.py
|
d14737ed600762f05dedafd9d8ad36e9532632e7
|
[
"Apache-2.0",
"BSD-3-Clause",
"MIT"
] |
permissive
|
huawei-noah/trustworthyAI
|
7aa72721df59c3c4f75b7b9a037c5b71fb1284a1
|
238cbc41865ddf629bb6ae92c2e1445be27f98b8
|
refs/heads/master
| 2023-08-28T13:36:10.929075
| 2023-08-15T12:51:46
| 2023-08-15T12:51:46
| 248,501,097
| 832
| 206
|
Apache-2.0
| 2023-08-15T12:51:47
| 2020-03-19T12:46:47
|
Python
|
UTF-8
|
Python
| false
| false
| 1,715
|
py
|
daggnn_demo.py
|
# coding = utf-8
# Copyright (C) 2022. Huawei Technologies Co., Ltd. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
os.environ['CASTLE_BACKEND'] ='pytorch'
from castle.common import GraphDAG
from castle.metrics import MetricsDAG
from castle.datasets import DAG, IIDSimulation
from castle.algorithms import DAG_GNN
type = 'ER' # or `SF`
h = 2  # edge-density factor: h=2 gives an ER2 graph, h=5 would give ER5
n_nodes = 10
n_edges = h * n_nodes
method = 'linear'
sem_type = 'gauss'
if type == 'ER':
weighted_random_dag = DAG.erdos_renyi(n_nodes=n_nodes, n_edges=n_edges,
weight_range=(0.5, 2.0), seed=300)
elif type == 'SF':
weighted_random_dag = DAG.scale_free(n_nodes=n_nodes, n_edges=n_edges,
weight_range=(0.5, 2.0), seed=300)
else:
raise ValueError('Just supported `ER` or `SF`.')
dataset = IIDSimulation(W=weighted_random_dag, n=2000,
method=method, sem_type=sem_type)
true_dag, X = dataset.B, dataset.X
# DAG-GNN structure learning
gnn = DAG_GNN()
gnn.learn(X)
# plot est_dag and true_dag
GraphDAG(gnn.causal_matrix, true_dag)
# calculate accuracy
met = MetricsDAG(gnn.causal_matrix, true_dag)
print(met.metrics)
|
77dbe3bb98c1d8755d37d707df033c33310294da
|
32712c478ff9dff44de085cb50a1302bfc2eba67
|
/users/migrations/0021_auto_20210512_1053.py
|
d1e7211af2e0947106ab3ffa41c043c57f248c07
|
[
"MIT"
] |
permissive
|
vas3k/vas3k.club
|
158af17c329fe693178ca1bce36466922604df3b
|
b3ff2fd95ef1d6c593c57d3bcd501240f2705fbb
|
refs/heads/master
| 2023-09-03T07:10:10.859004
| 2023-09-01T09:08:32
| 2023-09-01T09:08:32
| 254,190,180
| 697
| 326
|
MIT
| 2023-09-04T09:02:12
| 2020-04-08T20:11:44
|
Python
|
UTF-8
|
Python
| false
| false
| 1,472
|
py
|
0021_auto_20210512_1053.py
|
# Generated by Django 3.2 on 2021-05-12 10:53
from django.db import migrations, models
import django.db.models.deletion
import uuid
class Migration(migrations.Migration):
dependencies = [
('users', '0020_auto_20200822_0818'),
]
operations = [
migrations.AlterField(
model_name='user',
name='moderation_status',
field=models.CharField(choices=[('intro', 'intro'), ('on_review', 'on_review'), ('rejected', 'rejected'), ('approved', 'approved'), ('deleted', 'deleted')], db_index=True, default='intro', max_length=32),
),
migrations.CreateModel(
name='Friend',
fields=[
('id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
('created_at', models.DateTimeField(auto_now_add=True)),
('is_subscribed_to_posts', models.BooleanField(default=True)),
('is_subscribed_to_comments', models.BooleanField(default=True)),
('user_from', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='friends_from', to='users.user')),
('user_to', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='friends_to', to='users.user')),
],
options={
'db_table': 'friends',
'unique_together': {('user_from', 'user_to')},
},
),
]
|
b426ae689ded9dfe575803b13531343b58988596
|
c641636e184c0ec1dcc7b851bad678c898cdd05d
|
/examples/deploy_gnn/convert_to_static.py
|
036092fda03143ea90461fb6932dc79c4c61765b
|
[
"Apache-2.0"
] |
permissive
|
PaddlePaddle/PGL
|
d8f0a82854a141bee1afdddd9a77bdd723c83ed8
|
7a55649d46d7ad93de31eb9b3ebf71b82d1fcffb
|
refs/heads/main
| 2023-08-17T10:33:02.425526
| 2023-08-04T02:52:06
| 2023-08-04T02:52:06
| 191,286,408
| 1,719
| 341
|
Apache-2.0
| 2023-08-04T02:52:07
| 2019-06-11T03:23:28
|
Python
|
UTF-8
|
Python
| false
| false
| 2,393
|
py
|
convert_to_static.py
|
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import paddle
import pgl
paddle.enable_static()
import paddle.nn as nn
import pgl.nn as gnn
from ogb.utils import smiles2graph
import paddle.static as static
graph_obj = smiles2graph('O=C1C=CC(O1)C(c1ccccc1C)O')
class GNNModel(nn.Layer):
def __init__(self, input_size, output_size, num_layers=3):
super(GNNModel, self).__init__()
self.conv_fn = nn.LayerList()
self.conv_fn.append(gnn.GCNConv(input_size, output_size))
for i in range(num_layers - 1):
self.conv_fn.append(gnn.GCNConv(output_size, output_size))
self.pool_fn = gnn.GraphPool("sum")
def forward(self, num_nodes, edges, feature):
graph = pgl.Graph(num_nodes=num_nodes, edges=edges)
for fn in self.conv_fn:
feature = fn(graph, feature)
output = self.pool_fn(graph, feature)
return output
# Build Model in Static
model = GNNModel(graph_obj["node_feat"].shape[-1], 10)
num_nodes = static.data(name='num_nodes', shape=[-1], dtype='int32')
edges = static.data(name='edges', shape=[-1, 2], dtype='int32')
feature = static.data(
name="feature",
shape=[-1, graph_obj["node_feat"].shape[-1]],
dtype="float32")
output = model(num_nodes, edges, feature)
place = paddle.CPUPlace()
exe = static.Executor(place)
exe.run(static.default_startup_program())
# Load DyGraph Model
state_dict = paddle.load("gnn.pdparam")
model.set_state_dict(state_dict)
prog = static.default_main_program()
feed_dict = {
"num_nodes": np.array(
[graph_obj["node_feat"].shape[0]], dtype="int32"),
"edges": np.array(
graph_obj["edge_index"].T, dtype="int32"),
"feature": graph_obj["node_feat"].astype("float32")
}
out = exe.run(prog, feed=feed_dict, fetch_list=[output])
print(out)
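# Hedged follow-up sketch (not part of the original example): the static program could
# be exported for serving; `save_inference_model` is assumed to follow the
# paddle.static 2.x API and the output prefix is a placeholder.
#
#   static.save_inference_model("./inference/gnn",
#                               [num_nodes, edges, feature], [output], exe)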
|
b3990ffb01e8bf4f93b7b45e796cab637edaf1e7
|
ec85250addb7357dfe7bb3e0680d53fc7b0fd8fb
|
/python_modules/dagster/dagster_tests/daemon_tests/conftest.py
|
93004dacd55e20ba0573b4ae8cca24f084ed65d8
|
[
"Apache-2.0"
] |
permissive
|
dagster-io/dagster
|
6adb5deee8bcf3ea1866a6a64f2ed81e1db5e73a
|
fe21995e0402878437a828c6a4244025eac8c43b
|
refs/heads/master
| 2023-09-05T20:46:08.203794
| 2023-09-05T19:54:52
| 2023-09-05T19:54:52
| 131,619,646
| 8,565
| 1,154
|
Apache-2.0
| 2023-09-14T21:57:37
| 2018-04-30T16:30:04
|
Python
|
UTF-8
|
Python
| false
| false
| 3,423
|
py
|
conftest.py
|
import os
import sys
from typing import Iterator, Optional, cast
import pytest
from dagster import DagsterInstance
from dagster._core.host_representation import (
CodeLocation,
ExternalRepository,
InProcessCodeLocationOrigin,
)
from dagster._core.test_utils import (
InProcessTestWorkspaceLoadTarget,
create_test_daemon_workspace_context,
instance_for_test,
)
from dagster._core.types.loadable_target_origin import LoadableTargetOrigin
from dagster._core.workspace.context import WorkspaceProcessContext
@pytest.fixture(name="instance_module_scoped", scope="module")
def instance_module_scoped_fixture() -> Iterator[DagsterInstance]:
with instance_for_test(
overrides={
"run_launcher": {
"module": "dagster._core.launcher.sync_in_memory_run_launcher",
"class": "SyncInMemoryRunLauncher",
}
}
) as instance:
yield instance
@pytest.fixture(name="instance", scope="function")
def instance_fixture(instance_module_scoped) -> Iterator[DagsterInstance]:
instance_module_scoped.wipe()
instance_module_scoped.wipe_all_schedules()
yield instance_module_scoped
def workspace_load_target(attribute=None):
return InProcessTestWorkspaceLoadTarget(
InProcessCodeLocationOrigin(
loadable_target_origin=loadable_target_origin(attribute=attribute),
location_name="test_location",
)
)
@pytest.fixture(name="workspace_context", scope="module")
def workspace_fixture(instance_module_scoped) -> Iterator[WorkspaceProcessContext]:
with create_test_daemon_workspace_context(
workspace_load_target=workspace_load_target(), instance=instance_module_scoped
) as workspace_context:
yield workspace_context
@pytest.fixture(name="external_repo", scope="module")
def external_repo_fixture(
workspace_context: WorkspaceProcessContext,
) -> Iterator[ExternalRepository]:
yield cast(
CodeLocation,
next(
iter(workspace_context.create_request_context().get_workspace_snapshot().values())
).code_location,
).get_repository("the_repo")
def loadable_target_origin(attribute: Optional[str] = None) -> LoadableTargetOrigin:
return LoadableTargetOrigin(
executable_path=sys.executable,
module_name="dagster_tests.daemon_tests.test_backfill",
working_directory=os.getcwd(),
attribute=attribute,
)
def unloadable_target_origin(attribute: Optional[str] = None) -> LoadableTargetOrigin:
return LoadableTargetOrigin(
executable_path=sys.executable,
module_name="dagster_tests.daemon_tests.test_locations.unloadable_location",
working_directory=os.getcwd(),
attribute=attribute,
)
def invalid_workspace_load_target(attribute=None):
return InProcessTestWorkspaceLoadTarget(
InProcessCodeLocationOrigin(
loadable_target_origin=unloadable_target_origin(attribute=attribute),
location_name="unloadable",
)
)
@pytest.fixture(name="unloadable_location_workspace_context", scope="module")
def unloadable_location_fixture(instance_module_scoped) -> Iterator[WorkspaceProcessContext]:
with create_test_daemon_workspace_context(
workspace_load_target=invalid_workspace_load_target(), instance=instance_module_scoped
) as workspace_context:
yield workspace_context
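# Hedged usage sketch (not part of the original conftest): a test module in this
# package could consume the fixtures above roughly like this; the assertions are
# illustrative only.
#
#   def test_repo_loads(instance, external_repo):
#       assert external_repo.name == "the_repo"
#       assert list(instance.get_runs()) == []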
|
d3a1d53fcbfaeba530246e6cb90c8031a279d30d
|
48ee50316a950d9bc789ae843477b58b2913bf0d
|
/src/app/test/db/mongo/pruner_test.py
|
0053f26f8ebd957d98db5563a9514afdf711b8e2
|
[
"MIT"
] |
permissive
|
beer-garden/beer-garden
|
f6d1c305a261b59d3cb3389513fc3138004a8d07
|
a5fd2dcc2444409e243d3fdaa43d86695e5cb142
|
refs/heads/develop
| 2023-08-15T11:50:29.833953
| 2023-07-20T03:20:45
| 2023-07-20T03:20:45
| 120,045,001
| 254
| 38
|
MIT
| 2023-07-20T03:20:47
| 2018-02-03T00:13:29
|
Python
|
UTF-8
|
Python
| false
| false
| 5,070
|
py
|
pruner_test.py
|
# -*- coding: utf-8 -*-
import datetime
from datetime import timedelta
import pytest
from mock import MagicMock, Mock, patch
from mongomock.gridfs import enable_gridfs_integration
from beer_garden.db.mongo.models import File, RawFile, Request
from beer_garden.db.mongo.pruner import MongoPruner
enable_gridfs_integration()
@pytest.fixture
def collection_mock():
return MagicMock(__name__="MOCK")
@pytest.fixture
def task(collection_mock):
return {
"collection": collection_mock,
"field": "test",
"delete_after": timedelta(microseconds=1),
"additional_query": Mock(),
}
@pytest.fixture
def pruner(task):
return MongoPruner(tasks=[task], cancel_threshold=15)
@pytest.fixture
def negative_pruner(task):
return MongoPruner(tasks=[task], cancel_threshold=-1)
@pytest.fixture
def none_pruner(task):
return MongoPruner(tasks=[task], cancel_threshold=None)
@pytest.fixture
def in_progress():
in_progress = Request(
system="T",
system_version="T",
instance_name="T",
namespace="T",
command="T",
created_at=datetime.datetime(2020, 5, 17),
status="IN_PROGRESS",
)
in_progress.save()
yield in_progress
in_progress.delete()
@pytest.fixture
def created():
created = Request(
system="T1",
system_version="T",
instance_name="T",
namespace="T",
command="T",
created_at=datetime.datetime(2020, 6, 17),
status="CREATED",
)
created.save()
yield created
created.delete()
class TestMongoPruner(object):
@patch("beer_garden.db.mongo.pruner.Q", MagicMock())
def test_prune_something(self, pruner, collection_mock):
pruner._stop_event = Mock(wait=Mock(side_effect=[False, True]))
pruner.run()
assert collection_mock.objects.return_value.no_cache.return_value.delete.called
def test_run_cancels_outstanding_requests(self, pruner, in_progress, created):
pruner._stop_event = Mock(wait=Mock(side_effect=[False, True]))
pruner.run()
new_in_progress = Request.objects.get(id=in_progress.id)
new_created = Request.objects.get(id=created.id)
assert new_in_progress.status == "CANCELED"
assert new_created.status == "CANCELED"
def test_negative_cancel_threshold(self, negative_pruner, in_progress, created):
negative_pruner._stop_event = Mock(wait=Mock(side_effect=[False, True]))
negative_pruner.run()
new_in_progress = Request.objects.get(id=in_progress.id)
new_created = Request.objects.get(id=created.id)
assert new_in_progress.status == "IN_PROGRESS"
assert new_created.status == "CREATED"
def test_none_cancel_threshold(self, none_pruner, in_progress, created):
none_pruner._stop_event = Mock(wait=Mock(side_effect=[False, True]))
none_pruner.run()
new_in_progress = Request.objects.get(id=in_progress.id)
new_created = Request.objects.get(id=created.id)
assert new_in_progress.status == "IN_PROGRESS"
assert new_created.status == "CREATED"
class TestDetermineTasks(object):
def test_determine_tasks(self):
config = {"info": 5, "action": 10, "file": 15}
prune_tasks, run_every = MongoPruner.determine_tasks(**config)
assert len(prune_tasks) == 4
assert run_every == 2.5
info_task = prune_tasks[0]
action_task = prune_tasks[1]
file_task = prune_tasks[2]
raw_file_task = prune_tasks[3]
assert info_task["collection"] == Request
assert action_task["collection"] == Request
assert file_task["collection"] == File
assert raw_file_task["collection"] == RawFile
assert info_task["field"] == "created_at"
assert action_task["field"] == "created_at"
assert file_task["field"] == "updated_at"
assert raw_file_task["field"] == "created_at"
assert info_task["delete_after"] == timedelta(minutes=5)
assert action_task["delete_after"] == timedelta(minutes=10)
assert file_task["delete_after"] == timedelta(minutes=15)
assert raw_file_task["delete_after"] == timedelta(minutes=15)
def test_setup_pruning_tasks_empty(self):
prune_tasks, run_every = MongoPruner.determine_tasks()
assert prune_tasks == []
assert run_every is None
def test_setup_pruning_tasks_one(self):
config = {"info": -1, "action": 1}
prune_tasks, run_every = MongoPruner.determine_tasks(**config)
assert len(prune_tasks) == 1
assert run_every == 0.5
def test_setup_pruning_tasks_mixed(self):
config = {"info": 5, "action": -1}
prune_tasks, run_every = MongoPruner.determine_tasks(**config)
assert len(prune_tasks) == 1
assert run_every == 2.5
info_task = prune_tasks[0]
assert info_task["collection"] == Request
assert info_task["field"] == "created_at"
assert info_task["delete_after"] == timedelta(minutes=5)
|
99f4ce34fec158fd9785b632bd3c94270e1c1b40
|
2853845c003d03db22f67c3303fa1ec333180ae7
|
/test/channel/greeter_pb2.py
|
6f4df40766e1d2a2f3019cc8929d3d770f6f094e
|
[
"LicenseRef-scancode-proprietary-license",
"Apache-2.0"
] |
permissive
|
bytedance/fedlearner
|
fc1dd2ba2ec88092e83a32732eccea52451ce552
|
436e4959952c970917ee8f47b920f0a76cd4dd05
|
refs/heads/master
| 2023-08-14T23:01:02.875453
| 2023-05-23T03:44:03
| 2023-05-23T03:44:03
| 235,348,659
| 893
| 243
|
Apache-2.0
| 2023-06-08T07:37:18
| 2020-01-21T13:26:35
|
Python
|
UTF-8
|
Python
| false
| true
| 5,225
|
py
|
greeter_pb2.py
|
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: test/channel/greeter.proto
"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='test/channel/greeter.proto',
package='test.channel',
syntax='proto3',
serialized_options=None,
create_key=_descriptor._internal_create_key,
serialized_pb=b'\n\x1atest/channel/greeter.proto\x12\x0ctest.channel\"\x17\n\x07Request\x12\x0c\n\x04name\x18\x01 \x01(\t\"\x1b\n\x08Response\x12\x0f\n\x07message\x18\x01 \x01(\t2\xa5\x02\n\x07Greeter\x12\x42\n\x0fHelloUnaryUnary\x12\x15.test.channel.Request\x1a\x16.test.channel.Response\"\x00\x12\x45\n\x10HelloUnaryStream\x12\x15.test.channel.Request\x1a\x16.test.channel.Response\"\x00\x30\x01\x12\x45\n\x10HelloStreamUnary\x12\x15.test.channel.Request\x1a\x16.test.channel.Response\"\x00(\x01\x12H\n\x11HelloStreamStream\x12\x15.test.channel.Request\x1a\x16.test.channel.Response\"\x00(\x01\x30\x01\x62\x06proto3'
)
_REQUEST = _descriptor.Descriptor(
name='Request',
full_name='test.channel.Request',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='test.channel.Request.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=44,
serialized_end=67,
)
_RESPONSE = _descriptor.Descriptor(
name='Response',
full_name='test.channel.Response',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='message', full_name='test.channel.Response.message', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=69,
serialized_end=96,
)
DESCRIPTOR.message_types_by_name['Request'] = _REQUEST
DESCRIPTOR.message_types_by_name['Response'] = _RESPONSE
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
Request = _reflection.GeneratedProtocolMessageType('Request', (_message.Message,), {
'DESCRIPTOR' : _REQUEST,
'__module__' : 'test.channel.greeter_pb2'
# @@protoc_insertion_point(class_scope:test.channel.Request)
})
_sym_db.RegisterMessage(Request)
Response = _reflection.GeneratedProtocolMessageType('Response', (_message.Message,), {
'DESCRIPTOR' : _RESPONSE,
'__module__' : 'test.channel.greeter_pb2'
# @@protoc_insertion_point(class_scope:test.channel.Response)
})
_sym_db.RegisterMessage(Response)
_GREETER = _descriptor.ServiceDescriptor(
name='Greeter',
full_name='test.channel.Greeter',
file=DESCRIPTOR,
index=0,
serialized_options=None,
create_key=_descriptor._internal_create_key,
serialized_start=99,
serialized_end=392,
methods=[
_descriptor.MethodDescriptor(
name='HelloUnaryUnary',
full_name='test.channel.Greeter.HelloUnaryUnary',
index=0,
containing_service=None,
input_type=_REQUEST,
output_type=_RESPONSE,
serialized_options=None,
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='HelloUnaryStream',
full_name='test.channel.Greeter.HelloUnaryStream',
index=1,
containing_service=None,
input_type=_REQUEST,
output_type=_RESPONSE,
serialized_options=None,
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='HelloStreamUnary',
full_name='test.channel.Greeter.HelloStreamUnary',
index=2,
containing_service=None,
input_type=_REQUEST,
output_type=_RESPONSE,
serialized_options=None,
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='HelloStreamStream',
full_name='test.channel.Greeter.HelloStreamStream',
index=3,
containing_service=None,
input_type=_REQUEST,
output_type=_RESPONSE,
serialized_options=None,
create_key=_descriptor._internal_create_key,
),
])
_sym_db.RegisterServiceDescriptor(_GREETER)
DESCRIPTOR.services_by_name['Greeter'] = _GREETER
# @@protoc_insertion_point(module_scope)
|
be9aa958a3b5dc59d0b33c18e0bb89b549c94962
|
bb33e6be8316f35decbb2b81badf2b6dcf7df515
|
/source/res/scripts/client/gui/Scaleform/daapi/view/lobby/veh_post_progression/veh_post_progression_cfg_view.py
|
c5dd53d2499ab8d48b51d873e2d4f01359c9dd00
|
[] |
no_license
|
StranikS-Scan/WorldOfTanks-Decompiled
|
999c9567de38c32c760ab72c21c00ea7bc20990c
|
d2fe9c195825ececc728e87a02983908b7ea9199
|
refs/heads/1.18
| 2023-08-25T17:39:27.718097
| 2022-09-22T06:49:44
| 2022-09-22T06:49:44
| 148,696,315
| 103
| 39
| null | 2022-09-14T17:50:03
| 2018-09-13T20:49:11
|
Python
|
UTF-8
|
Python
| false
| false
| 6,720
|
py
|
veh_post_progression_cfg_view.py
|
# Python bytecode 2.7 (decompiled from Python 2.7)
# Embedded file name: scripts/client/gui/Scaleform/daapi/view/lobby/veh_post_progression/veh_post_progression_cfg_view.py
from functools import partial
from adisp import process
from gui.ClientUpdateManager import g_clientUpdateManager
from gui.Scaleform.daapi.settings.views import VIEW_ALIAS
from gui.Scaleform.daapi.view.lobby.go_back_helper import BackButtonContextKeys
from gui.Scaleform.daapi.view.lobby.veh_post_progression.veh_post_progression_vehicle import g_postProgressionVehicle
from gui.Scaleform.daapi.view.lobby.vehicle_preview.vehicle_preview import VEHICLE_PREVIEW_ALIASES
from gui.Scaleform.daapi.view.meta.VehiclePostProgressionViewMeta import VehiclePostProgressionViewMeta
from gui.Scaleform.framework.managers.loaders import SFViewLoadParams
from gui.Scaleform.genConsts.HANGAR_ALIASES import HANGAR_ALIASES
from gui.impl.lobby.veh_post_progression.post_progression_intro import getPostProgressionInfoWindowProc
from gui.shared import event_dispatcher as shared_events
from gui.shared import events, EVENT_BUS_SCOPE, g_eventBus
from gui.shared.gui_items.items_actions import factory as ActionsFactory
from gui.veh_post_progression.sounds import PP_VIEW_SOUND_SPACE
from gui.veh_post_progression.vo_builders.cfg_page_vos import getDataVO, getTitleVO
from helpers import dependency
from nation_change.nation_change_helpers import iterVehTypeCDsInNationGroup
from skeletons.gui.game_control import IVehicleComparisonBasket, IHeroTankController
from skeletons.gui.shared import IItemsCache
_HERO_PREVIEW_ALIASES = (VIEW_ALIAS.HERO_VEHICLE_PREVIEW, VIEW_ALIAS.RESOURCE_WELL_HERO_VEHICLE_PREVIEW)
def _defaultExitEvent():
return events.LoadViewEvent(SFViewLoadParams(VIEW_ALIAS.LOBBY_HANGAR), name=VIEW_ALIAS.LOBBY_HANGAR)
class VehiclePostProgressionCfgView(VehiclePostProgressionViewMeta):
_COMMON_SOUND_SPACE = PP_VIEW_SOUND_SPACE
_PROGRESSION_INJECT_ALIAS = HANGAR_ALIASES.POST_PROGRESSION_INJECT
__cmpBasket = dependency.descriptor(IVehicleComparisonBasket)
__itemsCache = dependency.descriptor(IItemsCache)
__heroTanks = dependency.descriptor(IHeroTankController)
def __init__(self, ctx=None):
super(VehiclePostProgressionCfgView, self).__init__(ctx)
self._intCD = ctx['intCD']
self._exitEvent = ctx.get(BackButtonContextKeys.EXIT) or _defaultExitEvent()
def onAboutClick(self):
getPostProgressionInfoWindowProc().show(self.getParentWindow())
def onGoBackClick(self):
self._onExit()
def compareVehicle(self):
self.__cmpBasket.addVehicle(self._intCD)
@process
def demountAllPairs(self):
vehicle = self._vehicle
toDiscardIDs = vehicle.postProgression.getInstalledMultiIds()
action = ActionsFactory.getAction(ActionsFactory.DISCARD_POST_PROGRESSION_PAIRS, vehicle, *toDiscardIDs)
yield ActionsFactory.asyncDoAction(action)
def goToVehicleView(self):
if self._vehicle.isPreviewAllowed():
if self._exitEvent.alias in VEHICLE_PREVIEW_ALIASES:
self._onExit()
else:
backCb = partial(shared_events.showVehPostProgressionView, self._intCD, self._exitEvent)
shared_events.showVehiclePreview(self._intCD, self.alias, previewBackCb=backCb)
elif self._vehicle.isInInventory:
shared_events.selectVehicleInHangar(self._intCD)
def _addListeners(self):
super(VehiclePostProgressionCfgView, self)._addListeners()
g_clientUpdateManager.addCallbacks({'stats.freeXP': self._updateData,
'cache.mayConsumeWalletResources': self._updateData})
self.__cmpBasket.onChange += self.__onCmpBasketChange
self.__cmpBasket.onSwitchChange += self._updateData
progressionInjectView = self._progressionInject.getInjectView()
progressionInjectView.onGoBackAction += self.__onGoBackAction
progressionInjectView.onResearchAction += self.__onResearchAction
def _removeListeners(self):
g_clientUpdateManager.removeObjectCallbacks(self)
self.__cmpBasket.onSwitchChange -= self._updateData
self.__cmpBasket.onChange -= self.__onCmpBasketChange
progressionInjectView = self._progressionInject.getInjectView()
if progressionInjectView:
progressionInjectView.onResearchAction -= self.__onResearchAction
progressionInjectView.onGoBackAction -= self.__onGoBackAction
super(VehiclePostProgressionCfgView, self)._removeListeners()
def _onExit(self):
if self._exitEvent.alias in _HERO_PREVIEW_ALIASES and self._exitEvent.ctx.get('itemCD') == self.__heroTanks.getCurrentTankCD():
self.__goToHeroTank()
else:
g_eventBus.handleEvent(self._exitEvent, scope=EVENT_BUS_SCOPE.LOBBY)
def _getDiffVehicle(self):
return self.__itemsCache.items.getVehicleCopy(self._vehicle)
def _getModVehicle(self):
return self.__itemsCache.items.getVehicleCopy(self._vehicle)
def _getVehicle(self):
return self.__itemsCache.items.getItemByCD(self._intCD)
def _checkNationChange(self):
if not self._vehicle.activeInNationGroup:
self._intCD = iterVehTypeCDsInNationGroup(self._vehicle.intCD).next()
self._progressionInject.getInjectView().invalidateVehicle(self._intCD)
g_postProgressionVehicle.setCustomVehicle(None)
self._updateVehicle()
return
def _updateData(self, *_):
freeExp = self.__itemsCache.items.stats.actualFreeXP
self.as_setDataS(getDataVO(self._vehicle, freeExp, self._exitEvent))
def _updateTitle(self):
self.as_setVehicleTitleS(getTitleVO(self._vehicle))
def __onCmpBasketChange(self, changedData, _=None):
if changedData.isFullChanged:
self._updateData()
def __onGoBackAction(self):
self.as_onEscPressedS()
def __onResearchAction(self):
exitToTechTree = events.LoadViewEvent(SFViewLoadParams(VIEW_ALIAS.LOBBY_TECHTREE), ctx={BackButtonContextKeys.NATION: self._vehicle.nationName})
exitToResearchPage = events.LoadViewEvent(SFViewLoadParams(VIEW_ALIAS.LOBBY_RESEARCH), ctx={BackButtonContextKeys.ROOT_CD: self._vehicle.intCD,
BackButtonContextKeys.EXIT: exitToTechTree})
g_eventBus.handleEvent(exitToResearchPage, scope=EVENT_BUS_SCOPE.LOBBY)
def __goToHeroTank(self):
ctx = self._exitEvent.ctx
shared_events.goToHeroTankOnScene(vehTypeCompDescr=ctx.get('itemCD'), previewAlias=ctx.get('previewAlias'), previewBackCb=ctx.get('previewBackCb'), previousBackAlias=ctx.get('previousBackAlias'), hangarVehicleCD=ctx.get('hangarVehicleCD'))
|
04d5b43fe3692ece408a356db84c00edbcd3388a
|
fc1dbf86c08788d9d47b933a206331e0d3af3f58
|
/Chapter_14/ch14_r04.py
|
dd51b278aef6bbee22d569f445e173425eb0d6e6
|
[] |
no_license
|
PacktPublishing/Modern-Python-Cookbook-Second-Edition
|
819e17fe1f3d7e4389a290bde699fb45e08543ab
|
43c2549d51f05df6d897753a0d1e979f71b0729d
|
refs/heads/master
| 2023-08-01T09:35:33.833038
| 2023-07-29T13:27:30
| 2023-07-29T13:27:30
| 225,331,228
| 107
| 60
| null | 2023-07-29T13:28:03
| 2019-12-02T09:06:30
|
Python
|
UTF-8
|
Python
| false
| false
| 1,556
|
py
|
ch14_r04.py
|
"""Python Cookbook
Chapter 14, recipe 4, Wrapping and combining CLI applications
This uses an explicit `python` command
so Chapter_13/ch13_r05.py does not have to be marked executable.
"""
import argparse
from pathlib import Path
import subprocess
import sys
from typing import List, Optional
def get_options(
argv: Optional[List[str]] = None
) -> argparse.Namespace:
if argv is None:
argv = sys.argv[1:]
parser = argparse.ArgumentParser()
parser.add_argument("directory", type=Path)
parser.add_argument("games", type=int)
options = parser.parse_args(argv)
return options
def make_files(directory: Path, files: int = 100) -> None:
"""Create sample data files."""
for n in range(files):
filename = directory / f"game_{n}.yaml"
command = [
"python", # Can be removed if the app is executable
"Chapter_13/ch13_r05.py",
"--samples",
"10",
"--output",
str(filename),
]
subprocess.run(command, check=True)
def make_files_clean(directory: Path, files: int = 100) -> None:
"""Create sample data files, with cleanup after a failure."""
try:
make_files(directory, files)
except subprocess.CalledProcessError as ex:
        # Remove any partially written files before re-raising.
for partial in directory.glob("game_*.yaml"):
partial.unlink()
raise
def main() -> None:
options = get_options()
make_files_clean(options.directory, options.games)
if __name__ == "__main__":
main()
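# Hedged usage note (not part of the original recipe): the script takes a target
# directory and a number of games on the command line, e.g.
#
#   python Chapter_14/ch14_r04.py data/games 100
#
# The directory path is a placeholder.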
|
66b705bfd7d135482d2d66ae923dbaf0bd005a40
|
07e810873aa0134ba5017ccfef641d1038ca9b92
|
/hs_access_control/urls.py
|
0fbae1274b88c20a2875f0ffdd729222b395c2c0
|
[
"LicenseRef-scancode-unknown-license-reference",
"BSD-3-Clause"
] |
permissive
|
hydroshare/hydroshare
|
9093e6dce047a30d4b2b7720257a7841d209353f
|
69855813052243c702c9b0108d2eac3f4f1a768f
|
refs/heads/develop
| 2023-09-04T12:52:30.816709
| 2023-08-30T16:46:20
| 2023-08-30T16:46:20
| 24,703,136
| 207
| 57
|
BSD-3-Clause
| 2023-09-14T20:20:16
| 2014-10-02T02:19:41
|
Python
|
UTF-8
|
Python
| false
| false
| 1,355
|
py
|
urls.py
|
from django.conf.urls import url
from hs_access_control import views
urlpatterns = [
# Community responders return JSON
url(r'^_internal/community/(?P<cid>[0-9]+)/(?P<action>[a-z]+)/(?P<gid>[0-9]+)/$',
views.CommunityView.as_view(), name='access_manage_community'),
url(r'^_internal/community/(?P<cid>[0-9]+)/(?P<action>owner)/(?P<uid>[a-zA-Z0-9]+)/(?P<addrem>add|remove)$',
views.CommunityView.as_view(), name='access_manage_community'),
url(r'^_internal/community/(?P<cid>[0-9]+)/(?P<action>[a-z]+)/',
views.CommunityView.as_view(), name='access_manage_community'),
# Group responders return JSON
url(r'^_internal/group/(?P<gid>[0-9]+)/$',
views.GroupView.as_view(), name='access_manage_group'),
url(r'^_internal/group/(?P<gid>[0-9]+)/(?P<action>[a-z_ ]*)/(?P<cid>[0-9]+)/$',
views.GroupView.as_view(), name='access_manage_group'),
# Community request responders also return JSON
url(r'^_internal/crequest/$',
views.CommunityRequestView.as_view(), name='access_manage_crequests'),
url(r'^_internal/crequest/(?P<action>[a-z_]+)/$',
views.CommunityRequestView.as_view(), name='access_manage_crequests'),
url(r'^_internal/crequest/(?P<action>[a-z_]+)/(?P<crid>[0-9]+)/$',
views.CommunityRequestView.as_view(), name='access_manage_crequests'),
]
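# Hedged illustration (not part of the original file): the first community pattern
# above would match a request such as /_internal/community/5/approve/10/,
# binding cid="5", action="approve", gid="10"; the numbers are made up.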
|
134a650b9e82202f13fe5a6a95270c35adeda43a
|
e3cfab409afb5ff9a0b3812bf848be6ca9239cee
|
/test/testWgrs.py
|
892680a009ca6dc350744f06f59d49eaf54ece91
|
[
"MIT"
] |
permissive
|
mrJean1/PyGeodesy
|
565266a4f7f6cda5abe98e915bbd868f6cbe1760
|
eba35704b248a7a0388b30f3cea19793921e99b7
|
refs/heads/master
| 2023-08-23T13:58:20.069917
| 2023-08-20T18:50:45
| 2023-08-20T18:50:45
| 68,028,481
| 283
| 66
| null | 2022-04-09T00:40:52
| 2016-09-12T16:49:10
|
Python
|
UTF-8
|
Python
| false
| false
| 4,315
|
py
|
testWgrs.py
|
# -*- coding: utf-8 -*-
# Test L{wgrs} module.
__all__ = ('Tests',)
__version__ = '23.03.27'
from bases import TestsBase
from pygeodesy import degDMS, fstr, Georef, S_DEG, S_MIN, wgrs
def _fstr(floats, prec=6):
return ', '.join('None' if f is None else fstr(f, prec=prec) for f in floats)
class Tests(TestsBase):
def testCodec3(self, g, x, prec=4):
self.test('codec3', Georef(g), g)
t = wgrs.decode3(g)
self.test('decode3', _fstr(t, prec=prec), x)
self.test('encode', wgrs.encode(*t), g)
def testCodec5(self, g, x, prec=4):
self.test('codec5', Georef(g), g)
t = wgrs.decode5(g)
self.test('decode5', _fstr(t, prec=prec), x)
self.test('encode', wgrs.encode(*t), g)
def testGeoref(self, LL):
# Karney's geographiclib/1.49/examples/example-Georef.cpp
# <https://SourceForge.net/p/geographiclib/code/ci/release/tree/examples/example-Georef.cpp>
g = Georef('57.64911, 10.40744', precision=6)
self.test('Georef', repr(g), "'NKLN2444638946'")
self.test('Georef', g.toRepr(), "Georef('NKLN2444638946')")
self.test('Georef', str(g), g.toStr()) # 'NKLN2444638946'
self.test('Georef.latlon', fstr(g.latlon, prec=5), '57.64911, 10.40744')
ll = g.toLatLon(LL)
self.test('Georef.toLatLon', repr(ll), 'LatLon(57°38′56.8″N, 010°24′26.78″E)')
self.testCodec3(g, '57.64911, 10.40744, 6.0', prec=5)
g = Georef(ll, precision=6)
self.test('Georef', repr(g), "'NKLN2444638946H0'")
self.test('Georef', g.toRepr(), "Georef('NKLN2444638946H0')")
self.test('Georef', str(g), g.toStr()) # 'NKLN2444638946H0'
self.test('Georef.latlon', fstr(g.latlon, prec=5), '57.64911, 10.40744')
self.test('Georef.precision', g.precision, 6)
self.test('Georef.radius', g.radius, None)
# <https://WikiPedia.org/wiki/World_Geographic_Reference_System>
g = Georef('38.286108, -76.4291704', precision=6)
self.test('Georef', repr(g), "'GJPJ3424917166'")
self.test('Georef', g.toRepr(), "Georef('GJPJ3424917166')")
self.test('Georef', str(g), g.toStr()) # 'GJPJ3424917166'
self.test('Georef.latlon', fstr(g.latlon, prec=6), '38.286108, -76.42917')
ll = g.toLatLon(LL)
self.test('Georef.toLatLon', repr(ll), 'LatLon(38°17′09.99″N, 076°25′45.01″W)')
self.testCodec3(g, '38.286108, -76.429175, 6.0', prec=6)
g = Georef(ll, precision=6)
self.test('Georef', repr(g), "'GJPJ3424917166H0'")
self.test('Georef', g.toRepr(), "Georef('GJPJ3424917166H0')")
self.test('Georef', str(g), g.toStr()) # 'GJPJ3424917166H0'
self.test('Georef.latlon', fstr(g.latlon, prec=6), '38.286108, -76.42917')
self.test('Georef.precision', g.precision, 6)
self.test('Georef.radius', g.radius, None)
t = g.toLatLon() # LatLon=None
self.test('Georef.3Tuple', fstr(t, prec=6), '38.286108, -76.42917, 0.0')
# <https://Earth-Info.NGA.mil/GandG/coordsys/grids/georef.pdf>
self.testCodec3('MKPG1204', '51.075, -1.7917, 3.0', prec=4)
# <https://www.Map-Reading.com/ch4-4.php>
self.testCodec3('WJKG1503', '36.0583, 129.2583, 3.0', prec=4)
# <https://WikiPedia.org/wiki/World_Geographic_Reference_System>
self.testCodec5('GJPJ4103R5', '38.0583, -76.3083, 3.0, None, 9260.0', prec=4)
self.testCodec5('GJPJ4103H17', '38.0583, -76.3083, 3.0, 5181.6, None', prec=4)
self.testCodec5('GJPJ4103R5H17', '38.0583, -76.3083, 3.0, 5181.6, 9260.0', prec=4)
for t in range(-1, 13):
r = wgrs.resolution(t)
p = wgrs.precision(r)
self.test('precision', t, p, known=t < 0 or t > 11)
b = degDMS(r, prec=t if r < 1 else 0, s_S='') # no S_SEC
x = ('15' + S_DEG) if p < 1 else (
( '1' + S_DEG) if p < 2 else ('0.%s1%s' % ('0' * (p - 2), S_MIN)))
self.test('resolution', b, x) # also to test degDMS
if __name__ == '__main__':
from pygeodesy import ellipsoidalVincenty
t = Tests(__file__, __version__, wgrs)
t.testGeoref(ellipsoidalVincenty.LatLon)
t.results()
t.exit()
|
c9725381d878e89b25ac98eec75cb726cbac58f0
|
7f59e2c4e771c19378e9839406c220d3985e7efe
|
/public-engines/iris-species-engine/tests/data_handler/test_acquisitor_and_cleaner.py
|
d12d027a26916f302cfab847675b1983e77c6f37
|
[
"Apache-2.0"
] |
permissive
|
apache/incubator-marvin
|
c6ff32d50eb01ccd84266587d79f562a9e371496
|
58fdccf2e677041a13966ddbdd96d484edf3b474
|
refs/heads/develop
| 2023-08-30T12:46:56.973102
| 2022-11-18T15:27:52
| 2022-11-18T15:27:52
| 148,087,939
| 112
| 77
|
Apache-2.0
| 2023-03-07T05:45:59
| 2018-09-10T02:27:54
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 640
|
py
|
test_acquisitor_and_cleaner.py
|
#!/usr/bin/env python
# coding=utf-8
try:
import mock
except ImportError:
import unittest.mock as mock
from marvin_iris_species_engine.data_handler import AcquisitorAndCleaner
@mock.patch('marvin_iris_species_engine.data_handler.acquisitor_and_cleaner.pd.read_csv')
@mock.patch('marvin_python_toolbox.common.data.MarvinData.download_file')
def test_execute(download_mocked, csv_mocked, mocked_params):
ac = AcquisitorAndCleaner()
mocked_params["data_url"] = "www.test_url.com"
ac.execute(params=mocked_params)
download_mocked.assert_called_once_with(url='www.test_url.com')
csv_mocked.assert_called_once()
|
55294977d8df3c1208d6b8aa28b33b6969ec55a7
|
8c0a92d54ea8b8c07648d454529cba588081ce12
|
/environments/mobile_robot/test_env.py
|
946d98fa37a5ec7c196691391b65a486c913b2fd
|
[
"MIT"
] |
permissive
|
araffin/robotics-rl-srl
|
79c1e7f34b5a28367fabbe80e7cfe81e7693cd4c
|
eae7c1ab310c79662f6e68c0d255e08641037ffa
|
refs/heads/master
| 2023-08-25T17:09:16.050197
| 2021-04-05T18:43:17
| 2021-04-05T18:43:17
| 118,007,580
| 590
| 102
|
MIT
| 2019-08-09T09:30:36
| 2018-01-18T16:23:17
|
Python
|
UTF-8
|
Python
| false
| false
| 926
|
py
|
test_env.py
|
import time
import environments.mobile_robot.mobile_robot_env as mobile_env
env = mobile_env.MobileRobotGymEnv(renders=True, is_discrete=True, log_folder="mobile_robot", record_data=False, random_target=False)
timesteps = 1000 # must be greater than MAX_STEPS
episodes = 100
env.seed(1)
i = 0
print('Starting episodes...')
start_time = time.time()
try:
for _ in range(episodes):
observation = env.reset()
for t in range(timesteps):
action = env.action_space.sample()
observation, reward, done, info = env.step(action)
            env.render()  # render() requires the observation to have been obtained first
if done:
print("Episode finished after {} timesteps".format(t + 1))
break
i += 1
except KeyboardInterrupt:
pass
print("Avg. frame rate: {:.2f} FPS".format(i / (time.time() - start_time)))
|
9ed7b2b7ef3c8231eee8bc8750f0bd2b1c6cb7ba
|
746bd6e9242053e15bd49e6eb1f6e7940455fc0f
|
/lib/mirror/tools/common.py
|
4383c959618660013aef7ffbcb8b9b481401de43
|
[
"BSD-3-Clause",
"MIT"
] |
permissive
|
AIBluefisher/DAGSfM
|
34924b16bf55e0508a62ae4373e5e0dbba270d48
|
0ab0630bd4dcfab40729a239fbb396000a237dd0
|
refs/heads/dev
| 2023-05-31T05:11:26.653353
| 2022-07-01T09:21:19
| 2022-07-01T09:21:19
| 161,630,883
| 119
| 29
|
BSD-3-Clause
| 2022-07-01T09:21:35
| 2018-12-13T11:43:23
|
C
|
UTF-8
|
Python
| false
| false
| 1,902
|
py
|
common.py
|
#!/usr/bin/env python
"""
Copyright 2017, Zixin Luo, HKUST.
Commonly used functions
"""
from __future__ import print_function
import os
from datetime import datetime
class ClassProperty(property):
"""For dynamically obtaining system time"""
def __get__(self, cls, owner):
return classmethod(self.fget).__get__(None, owner)()
class Notify(object):
"""Colorful printing prefix.
A quick example:
print(Notify.INFO, YOUR TEXT, Notify.ENDC)
"""
def __init__(self):
pass
@ClassProperty
def HEADER(cls):
return str(datetime.now()) + ': \033[95m'
@ClassProperty
def INFO(cls):
return str(datetime.now()) + ': \033[92mI'
@ClassProperty
def OKBLUE(cls):
return str(datetime.now()) + ': \033[94m'
@ClassProperty
def WARNING(cls):
return str(datetime.now()) + ': \033[93mW'
@ClassProperty
def FAIL(cls):
return str(datetime.now()) + ': \033[91mF'
@ClassProperty
def BOLD(cls):
return str(datetime.now()) + ': \033[1mB'
@ClassProperty
def UNDERLINE(cls):
return str(datetime.now()) + ': \033[4mU'
ENDC = '\033[0m'
def read_list(list_path):
"""Read list."""
if list_path is None or not os.path.exists(list_path):
print(Notify.FAIL, 'Not exist', list_path, Notify.ENDC)
exit()
content = open(list_path).read().splitlines()
return content
def write_list(list_in, path_save):
"""Write list."""
fout = open(path_save, 'w')
fout.write('\n'.join(list_in))
def replace_str_in_file(list_in, orig_str, dest_str):
"""Replace strings in a file."""
if os.path.exists(list_in):
content = open(list_in).read()
new_content = content.replace(orig_str, dest_str)
open(list_in, 'w').write(new_content)
else:
print(Notify.WARNING + 'Not exist', list_in, Notify.ENDC)
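# Hedged usage sketch (not part of the original module); the file names are placeholders.
#
#   print(Notify.INFO, 'loading image list', Notify.ENDC)
#   images = read_list('image_list.txt')
#   write_list(images, 'image_list_copy.txt')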
|
b2d092d3fd2d9875903178d628476594da064621
|
069c2295076c482afadfe6351da5ae02be8e18e6
|
/tests/template_tests/syntax_tests/test_with.py
|
dde581eab30be85fa98525a7ac073a445a528979
|
[
"LicenseRef-scancode-other-copyleft",
"LicenseRef-scancode-unknown-license-reference",
"BSD-3-Clause",
"GPL-1.0-or-later",
"Python-2.0.1",
"LicenseRef-scancode-free-unknown",
"LicenseRef-scancode-other-permissive",
"Python-2.0"
] |
permissive
|
django/django
|
5eb557f57053631cd4f566f451e43197309dbeeb
|
c74a6fad5475495756a5bdb18b2cab2b68d429bc
|
refs/heads/main
| 2023-09-01T03:43:44.033530
| 2023-08-31T08:27:32
| 2023-08-31T08:27:32
| 4,164,482
| 73,530
| 38,187
|
BSD-3-Clause
| 2023-09-14T20:03:48
| 2012-04-28T02:47:18
|
Python
|
UTF-8
|
Python
| false
| false
| 2,641
|
py
|
test_with.py
|
from django.template import TemplateSyntaxError
from django.template.defaulttags import WithNode
from django.test import SimpleTestCase
from ..utils import setup
class WithTagTests(SimpleTestCase):
at_least_with_one_msg = "'with' expected at least one variable assignment"
@setup({"with01": "{% with key=dict.key %}{{ key }}{% endwith %}"})
def test_with01(self):
output = self.engine.render_to_string("with01", {"dict": {"key": 50}})
self.assertEqual(output, "50")
@setup({"legacywith01": "{% with dict.key as key %}{{ key }}{% endwith %}"})
def test_legacywith01(self):
output = self.engine.render_to_string("legacywith01", {"dict": {"key": 50}})
self.assertEqual(output, "50")
@setup(
{
"with02": "{{ key }}{% with key=dict.key %}"
"{{ key }}-{{ dict.key }}-{{ key }}"
"{% endwith %}{{ key }}"
}
)
def test_with02(self):
output = self.engine.render_to_string("with02", {"dict": {"key": 50}})
if self.engine.string_if_invalid:
self.assertEqual(output, "INVALID50-50-50INVALID")
else:
self.assertEqual(output, "50-50-50")
@setup(
{
"legacywith02": "{{ key }}{% with dict.key as key %}"
"{{ key }}-{{ dict.key }}-{{ key }}"
"{% endwith %}{{ key }}"
}
)
def test_legacywith02(self):
output = self.engine.render_to_string("legacywith02", {"dict": {"key": 50}})
if self.engine.string_if_invalid:
self.assertEqual(output, "INVALID50-50-50INVALID")
else:
self.assertEqual(output, "50-50-50")
@setup({"with03": "{% with a=alpha b=beta %}{{ a }}{{ b }}{% endwith %}"})
def test_with03(self):
output = self.engine.render_to_string("with03", {"alpha": "A", "beta": "B"})
self.assertEqual(output, "AB")
@setup({"with-error01": "{% with dict.key xx key %}{{ key }}{% endwith %}"})
def test_with_error01(self):
with self.assertRaisesMessage(TemplateSyntaxError, self.at_least_with_one_msg):
self.engine.render_to_string("with-error01", {"dict": {"key": 50}})
@setup({"with-error02": "{% with dict.key as %}{{ key }}{% endwith %}"})
def test_with_error02(self):
with self.assertRaisesMessage(TemplateSyntaxError, self.at_least_with_one_msg):
self.engine.render_to_string("with-error02", {"dict": {"key": 50}})
class WithNodeTests(SimpleTestCase):
def test_repr(self):
node = WithNode(nodelist=[], name="a", var="dict.key")
self.assertEqual(repr(node), "<WithNode>")
|
0ff82136a92d84820c72a4a7538edbd5a295a609
|
5dc77586e3e0f9de1f032fd2ca68494d8e58928f
|
/great_expectations/expectations/metrics/column_aggregate_metrics/column_values_length_max.py
|
e19a2a38e37b64ab6f9f6ea9b589b223de8cae62
|
[
"Apache-2.0"
] |
permissive
|
great-expectations/great_expectations
|
dd7c22e6277d6b08bee3ff38a015e6e8cd434df6
|
b0290e2fd2aa05aec6d7d8871b91cb4478e9501d
|
refs/heads/develop
| 2023-09-04T09:30:26.395518
| 2023-09-02T00:00:13
| 2023-09-02T00:00:13
| 103,071,520
| 8,931
| 1,535
|
Apache-2.0
| 2023-09-14T19:57:16
| 2017-09-11T00:18:46
|
Python
|
UTF-8
|
Python
| false
| false
| 1,156
|
py
|
column_values_length_max.py
|
import pandas as pd
from great_expectations.compatibility.pyspark import functions as F
from great_expectations.compatibility.sqlalchemy import sqlalchemy as sa
from great_expectations.execution_engine import (
PandasExecutionEngine,
SparkDFExecutionEngine,
SqlAlchemyExecutionEngine,
)
from great_expectations.expectations.metrics.column_aggregate_metric_provider import (
ColumnAggregateMetricProvider,
column_aggregate_partial,
column_aggregate_value,
)
class ColumnValuesLengthMax(ColumnAggregateMetricProvider):
metric_name = "column_values.length.max"
@column_aggregate_value(engine=PandasExecutionEngine, filter_column_isnull=True)
def _pandas(cls, column: pd.Series, **kwargs: dict) -> int:
return column.map(len).max()
@column_aggregate_partial(
engine=SqlAlchemyExecutionEngine, filter_column_isnull=True
)
def _sqlalchemy(cls, column, **kwargs: dict):
return sa.func.max(sa.func.length(column))
@column_aggregate_partial(engine=SparkDFExecutionEngine, filter_column_isnull=True)
def _spark(cls, column, **kwargs: dict):
return F.max(F.length(column))
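# Hedged illustration (not part of the original module) of what the pandas path
# computes: the length of the longest string in a column, with nulls filtered out
# beforehand (filter_column_isnull=True).
#
#   import pandas as pd
#   pd.Series(["a", "abc", None]).dropna().map(len).max()  # -> 3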
|
8d6f0f9defe325257fc68f9604629f2581fa9458
|
a699a524ce54a94436c69e0469cd09ced99a009d
|
/tests/functional/python/py2index.py
|
850aec825b7fca25ecb4c4374a2c109611d79c16
|
[
"ISC"
] |
permissive
|
sarugaku/resolvelib
|
01f78cfcee85fcf8a9f0db4198dff017a5a5f812
|
77b256cdfb747e86112025d75cd698861ce355a5
|
refs/heads/main
| 2023-09-04T03:07:52.750922
| 2023-08-04T01:58:01
| 2023-08-04T01:58:01
| 136,112,488
| 114
| 29
|
ISC
| 2023-09-11T09:20:37
| 2018-06-05T03:07:00
|
Python
|
UTF-8
|
Python
| false
| false
| 9,932
|
py
|
py2index.py
|
"""Freeze metadata from Python index server to test locally.
Inspired by index_from_rubygems.rb from CocoaPods/Resolver-Integration-Specs.
This only reads metadata from wheels compatible with the given platform, and
does not cover sdists at all.
"""
from __future__ import annotations
import argparse
import collections
import dataclasses
import email.parser
import itertools
import json
import logging
import os
import pathlib
import re
import sys
import urllib.parse
import zipfile
from typing import (
IO,
BinaryIO,
Dict,
FrozenSet,
Iterable,
Iterator,
List,
NamedTuple,
Optional,
Set,
Tuple,
Union,
cast,
)
import html5lib
import packaging.requirements
import packaging.specifiers
import packaging.tags
import packaging.utils
import packaging.version
import requests
logger = logging.getLogger()
PythonVersion = Union[Tuple[int], Tuple[int, int]]
def _parse_python_version(s: str) -> PythonVersion:
match = re.match(r"^(\d+)(?:\.(\d+))?$", s)
if not match:
raise ValueError(s)
major, *more = match.groups()
if more:
return (int(major), int(more[0]))
return (int(major),)
def _parse_output_path(s: str) -> Optional[pathlib.Path]:
if s == "-":
return None
if os.sep in s or (os.altsep and os.altsep in s):
return pathlib.Path(s)
return pathlib.Path(__file__).with_name("inputs").joinpath("index", s)
def parse_args(args: Optional[List[str]]) -> argparse.Namespace:
parser = argparse.ArgumentParser()
parser.add_argument(
"package_names",
metavar="PACKAGE",
nargs="+",
type=packaging.utils.canonicalize_name,
)
parser.add_argument(
"--python-version",
dest="python_version",
type=_parse_python_version,
default=".".join(str(v) for v in sys.version_info[:2]),
)
parser.add_argument(
"--interpreter",
default=None,
)
parser.add_argument(
"--platform",
dest="platforms",
action="append",
default=None,
)
parser.add_argument(
"--output",
type=_parse_output_path,
required=True,
)
parser.add_argument(
"--overwrite",
action="store_true",
default=False,
)
return parser.parse_args(args)
def get_output_path(path: pathlib.Path, overwrite: bool) -> pathlib.Path:
if path.suffix != ".json":
path = path.with_name(path.name + ".json")
if path.is_file() and not overwrite:
raise FileExistsError(os.fspath(path))
path.parent.mkdir(parents=True, exist_ok=True)
return path
def _parse_tag(s: str) -> FrozenSet[packaging.tags.Tag]:
try:
return packaging.tags.parse_tag(s)
except ValueError:
raise ValueError(f"invalid tag {s!r}")
@dataclasses.dataclass()
class WheelMatcher:
required_python: packaging.version.Version
tags: Dict[packaging.tags.Tag, int]
@classmethod
def compatible_with(
cls,
python_version: PythonVersion,
impl: Optional[str],
plats: Optional[List[str]],
) -> WheelMatcher:
required_python = packaging.version.Version(
".".join(str(v) for v in python_version)
)
# TODO: Add ABI customization.
tag_it = itertools.chain(
packaging.tags.compatible_tags(python_version, impl, plats),
packaging.tags.cpython_tags(python_version, None, plats),
)
tags = {t: i for i, t in enumerate(tag_it)}
return cls(required_python, tags)
def rank(self, tag: str, requires_python: Optional[str]) -> Optional[int]:
if requires_python:
spec = packaging.specifiers.SpecifierSet(requires_python)
if self.required_python not in spec:
return None
ranks = [self.tags[t] for t in _parse_tag(tag) if t in self.tags]
if not ranks:
return None
return min(ranks)
@dataclasses.dataclass()
class HttpFile:
url: str
session: requests.Session
def __post_init__(self):
self._offset = 0
self._size = int(self.session.head(self.url).headers["Content-Length"])
def read(self, n=None):
if n is None:
end = self._size
else:
end = self._offset + n
headers = {"Range": f"bytes={self._offset}-{end - 1}"}
res = self.session.get(self.url, headers=headers)
data = res.content
self._offset += len(data)
return data
def seek(self, offset, whence=0):
if whence == 0:
self._offset = offset
elif whence == 1:
self._offset += offset
elif whence == 2:
self._offset = self._size + offset
else:
            err = f"invalid whence ({whence}, should be 0, 1 or 2)"
raise ValueError(err)
def seekable(self):
return True
def tell(self):
return self._offset
def _parse_wheel_name(rest: str) -> Tuple[str, str, str]:
name, rest = rest.split("-", 1)
version, x, y, z = rest.rsplit("-", 3)
return name, version, f"{x}-{y}-{z}"
def _open_metadata(zf: zipfile.ZipFile, prefix: str) -> IO[bytes]:
for fn in zf.namelist():
if not fn.endswith(".dist-info/METADATA"):
continue
if packaging.utils.canonicalize_name(fn).startswith(prefix):
return zf.open(fn)
raise ValueError("Can't find metadata")
class PackageEntry(NamedTuple):
version: str
dependencies: List[str]
DistListMapping = Dict[str, List[Tuple[int, str]]]
@dataclasses.dataclass()
class Finder:
index_urls: List[str]
matcher: WheelMatcher
session: requests.Session
def collect_best_dist_urls(self, name: str) -> Dict[str, str]:
all_dists: DistListMapping = collections.defaultdict(list)
for index_url in self.index_urls:
res = requests.get(f"{index_url}/{name}")
res.raise_for_status()
doc = html5lib.parse(res.content, namespaceHTMLElements=False)
for el in doc.findall(".//a"):
url = el.attrib["href"]
filename = urllib.parse.urlsplit(url).path.rsplit("/", 1)[-1]
wheel_name, ext = filename.rsplit(".", 1)
if ext != "whl":
continue
requires_python = el.attrib.get("data-requires-python")
name, version, tag = _parse_wheel_name(wheel_name)
try:
rank = self.matcher.rank(tag, requires_python)
except packaging.specifiers.InvalidSpecifier:
logger.critical(
"Dropping %s==%s; invalid Requires-Python %r",
name,
version,
requires_python,
)
continue
if rank is None:
continue
all_dists[version].append((rank, url))
urls = {version: min(dists)[1] for version, dists in all_dists.items()}
logger.info("%d URLs found for %s", len(urls), name)
return urls
def iter_package_entries(self, name: str) -> Iterator[PackageEntry]:
for version, url in self.collect_best_dist_urls(name).items():
http_file = cast(IO[bytes], HttpFile(url, self.session))
with zipfile.ZipFile(http_file) as zf:
with _open_metadata(zf, name) as f:
parser = email.parser.BytesParser()
data = parser.parse(cast(BinaryIO, f), headersonly=True)
dependencies: List[str] = data.get_all("Requires-Dist", [])
yield PackageEntry(version, dependencies)
def process_package_entry(
self, name: str, entry: PackageEntry
) -> Optional[Set[str]]:
more = set()
for dep in entry.dependencies:
try:
req = packaging.requirements.Requirement(dep)
except packaging.requirements.InvalidRequirement:
logger.critical(
"Dropping %s==%s; invalid dependency %r",
name,
entry.version,
dep,
)
return None
more.add(str(packaging.utils.canonicalize_name(req.name)))
return more
def find(self, package_names: Iterable[str]) -> dict:
data = {}
while package_names:
more: Set[str] = set()
logger.info("Discovering %s", ", ".join(package_names))
for name in package_names:
entries: Dict[str, dict] = {}
for e in self.iter_package_entries(name):
result = self.process_package_entry(name, e)
if result is None:
continue
more |= result
entries[e.version] = {"dependencies": e.dependencies}
data[name] = entries
package_names = {n for n in more if n not in data}
return data
def main(args: Optional[List[str]]) -> int:
options = parse_args(args)
if not options.output:
output_path: Optional[pathlib.Path] = None
else:
output_path = get_output_path(options.output, options.overwrite)
matcher = WheelMatcher.compatible_with(
options.python_version, options.interpreter, options.platforms
)
finder = Finder(["https://pypi.org/simple"], matcher, requests.Session())
data = finder.find(options.package_names)
if output_path is None:
json.dump(data, sys.stdout, indent=2)
print()
else:
with output_path.open("w") as f:
json.dump(data, f, indent="\t")
logger.info("Written: %s", os.fspath(output_path))
return 0
if __name__ == "__main__":
logging.basicConfig(stream=sys.stderr, level=logging.INFO)
sys.exit(main(None))
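# Example invocation (a sketch; the script filename is a placeholder and package names are
# assumed to be accepted positionally, as options.package_names suggests; the flags shown
# are the ones defined in parse_args above):
#
#   python find_dependencies.py requests --platform manylinux2014_x86_64 --output deps.json --overwrite
#
# The output is a JSON mapping of package name -> version -> {"dependencies": [...]},
# as assembled by Finder.find().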
|
6b3fba2572f65154c3b213422ec46e66fff688db
|
c5311176cd07f267fb1ca4f9cd71b308ed0778c5
|
/scripts/data/shared/normalize.py
|
87a27db6d8fab9da0db863171ada89dc191bf3dc
|
[
"MIT"
] |
permissive
|
dwadden/dygiepp
|
1a71885b0588bb5f0997dec13b27ebfd30169e7c
|
ab764cd0d48b7c430a78a1edddf5acaeec13c109
|
refs/heads/master
| 2023-07-27T19:30:00.399646
| 2023-07-19T20:52:06
| 2023-07-19T20:52:06
| 171,385,430
| 534
| 129
|
MIT
| 2023-07-19T20:52:08
| 2019-02-19T01:48:41
|
Python
|
UTF-8
|
Python
| false
| false
| 2,698
|
py
|
normalize.py
|
import argparse
import json
import os
from dygie.data.dataset_readers.document import Document, Dataset
def load_jsonl(fname):
return [json.loads(x) for x in open(fname)]
def save_jsonl(xs, fname):
with open(fname, "w") as f:
for x in xs:
print(json.dumps(x), file=f)
def get_args():
parser = argparse.ArgumentParser(
description="Normalize a dataset by adding a `dataset` field and splitting long documents.")
parser.add_argument("input_directory", type=str,
help="Directory with train, dev, and test files.")
parser.add_argument("output_directory", type=str,
help="Directory where the output files will go.")
parser.add_argument("--file_extension", type=str, default="jsonl",
help="File extension for data files.")
parser.add_argument("--train_name", type=str, default="train",
help="Name of the file with the training split.")
parser.add_argument("--dev_name", type=str, default="dev",
help="Name of the file with the dev split. For instance, `validation`.")
parser.add_argument("--test_name", type=str, default="test",
help="Name of the file with the test split.")
parser.add_argument("--max_tokens_per_doc", type=int, default=500,
help="Maximum tokens per document. Longer ones will be split. If set to 0, do not split documents.")
parser.add_argument("--dataset", type=str, default=None, help="Dataset name.")
return parser.parse_args()
class Normalizer:
def __init__(self, **kwargs):
for k, v in kwargs.items():
setattr(self, k, v)
def normalize(self):
os.makedirs(self.output_directory, exist_ok=True)
fold_names = [self.train_name, self.dev_name, self.test_name]
for fold in fold_names:
self.process_fold(fold)
def process_fold(self, fold):
fname = f"{self.input_directory}/{fold}.{self.file_extension}"
dataset = Dataset.from_jsonl(fname)
res = []
for doc in dataset:
res.extend(self.process_entry(doc))
out_name = f"{self.output_directory}/{fold}.{self.file_extension}"
save_jsonl(res, out_name)
def process_entry(self, doc):
doc.dataset = self.dataset
if self.max_tokens_per_doc > 0:
splits = doc.split(self.max_tokens_per_doc)
else:
splits = [doc]
return [split.to_json() for split in splits]
def main():
args = get_args()
normalizer = Normalizer(**vars(args))
normalizer.normalize()
if __name__ == "__main__":
main()
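# Example invocation (a sketch; directory and dataset names are placeholders):
#
#   python normalize.py data/raw data/normalized --dataset scierc --max_tokens_per_doc 500
#
# This reads train/dev/test .jsonl files from data/raw, tags each document with the given
# dataset name, splits documents longer than 500 tokens, and writes the results to data/normalized.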
|
ed2822b58dc4c3156b178abbe8d2db714a360583
|
afd2087e80478010d9df66e78280f75e1ff17d45
|
/torch/_dynamo/variables/dicts.py
|
4023fe7b226e9c44e045262d2f8380f877a4d33f
|
[
"BSD-3-Clause",
"BSD-2-Clause",
"LicenseRef-scancode-secret-labs-2011",
"LicenseRef-scancode-generic-cla",
"BSL-1.0",
"Apache-2.0"
] |
permissive
|
pytorch/pytorch
|
7521ac50c47d18b916ae47a6592c4646c2cb69b5
|
a6f7dd4707ac116c0f5fb5f44f42429f38d23ab4
|
refs/heads/main
| 2023-08-03T05:05:02.822937
| 2023-08-03T00:40:33
| 2023-08-03T04:14:52
| 65,600,975
| 77,092
| 24,610
|
NOASSERTION
| 2023-09-14T21:58:39
| 2016-08-13T05:26:41
|
Python
|
UTF-8
|
Python
| false
| false
| 17,934
|
py
|
dicts.py
|
import collections
import dataclasses
import functools
import inspect
from typing import Any, Dict, List, Tuple
import torch
import torch.fx
from torch.fx import _pytree as fx_pytree
from torch.utils import _pytree as pytree
from .. import variables
from ..bytecode_transformation import create_call_function, create_instruction
from ..eval_frame import skip_code
from ..exc import unimplemented
from ..source import AttrSource, GlobalWeakRefSource
from ..utils import global_key_name, istensor
from .base import MutableLocal, VariableTracker
from .constant import ConstantVariable
from .tensor import TensorVariable
class ConstDictVariable(VariableTracker):
def __init__(self, items, user_cls, recursively_contains=None, **kwargs):
super().__init__(recursively_contains=recursively_contains, **kwargs)
self.guards.update(VariableTracker.propagate(items.values())["guards"])
self.items = items
self.user_cls = user_cls
def as_proxy(self):
return {k: v.as_proxy() for k, v in self.items.items()}
def as_python_constant(self):
return {k: v.as_python_constant() for k, v in self.items.items()}
def python_type(self):
return self.user_cls
def reconstruct(self, codegen):
# instructions to load collections.OrderedDict if necessary
if self.user_cls is collections.OrderedDict:
codegen.extend_output(
[
codegen.create_load_python_module(collections, True),
codegen.create_load_attr("OrderedDict"),
]
)
# instructions to build the dict keys and values
for key in self.items.keys():
if istensor(key):
codegen.append_output(
codegen.create_load_global(global_key_name(key), True, add=True)
)
codegen.extend_output(create_call_function(0, False))
else:
codegen.append_output(codegen.create_load_const(key))
codegen(self.items[key])
# BUILD_MAP and calling collections.OrderedDict if necessary
if self.user_cls is collections.OrderedDict:
return [
create_instruction("BUILD_MAP", arg=len(self.items)),
*create_call_function(1, False),
]
# BUILD_MAP only if user_cls is dict
else:
return [create_instruction("BUILD_MAP", arg=len(self.items))]
def getitem_const(self, arg: VariableTracker):
return self.items[ConstDictVariable.get_key(arg)].add_options(self, arg)
def call_method(
self,
tx,
name,
args: "List[VariableTracker]",
kwargs: "Dict[str, VariableTracker]",
) -> "VariableTracker":
from . import ConstantVariable, TupleVariable
options = VariableTracker.propagate(self, args, kwargs.values())
val = self.items
if name == "__getitem__":
return self.getitem_const(args[0])
elif name == "items":
assert not (args or kwargs)
return TupleVariable(
[
TupleVariable(
[
ConstDictVariable._key_to_var(
tx,
k,
**options,
),
v,
],
**options,
)
for k, v in val.items()
],
**options,
)
elif name == "keys":
assert not (args or kwargs)
return TupleVariable(
[
ConstDictVariable._key_to_var(
tx,
k,
**options,
)
for k in val.keys()
],
**options,
)
elif name == "values":
assert not (args or kwargs)
return TupleVariable(list(val.values()), **options)
elif name == "__len__":
assert not (args or kwargs)
return ConstantVariable(len(self.items), **options)
elif (
name == "__setitem__"
and args
and ConstDictVariable.is_valid_key(args[0])
and self.mutable_local
):
assert not kwargs and len(args) == 2
k = ConstDictVariable.get_key(args[0])
if istensor(k):
tx.store_dict_key(global_key_name(k), k)
newval = collections.OrderedDict(val)
newval[k] = args[1]
new_rec_contains = self.recursively_contains.union(
args[1].recursively_contains
)
if args[1].mutable_local is not None:
new_rec_contains.add(args[1].mutable_local)
return tx.replace_all(
self,
self.modifed(newval, new_rec_contains, **options),
)
elif (
name in ("pop", "get")
and args
and ConstDictVariable.is_valid_key(args[0])
and ConstDictVariable.get_key(args[0]) not in self.items
and len(args) == 2
):
# missing item, return the default value
return args[1].add_options(options)
elif (
name == "pop"
and args
and ConstDictVariable.is_valid_key(args[0])
and self.mutable_local
):
newval = collections.OrderedDict(val)
result = newval.pop(ConstDictVariable.get_key(args[0]))
tx.replace_all(self, self.modifed(newval, None, **options))
return result.add_options(options)
elif (
name == "update"
and args
and isinstance(args[0], ConstDictVariable)
and self.mutable_local
):
newval = collections.OrderedDict(val)
newval.update(args[0].items)
new_rec_contains = self.recursively_contains.union(
args[0].recursively_contains
)
result = self.modifed(
newval, recursively_contains=new_rec_contains, **options
)
return tx.replace_all(self, result)
elif (
name in ("get", "__getattr__")
and args
and ConstDictVariable.is_valid_key(args[0])
and ConstDictVariable.get_key(args[0]) in self.items
):
result = self.items[ConstDictVariable.get_key(args[0])]
return result.add_options(options)
elif (
name == "__contains__" and args and ConstDictVariable.is_valid_key(args[0])
):
return ConstantVariable(
ConstDictVariable.get_key(args[0]) in self.items, **options
)
else:
return super().call_method(tx, name, args, kwargs)
def modifed(self, items, recursively_contains, **options):
"""a copy of self with different items"""
return self.clone(
items=items, recursively_contains=recursively_contains, **options
)
def unpack_var_sequence(self, tx):
options = VariableTracker.propagate([self])
val = self.items
result = [ConstDictVariable._key_to_var(tx, k, **options) for k in val.keys()]
return result
@classmethod
def get_key(cls, arg: VariableTracker):
if isinstance(arg, TensorVariable) and arg.specialized_value is not None:
return arg.specialized_value
else:
return arg.as_python_constant()
@classmethod
def is_valid_key(cls, key):
return (
key.is_python_constant()
or isinstance(key, TensorVariable)
and key.specialized_value is not None
or isinstance(key, ConstantVariable)
and key.python_type() is torch.dtype
)
@classmethod
def _key_to_var(cls, tx, key, **options):
from .builder import VariableBuilder
if istensor(key):
return VariableBuilder(tx, GlobalWeakRefSource(global_key_name(key)))(key)
else:
assert ConstantVariable.is_literal(key)
return ConstantVariable(key, **options)
class DefaultDictVariable(ConstDictVariable):
def __init__(self, items, user_cls, default_factory=None, **kwargs):
super().__init__(items, user_cls, **kwargs)
assert user_cls is collections.defaultdict
self.default_factory = default_factory
def is_python_constant(self):
# Return false for unsupported defaults. This ensures that a bad handler
# path is not taken in BuiltinVariable for getitem.
if self.default_factory not in [list, tuple, dict] and not self.items:
return False
return super().is_python_constant()
@staticmethod
def is_supported_arg(arg):
if isinstance(arg, variables.BuiltinVariable):
return arg.fn in [list, tuple, dict]
else:
return isinstance(arg, variables.functions.BaseUserFunctionVariable)
def call_method(
self,
tx,
name,
args: "List[VariableTracker]",
kwargs: "Dict[str, VariableTracker]",
) -> "VariableTracker":
options = VariableTracker.propagate(self, args, kwargs.values())
if name == "__getitem__":
k = ConstDictVariable.get_key(args[0])
if k in self.items:
return self.getitem_const(args[0])
else:
if self.default_factory is None:
raise KeyError(f"{k}")
else:
if istensor(k):
tx.store_dict_key(global_key_name(k), k)
new_val = collections.OrderedDict(self.items)
default_var = self.default_factory.call_function(tx, [], {})
new_val[k] = default_var
new_rec_contains = self.recursively_contains.union(
default_var.recursively_contains
)
if default_var.mutable_local is not None:
new_rec_contains.add(default_var.mutable_local)
tx.replace_all(
self, self.modifed(new_val, new_rec_contains, **options)
)
return default_var
else:
return super().call_method(tx, name, args, kwargs)
class DataClassVariable(ConstDictVariable):
"""
This is a bit of a hack to deal with
transformers.file_utils.ModelOutput() from huggingface.
    ModelOutput causes trouble because it is a mix of a dataclass and an
    OrderedDict and it calls super() methods implemented in C.
"""
    # ModelOutput() excludes None, though generic dataclasses don't
include_none = False
@staticmethod
@functools.lru_cache(None)
def _patch_once():
from transformers.file_utils import ModelOutput
for obj in ModelOutput.__dict__.values():
if callable(obj):
skip_code(obj.__code__)
@staticmethod
def is_matching_cls(cls):
try:
from transformers.file_utils import ModelOutput
return issubclass(cls, ModelOutput)
except ImportError:
return False
@classmethod
def is_matching_object(cls, obj):
return cls.is_matching_cls(type(obj))
@classmethod
def create(cls, user_cls, args, kwargs, options):
DataClassVariable._patch_once()
skip_code(user_cls.__init__.__code__)
keys = [f.name for f in dataclasses.fields(user_cls)]
bound = inspect.signature(user_cls).bind(*args, **kwargs)
bound.apply_defaults()
assert set(bound.arguments.keys()) == set(keys)
items = collections.OrderedDict()
for key in keys:
val = bound.arguments[key]
if isinstance(val, VariableTracker):
items[key] = val
else:
if cls.include_none:
assert variables.ConstantVariable.is_literal(val)
items[key] = variables.ConstantVariable(val)
else:
assert val is None, f"unexpected {val}"
if len(items) == 1 and not isinstance(items[keys[0]], variables.TensorVariable):
unimplemented("DataClassVariable iterator constructor")
# TODO(jansel): implement unpacking logic in ModelOutput.__post_init__
return cls(items, user_cls, **options)
@classmethod
def wrap(cls, builder, obj):
user_cls = type(obj)
keys = [f.name for f in dataclasses.fields(user_cls)]
excluded = []
items = collections.OrderedDict()
for key in keys:
# __init__ function of a dataclass might not have yet defined the key
if hasattr(obj, key):
val = getattr(obj, key)
var = builder.__class__(
tx=builder.tx, source=AttrSource(builder.source, key)
)(val)
if val is not None or cls.include_none:
items[key] = var
else:
excluded.append(var)
return cls(
items, user_cls, **VariableTracker.propagate(excluded, items.values())
)
def __init__(self, items, user_cls, **options):
super().__init__(items, user_cls, **options)
assert self.is_matching_cls(user_cls)
def as_proxy(self):
raise NotImplementedError()
def reconstruct(self, codegen):
codegen.extend_output([codegen._create_load_const(self.user_cls)])
keys = tuple(self.items.keys())
for key in keys:
codegen(self.items[key])
return codegen.create_call_function_kw(len(keys), keys, True)
def call_method(
self,
tx,
name,
args: "List[VariableTracker]",
kwargs: "Dict[str, VariableTracker]",
) -> "VariableTracker":
options = VariableTracker.propagate(self, args, kwargs.values())
if name == "__getitem__":
assert not kwargs and len(args) == 1
index = args[0].as_python_constant()
if isinstance(index, str):
return self.items[index].add_options(options)
else:
return (
self.call_method(tx, "to_tuple", [], {})
.call_method(tx, "__getitem__", args, kwargs)
.add_options(options)
)
elif name == "to_tuple":
assert not (args or kwargs)
return variables.TupleVariable(list(self.items.values()), **options)
elif name == "__setattr__":
name = "__setitem__"
return super().call_method(tx, name, args, kwargs)
def var_getattr(self, tx, name: str) -> "VariableTracker":
if name in self.items:
return self.call_method(
tx, "__getitem__", [variables.ConstantVariable(name)], {}
)
elif not self.include_none:
defaults = {f.name: f.default for f in dataclasses.fields(self.user_cls)}
if name in defaults:
assert variables.ConstantVariable.is_literal(defaults[name])
return variables.ConstantVariable(defaults[name]).add_options(self)
super().var_getattr(tx, name)
class HFPretrainedConfigVariable(VariableTracker):
"""
Hack for HuggingFace PretrainedConfig
"""
@staticmethod
def is_matching_cls(cls):
try:
from transformers.configuration_utils import PretrainedConfig
return issubclass(cls, PretrainedConfig)
except ImportError:
return False
@classmethod
def is_matching_object(cls, obj):
return cls.is_matching_cls(type(obj))
def __init__(self, obj, **kwargs):
super().__init__(**kwargs)
self.obj = obj
assert self.is_matching_cls(type(obj))
def var_getattr(self, tx, name: str) -> "VariableTracker":
from . import ConstantVariable
return ConstantVariable(getattr(self.obj, name))
def call_hasattr(self, tx, name: str) -> "VariableTracker":
return variables.ConstantVariable(hasattr(self.obj, name)).add_options(self)
def _dictvariable_flatten(d: ConstDictVariable) -> Tuple[List[Any], pytree.Context]:
if d.python_type() is not dict:
# Note - ConstDictVariable can contain different kinds of dicts.
# However, flattening for those must differ and so cannot share the same registration as even if we
# consult the underlying python_type() to guide our flattening, that data will need to be propagated
# to unflatten. We do not have a good mechanism of doing this today, so we find it easier to treat this
# as unimplemented for now.
# TODO - Add support for flattening a ConstDictVariable with any underlying user_cls
unimplemented(f"Unsupported flattening of {d.python_type()}")
return list(d.items.values()), list(d.items.keys())
def _dictvariable_unflatten(
values: List[Any], context: pytree.Context
) -> ConstDictVariable:
assert all(isinstance(x, VariableTracker) for x in values)
# Guard propagation happens in the ConstDictVariable constructor
return ConstDictVariable(
dict(zip(context, values)), user_cls=dict, mutable_local=MutableLocal()
)
def _register_dynamo_dict_to_tree_spec():
pytree._register_pytree_node(
ConstDictVariable,
_dictvariable_flatten,
_dictvariable_unflatten,
pytree._dict_to_str,
pytree._maybe_str_to_dict,
)
fx_pytree.register_pytree_flatten_spec(
ConstDictVariable,
_dictvariable_flatten,
)
|
20228baed57374a0ff58d7ae23bf8636bcd81632
|
2be5058bf498eadfbe9775e99840c8dec60c39ca
|
/docs/snippets/pair0_sync.py
|
9452e78dd1136c13a5759deb51765f341b1211b1
|
[
"MIT"
] |
permissive
|
codypiersall/pynng
|
70a46a3f86c25da900f4c031885c64a1347a818f
|
62471eec746631bec36a4004b55fc548a28375cd
|
refs/heads/master
| 2023-08-31T23:56:33.522690
| 2022-11-20T04:09:19
| 2022-11-20T04:26:43
| 144,490,947
| 224
| 67
|
MIT
| 2023-01-17T14:57:43
| 2018-08-12T18:24:55
|
Python
|
UTF-8
|
Python
| false
| false
| 371
|
py
|
pair0_sync.py
|
from pynng import Pair0
address = 'tcp://127.0.0.1:13131'
# in real code you should also pass recv_timeout and/or send_timeout
with Pair0(listen=address) as s0, Pair0(dial=address) as s1:
s0.send(b'hello s1')
print(s1.recv()) # prints b'hello s1'
s1.send(b'hi old buddy s0, great to see ya')
    print(s0.recv()) # prints b'hi old buddy s0, great to see ya'
|
4f6b90ec8c60bf2da30157f0d3f5c3e305a7b215
|
fb5c5d50d87a6861393d31911b9fae39bdc3cc62
|
/Scripts/sims4communitylib/utils/localization/common_localized_string_separators.py
|
aa7ae185a7229f4c7f81a4e790b21fa391c1fe7e
|
[
"CC-BY-4.0"
] |
permissive
|
ColonolNutty/Sims4CommunityLibrary
|
ee26126375f2f59e5567b72f6eb4fe9737a61df3
|
58e7beb30b9c818b294d35abd2436a0192cd3e82
|
refs/heads/master
| 2023-08-31T06:04:09.223005
| 2023-08-22T19:57:42
| 2023-08-22T19:57:42
| 205,197,959
| 183
| 38
| null | 2023-05-28T16:17:53
| 2019-08-29T15:48:35
|
Python
|
UTF-8
|
Python
| false
| false
| 2,933
|
py
|
common_localized_string_separators.py
|
"""
The Sims 4 Community Library is licensed under the Creative Commons Attribution 4.0 International public license (CC BY 4.0).
https://creativecommons.org/licenses/by/4.0/
https://creativecommons.org/licenses/by/4.0/legalcode
Copyright (c) COLONOLNUTTY
"""
from sims4communitylib.enums.enumtypes.common_int import CommonInt
from sims4communitylib.enums.strings_enum import CommonStringId
class CommonLocalizedStringSeparator(CommonInt):
"""Used to separate multiple LocalizedString.
See the :func:`.CommonLocalizationUtils.combine_localized_strings` function for more details.
.. note:: The values are as follows:
        NO_SEPARATOR = "{String}{String}"
        AND = "{String} and {String}"
        ARE = "{String} are {String}"
        COLON_SPACE = "{String}: {String}"
        COMMA_SPACE = "{String}, {String}"
        COMMA_SPACE_AND = "{String}, and {String}"
        COMMA_SPACE_OR = "{String}, or {String}"
        HYPHEN = "{String}-{String}"
        IS = "{String} is {String}"
        NEWLINE = "{String}\n{String}"
        NEWLINE_NEWLINE = "{String}\n\n{String}"
        OR = "{String} or {String}"
        PLUS = "{String}+{String}"
        SPACE = "{String} {String}"
        SPACE_PARENTHESIS_SURROUNDED = "{String} ({String})"
"""
# {String}{String}
NO_SEPARATOR: 'CommonLocalizedStringSeparator' = CommonStringId.S4CL_COMBINE_TWO_STRINGS
# {String} and {String}
AND: 'CommonLocalizedStringSeparator' = CommonStringId.S4CL_STRING_AND_STRING
# {String} are {String}
ARE: 'CommonLocalizedStringSeparator' = CommonStringId.S4CL_STRING_ARE_STRING
# {String}: {String}
COLON_SPACE: 'CommonLocalizedStringSeparator' = CommonStringId.STRING_COLON_SPACE_STRING
# {String}, {String}
COMMA_SPACE: 'CommonLocalizedStringSeparator' = CommonStringId.STRING_COMMA_SPACE_STRING
# {String}, and {String}
COMMA_SPACE_AND: 'CommonLocalizedStringSeparator' = CommonStringId.S4CL_STRING_COMMA_SPACE_AND_STRING
# {String}, or {String}
COMMA_SPACE_OR: 'CommonLocalizedStringSeparator' = CommonStringId.S4CL_STRING_COMMA_SPACE_OR_STRING
# {String}-{String}
HYPHEN: 'CommonLocalizedStringSeparator' = CommonStringId.STRING_HYPHEN_STRING
# {String} is {String}
IS: 'CommonLocalizedStringSeparator' = CommonStringId.S4CL_STRING_IS_STRING
# {String}\n{String}
NEWLINE: 'CommonLocalizedStringSeparator' = CommonStringId.STRING_NEWLINE_STRING
# {String}\n\n{String}
NEWLINE_NEWLINE: 'CommonLocalizedStringSeparator' = CommonStringId.STRING_NEWLINE_NEWLINE_STRING
# {String} or {String}
OR: 'CommonLocalizedStringSeparator' = CommonStringId.S4CL_STRING_OR_STRING
# {String}+{String}
PLUS: 'CommonLocalizedStringSeparator' = CommonStringId.S4CL_STRING_PLUS_STRING
# {String} {String}
SPACE: 'CommonLocalizedStringSeparator' = CommonStringId.STRING_SPACE_STRING
# {String} ({String})
SPACE_PARENTHESIS_SURROUNDED: 'CommonLocalizedStringSeparator' = CommonStringId.STRING_SPACE_PARENTHESIS_SURROUNDED_STRING
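# Usage sketch: the class docstring points at CommonLocalizationUtils.combine_localized_strings;
# the exact keyword name for the separator argument below is an assumption.
#
#   combined = CommonLocalizationUtils.combine_localized_strings(
#       (first_string, second_string),
#       separator=CommonLocalizedStringSeparator.COMMA_SPACE_AND
#   )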
|
5d3ddf8030d6e2ca967e3499074edd226778f423
|
f791462fb1286607d16459c1602d133f8d8c8b59
|
/test/test_pickle.py
|
a54479be20546c035ee9c2e593d77de7c3251852
|
[
"Apache-2.0"
] |
permissive
|
pyro-ppl/numpyro
|
b071ed2bd93be41bafc3da8764c9f5617f996d92
|
ca96eca8e8e1531e71ba559ef7a8ad3b4b68cbc2
|
refs/heads/master
| 2023-09-03T15:56:13.252692
| 2023-08-28T14:32:25
| 2023-08-28T14:32:25
| 170,580,540
| 1,941
| 219
|
Apache-2.0
| 2023-09-04T11:26:11
| 2019-02-13T21:13:59
|
Python
|
UTF-8
|
Python
| false
| false
| 7,985
|
py
|
test_pickle.py
|
# Copyright Contributors to the Pyro project.
# SPDX-License-Identifier: Apache-2.0
import pickle
import numpy as np
from numpy.testing import assert_allclose
import pytest
from jax import random
import jax.numpy as jnp
from jax.tree_util import tree_all, tree_map
import numpyro
from numpyro.contrib.funsor import config_kl
import numpyro.distributions as dist
from numpyro.distributions import constraints
from numpyro.distributions.constraints import (
boolean,
circular,
corr_cholesky,
corr_matrix,
greater_than,
interval,
l1_ball,
lower_cholesky,
nonnegative_integer,
ordered_vector,
positive,
positive_definite,
positive_integer,
positive_ordered_vector,
real,
real_matrix,
real_vector,
scaled_unit_lower_cholesky,
simplex,
softplus_lower_cholesky,
softplus_positive,
sphere,
unit_interval,
)
from numpyro.infer import (
HMC,
HMCECS,
MCMC,
NUTS,
SA,
SVI,
BarkerMH,
DiscreteHMCGibbs,
MixedHMC,
Predictive,
TraceEnum_ELBO,
)
from numpyro.infer.autoguide import AutoDelta, AutoDiagonalNormal, AutoNormal
def normal_model():
numpyro.sample("x", dist.Normal(0, 1))
def bernoulli_model():
numpyro.sample("x", dist.Bernoulli(0.5))
def logistic_regression():
data = jnp.arange(10)
x = numpyro.sample("x", dist.Normal(0, 1))
with numpyro.plate("N", 10, subsample_size=2):
batch = numpyro.subsample(data, 0)
numpyro.sample("obs", dist.Bernoulli(logits=x), obs=batch)
def gmm(data, K):
mix_proportions = numpyro.sample("phi", dist.Dirichlet(jnp.ones(K)))
with numpyro.plate("num_clusters", K, dim=-1):
cluster_means = numpyro.sample("cluster_means", dist.Normal(jnp.arange(K), 1.0))
with numpyro.plate("data", data.shape[0], dim=-1):
assignments = numpyro.sample(
"assignments",
dist.Categorical(mix_proportions),
infer={"enumerate": "parallel"},
)
numpyro.sample("obs", dist.Normal(cluster_means[assignments], 1.0), obs=data)
@pytest.mark.parametrize("kernel", [BarkerMH, HMC, NUTS, SA])
def test_pickle_hmc(kernel):
mcmc = MCMC(kernel(normal_model), num_warmup=10, num_samples=10)
mcmc.run(random.PRNGKey(0))
pickled_mcmc = pickle.loads(pickle.dumps(mcmc))
tree_all(tree_map(assert_allclose, mcmc.get_samples(), pickled_mcmc.get_samples()))
@pytest.mark.parametrize("kernel", [BarkerMH, HMC, NUTS, SA])
def test_pickle_hmc_enumeration(kernel):
K, N = 3, 1000
true_cluster_means = jnp.array([1.0, 5.0, 10.0])
true_mix_proportions = jnp.array([0.1, 0.3, 0.6])
cluster_assignments = dist.Categorical(true_mix_proportions).sample(
random.PRNGKey(0), (N,)
)
data = dist.Normal(true_cluster_means[cluster_assignments], 1.0).sample(
random.PRNGKey(1)
)
mcmc = MCMC(kernel(gmm), num_warmup=10, num_samples=10)
mcmc.run(random.PRNGKey(0), data, K)
pickled_mcmc = pickle.loads(pickle.dumps(mcmc))
tree_all(tree_map(assert_allclose, mcmc.get_samples(), pickled_mcmc.get_samples()))
@pytest.mark.parametrize("kernel", [DiscreteHMCGibbs, MixedHMC])
def test_pickle_discrete_hmc(kernel):
mcmc = MCMC(kernel(HMC(bernoulli_model)), num_warmup=10, num_samples=10)
mcmc.run(random.PRNGKey(0))
pickled_mcmc = pickle.loads(pickle.dumps(mcmc))
tree_all(tree_map(assert_allclose, mcmc.get_samples(), pickled_mcmc.get_samples()))
def test_pickle_hmcecs():
mcmc = MCMC(HMCECS(NUTS(logistic_regression)), num_warmup=10, num_samples=10)
mcmc.run(random.PRNGKey(0))
pickled_mcmc = pickle.loads(pickle.dumps(mcmc))
tree_all(tree_map(assert_allclose, mcmc.get_samples(), pickled_mcmc.get_samples()))
def poisson_regression(x, N):
rate = numpyro.sample("param", dist.Gamma(1.0, 1.0))
batch_size = len(x) if x is not None else None
with numpyro.plate("batch", N, batch_size):
numpyro.sample("x", dist.Poisson(rate), obs=x)
@pytest.mark.parametrize("guide_class", [AutoDelta, AutoDiagonalNormal, AutoNormal])
def test_pickle_autoguide(guide_class):
x = np.random.poisson(1.0, size=(100,))
guide = guide_class(poisson_regression)
optim = numpyro.optim.Adam(1e-2)
svi = SVI(poisson_regression, guide, optim, numpyro.infer.Trace_ELBO())
svi_result = svi.run(random.PRNGKey(1), 3, x, len(x))
pickled_guide = pickle.loads(pickle.dumps(guide))
predictive = Predictive(
poisson_regression,
guide=pickled_guide,
params=svi_result.params,
num_samples=1,
return_sites=["param", "x"],
)
samples = predictive(random.PRNGKey(1), None, 1)
assert set(samples.keys()) == {"param", "x"}
def test_pickle_singleton_constraint():
    # Some numpyro constraint classes, such as constraints._Real, are only accessible
    # through their public singleton instance (such as constraints.real). This test
    # ensures that pickling and unpickling singleton instances does not re-create
    # additional instances, which is the default behavior of pickle and which would
    # break singleton semantics.
singleton_constraints = (
boolean,
circular,
corr_cholesky,
corr_matrix,
l1_ball,
lower_cholesky,
nonnegative_integer,
ordered_vector,
positive,
positive_definite,
positive_integer,
positive_ordered_vector,
real,
real_matrix,
real_vector,
scaled_unit_lower_cholesky,
simplex,
softplus_lower_cholesky,
softplus_positive,
sphere,
unit_interval,
)
for cnstr in singleton_constraints:
roundtripped_cnstr = pickle.loads(pickle.dumps(cnstr))
# make sure that the unpickled constraint is the original singleton constraint
assert roundtripped_cnstr is cnstr
# Test that it remains possible to pickle newly-created, non-singleton constraints.
# because these constraints are neither singleton nor exposed as top-level variables
# of the numpyro.distributions.constraints module, these objects are not pickled by
# reference, but by value.
int_cstr = interval(1.0, 2.0)
roundtripped_int_cstr = pickle.loads(pickle.dumps(int_cstr))
assert type(roundtripped_int_cstr) is type(int_cstr)
assert int_cstr.lower_bound == roundtripped_int_cstr.lower_bound
assert int_cstr.upper_bound == roundtripped_int_cstr.upper_bound
gt_cstr = greater_than(1.0)
roundtripped_gt_cstr = pickle.loads(pickle.dumps(gt_cstr))
assert type(roundtripped_gt_cstr) is type(gt_cstr)
assert gt_cstr.lower_bound == roundtripped_gt_cstr.lower_bound
def test_mcmc_pickle_post_warmup():
mcmc = MCMC(NUTS(normal_model), num_warmup=10, num_samples=10)
mcmc.run(random.PRNGKey(0))
pickled_mcmc = pickle.loads(pickle.dumps(mcmc))
pickled_mcmc.post_warmup_state = pickled_mcmc.last_state
pickled_mcmc.run(random.PRNGKey(1))
def bernoulli_regression(data):
f = numpyro.sample("beta", dist.Beta(1.0, 1.0))
with numpyro.plate("N", len(data)):
numpyro.sample("obs", dist.Bernoulli(f), obs=data)
def test_beta_bernoulli():
data = jnp.array([1.0] * 8 + [0.0] * 2)
def guide(data):
alpha_q = numpyro.param("alpha_q", 1.0, constraint=constraints.positive)
beta_q = numpyro.param("beta_q", 1.0, constraint=constraints.positive)
numpyro.sample("beta", dist.Beta(alpha_q, beta_q))
pickled_model = pickle.loads(pickle.dumps(config_kl(bernoulli_regression)))
optim = numpyro.optim.Adam(1e-2)
svi = SVI(config_kl(bernoulli_regression), guide, optim, TraceEnum_ELBO())
svi_result = svi.run(random.PRNGKey(0), 3, data)
params = svi_result.params
svi = SVI(pickled_model, guide, optim, TraceEnum_ELBO())
svi_result = svi.run(random.PRNGKey(0), 3, data)
pickled_params = svi_result.params
tree_all(tree_map(assert_allclose, params, pickled_params))
|
4aed034cd3f0525f45e846d50ccd886df173170c
|
35b6013c1943f37d1428afd2663c8aba0a02628d
|
/privateca/snippets/create_certificate_csr.py
|
d3bc892507ce0279e64f56ad5dc1b09b3591cf72
|
[
"Apache-2.0"
] |
permissive
|
GoogleCloudPlatform/python-docs-samples
|
d2a251805fbeab15d76ed995cf200727f63f887d
|
44e819e713c3885e38c99c16dc73b7d7478acfe8
|
refs/heads/main
| 2023-08-28T12:52:01.712293
| 2023-08-28T11:18:28
| 2023-08-28T11:18:28
| 35,065,876
| 7,035
| 7,593
|
Apache-2.0
| 2023-09-14T20:20:56
| 2015-05-04T23:26:13
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 3,080
|
py
|
create_certificate_csr.py
|
#!/usr/bin/env python
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# [START privateca_create_certificate_csr]
import google.cloud.security.privateca_v1 as privateca_v1
from google.protobuf import duration_pb2
def create_certificate_csr(
project_id: str,
location: str,
ca_pool_name: str,
ca_name: str,
certificate_name: str,
certificate_lifetime: int,
pem_csr: str,
) -> None:
"""
Create a Certificate which is issued by the specified Certificate Authority (CA).
The certificate details and the public key is provided as a Certificate Signing Request (CSR).
Args:
project_id: project ID or project number of the Cloud project you want to use.
location: location you want to use. For a list of locations, see: https://cloud.google.com/certificate-authority-service/docs/locations.
ca_pool_name: set a unique name for the CA pool.
ca_name: the name of the certificate authority to sign the CSR.
certificate_name: set a unique name for the certificate.
certificate_lifetime: the validity of the certificate in seconds.
pem_csr: set the Certificate Issuing Request in the pem encoded format.
"""
ca_service_client = privateca_v1.CertificateAuthorityServiceClient()
# The public key used to sign the certificate can be generated using any crypto library/framework.
# Also you can use Cloud KMS to retrieve an already created public key.
# For more info, see: https://cloud.google.com/kms/docs/retrieve-public-key.
# Create certificate with CSR.
# The pem_csr contains the public key and the domain details required.
certificate = privateca_v1.Certificate(
pem_csr=pem_csr,
lifetime=duration_pb2.Duration(seconds=certificate_lifetime),
)
# Create the Certificate Request.
# Set the CA which is responsible for creating the certificate with the provided CSR.
request = privateca_v1.CreateCertificateRequest(
parent=ca_service_client.ca_pool_path(project_id, location, ca_pool_name),
certificate_id=certificate_name,
certificate=certificate,
issuing_certificate_authority_id=ca_name,
)
response = ca_service_client.create_certificate(request=request)
print(f"Certificate created successfully: {response.name}")
# Get the signed certificate and the issuer chain list.
print(f"Signed certificate: {response.pem_certificate}")
print(f"Issuer chain list: {response.pem_certificate_chain}")
# [END privateca_create_certificate_csr]
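# Usage sketch: one way to build the `pem_csr` argument with the `cryptography` package
# (illustrative; the subject name and key parameters are placeholders):
#
#   from cryptography import x509
#   from cryptography.hazmat.primitives import hashes, serialization
#   from cryptography.hazmat.primitives.asymmetric import rsa
#   from cryptography.x509.oid import NameOID
#
#   key = rsa.generate_private_key(public_exponent=65537, key_size=2048)
#   csr = (
#       x509.CertificateSigningRequestBuilder()
#       .subject_name(x509.Name([x509.NameAttribute(NameOID.COMMON_NAME, "csr.example.com")]))
#       .sign(key, hashes.SHA256())
#   )
#   pem_csr = csr.public_bytes(serialization.Encoding.PEM).decode("utf-8")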
|
19e4f92b29c15a0f7f06ccee49183e6437a0cd29
|
fa89ef4a8eb06dc2015d7116637f230b6891eb8d
|
/test/units/crypto/cipher/test_camellia.py
|
25ce716ed8c96e64da9ef43dca0520fde6848063
|
[
"BSD-3-Clause"
] |
permissive
|
binref/refinery
|
f61878d9fddf616fee8edf226df22f6a35238940
|
4c7c3717ae45543b9d7bae60a4af4c00993cf719
|
refs/heads/master
| 2023-08-17T17:02:34.357138
| 2023-08-14T08:43:05
| 2023-08-14T08:43:05
| 228,019,736
| 439
| 48
|
NOASSERTION
| 2023-09-11T10:26:02
| 2019-12-14T12:32:06
|
Python
|
UTF-8
|
Python
| false
| false
| 2,407
|
py
|
test_camellia.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from ... import TestUnitBase
from refinery.units.crypto.cipher.camellia import Camellia, FL_FWD, FL_INV
class TestCamellia(TestUnitBase):
def test_fl_functions(self):
key = 0x2BFAEDFDEEEAFBFE
self.assertEqual(FL_INV(FL_FWD(0xDEFACED, key), key), 0xDEFACED)
def test_invertible(self):
u = Camellia(B'#BINARY-REFINERY', None)
m = B'0123456789ABCDEF'
self.assertEqual(u.block_decrypt(u.block_encrypt(m)), m)
def test_rfc_3731_128(self):
K = bytes.fromhex('01 23 45 67 89 ab cd ef fe dc ba 98 76 54 32 10')
M = bytes.fromhex('01 23 45 67 89 ab cd ef fe dc ba 98 76 54 32 10')
C = bytes.fromhex('67 67 31 38 54 96 69 73 08 57 06 56 48 ea be 43')
u = self.load(K, raw=True)
self.assertEqual(bytes(C | u), M)
def test_rfc_3731_192(self):
K = bytes.fromhex('01 23 45 67 89 ab cd ef fe dc ba 98 76 54 32 10 00 11 22 33 44 55 66 77')
M = bytes.fromhex('01 23 45 67 89 ab cd ef fe dc ba 98 76 54 32 10')
C = bytes.fromhex('b4 99 34 01 b3 e9 96 f8 4e e5 ce e7 d7 9b 09 b9')
u = self.load(K, raw=True)
self.assertEqual(bytes(C | u), M)
def test_rfc_3731_256(self):
K = bytes.fromhex('01 23 45 67 89 ab cd ef fe dc ba 98 76 54 32 10 00 11 22 33 44 55 66 77 88 99 aa bb cc dd ee ff')
M = bytes.fromhex('01 23 45 67 89 ab cd ef fe dc ba 98 76 54 32 10')
C = bytes.fromhex('9a cc 23 7d ff 16 d7 6c 20 ef 7c 91 9e 3a 75 09')
u = self.load(K, raw=True)
self.assertEqual(bytes(C | u), M)
def test_openssl_ofb(self):
M = b"This is a secret message.\n"
C = bytes.fromhex('3e1a c0f8 aa74 e546 8925 eb0a 1776 1127 36dd fea5 869c 171f 547c')
K = bytes.fromhex('30313233343536373839616263646566')
V = bytes.fromhex('30313233343536373839616263646566')
u = self.load(K, iv=V, mode='ofb')
self.assertEqual(bytes(M | -u), C)
self.assertEqual(bytes(C | +u), M)
def test_openssl_cfb(self):
M = b"This is a secret message.\n"
C = bytes.fromhex('3e1a c0f8 aa74 e546 8925 eb0a 1776 1127 6833 fe93 bb59 043d 8b3e')
K = b'0123456789abcdef'
V = b'0123456789abcdef'
u = self.load(K, iv=V, mode='cfb', segment_size=128)
self.assertEqual(bytes(M | -u), C)
self.assertEqual(bytes(C | +u), M)
|
adb6268891fc926931ccd1070657b0236e043272
|
6a468c1650b3c083f102f19ace0b0d6e4d0686f7
|
/sympy/matrices/utilities.py
|
b8a680b47e63615e210e561639a192ba47c642d3
|
[
"BSD-3-Clause",
"MIT"
] |
permissive
|
sympy/sympy
|
a5f8accaa7686c59d9b5c94212fef60d746dac4b
|
69f98fb2b0d845e76874067a381dba37b577e8c5
|
refs/heads/master
| 2023-09-01T15:51:37.886107
| 2023-08-31T20:54:33
| 2023-08-31T20:54:33
| 640,534
| 10,928
| 5,362
|
NOASSERTION
| 2023-09-14T17:29:13
| 2010-04-30T20:37:14
|
Python
|
UTF-8
|
Python
| false
| false
| 2,117
|
py
|
utilities.py
|
from contextlib import contextmanager
from threading import local
from sympy.core.function import expand_mul
class DotProdSimpState(local):
def __init__(self):
self.state = None
_dotprodsimp_state = DotProdSimpState()
@contextmanager
def dotprodsimp(x):
old = _dotprodsimp_state.state
try:
_dotprodsimp_state.state = x
yield
finally:
_dotprodsimp_state.state = old
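# Usage sketch for the context manager above (the matrix is illustrative): intermediate
# dotprodsimp simplification during matrix operations is toggled for the duration of the block.
#
#   from sympy import Matrix, symbols
#   a, b = symbols('a b')
#   M = Matrix([[a, b], [b, a]])
#   with dotprodsimp(True):
#       P = M * M   # products are simplified with dotprodsimp at intermediate steps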
def _dotprodsimp(expr, withsimp=False):
"""Wrapper for simplify.dotprodsimp to avoid circular imports."""
from sympy.simplify.simplify import dotprodsimp as dps
return dps(expr, withsimp=withsimp)
def _get_intermediate_simp(deffunc=lambda x: x, offfunc=lambda x: x,
onfunc=_dotprodsimp, dotprodsimp=None):
"""Support function for controlling intermediate simplification. Returns a
simplification function according to the global setting of dotprodsimp
operation.
``deffunc`` - Function to be used by default.
``offfunc`` - Function to be used if dotprodsimp has been turned off.
``onfunc`` - Function to be used if dotprodsimp has been turned on.
``dotprodsimp`` - True, False or None. Will be overridden by global
_dotprodsimp_state.state if that is not None.
"""
if dotprodsimp is False or _dotprodsimp_state.state is False:
return offfunc
if dotprodsimp is True or _dotprodsimp_state.state is True:
return onfunc
return deffunc # None, None
def _get_intermediate_simp_bool(default=False, dotprodsimp=None):
"""Same as ``_get_intermediate_simp`` but returns bools instead of functions
by default."""
return _get_intermediate_simp(default, False, True, dotprodsimp)
def _iszero(x):
"""Returns True if x is zero."""
return getattr(x, 'is_zero', None)
def _is_zero_after_expand_mul(x):
"""Tests by expand_mul only, suitable for polynomials and rational
functions."""
return expand_mul(x) == 0
def _simplify(expr):
""" Wrapper to avoid circular imports. """
from sympy.simplify.simplify import simplify
return simplify(expr)
|
36f28a5c210ee01bf990dc1bcf14acff42a24256
|
b8a803694c283a5acd13ab6760a36710884ab24f
|
/llvm/tests/test_llrt.py
|
2b220f5f1f15e9c025ca39ce4f5a194be983da06
|
[
"NCSA",
"BSD-3-Clause"
] |
permissive
|
llvmpy/llvmpy
|
8a4c31e731364ead802231b97e058b8f8c444f96
|
13130fe35f1fb03a7051ad46c36146002391a6fa
|
refs/heads/master
| 2016-09-05T16:48:54.694686
| 2015-04-28T16:21:34
| 2015-04-28T16:21:34
| 3,375,197
| 155
| 13
| null | 2015-05-27T18:36:45
| 2012-02-07T07:09:59
|
HTML
|
UTF-8
|
Python
| false
| false
| 951
|
py
|
test_llrt.py
|
import unittest
import llvm.core as lc
import llvm.ee as le
from .support import TestCase, tests
class TestLLRT(TestCase):
def test_llrt_divmod(self):
from llvm import llrt
m = lc.Module.new('testllrt')
longlong = lc.Type.int(64)
lfunc = m.add_function(lc.Type.function(longlong, [longlong, longlong]), 'foo')
bldr = lc.Builder.new(lfunc.append_basic_block(''))
bldr.ret(bldr.udiv(*lfunc.args))
llrt.replace_divmod64(lfunc)
rt = llrt.LLRT()
rt.install_symbols()
engine = le.EngineBuilder.new(m).create()
pointer = engine.get_pointer_to_function(lfunc)
from ctypes import CFUNCTYPE, c_uint64
func = CFUNCTYPE(c_uint64, c_uint64, c_uint64)(pointer)
a, b = 98342, 2231
        self.assertEqual(func(a, b), a // b)
rt.uninstall_symbols()
tests.append(TestLLRT)
if __name__ == '__main__':
unittest.main()
|
df6d2dc8b9b18707933378a1352560b08ad63022
|
833ef1cc5cbd5cf76da144d10d393e30976d9185
|
/froide/publicbody/migrations/0022_auto_20180726_1151.py
|
7e14d7876750cde3a291d808ae1a6e2c8ac13303
|
[
"MIT"
] |
permissive
|
okfde/froide
|
d022407ec30bf018e6ca587ae9df0b73a8625edf
|
16e3c69b333fc82cb1e52378fc003ddf071152a7
|
refs/heads/main
| 2023-08-31T08:02:23.343743
| 2023-08-29T07:01:03
| 2023-08-29T07:01:03
| 1,700,944
| 230
| 48
|
MIT
| 2023-09-13T09:10:40
| 2011-05-04T12:20:51
|
Python
|
UTF-8
|
Python
| false
| false
| 585
|
py
|
0022_auto_20180726_1151.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.14 on 2018-07-26 09:51
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
("publicbody", "0021_proposedpublicbody"),
]
operations = [
migrations.AlterModelOptions(
name="proposedpublicbody",
options={
"ordering": ("-created_at",),
"verbose_name": "Proposed Public Body",
"verbose_name_plural": "Proposed Public Bodies",
},
),
]
|
1956e982ba073b2edfb66939bc326f4ad3b9dc53
|
4c8ce1a65c1543d8411b990340b0ccb84bfcf18a
|
/tests/test_vars.py
|
b55366a80cb6ad35ea251285f5f6cb293e296736
|
[
"MIT"
] |
permissive
|
scipopt/PySCIPOpt
|
e7b92c39ea1cdc32a123669614e4c06bee4b73eb
|
c6329760618a88e43e32d164e363ed233499de91
|
refs/heads/master
| 2023-09-03T13:35:16.769766
| 2023-07-03T08:33:49
| 2023-07-03T08:33:49
| 59,214,089
| 390
| 92
|
MIT
| 2023-08-07T10:44:19
| 2016-05-19T14:29:21
|
Cython
|
UTF-8
|
Python
| false
| false
| 1,611
|
py
|
test_vars.py
|
from pyscipopt import Model
def test_variablebounds():
m = Model()
x0 = m.addVar(lb=-5, ub=8)
r1 = m.addVar()
r2 = m.addVar()
y0 = m.addVar(lb=3)
t = m.addVar(lb=None)
z = m.addVar()
m.chgVarLbGlobal(x0, -2)
m.chgVarUbGlobal(x0, 4)
infeas, tightened = m.tightenVarLb(x0, -5)
assert not infeas
assert not tightened
infeas, tightened = m.tightenVarLbGlobal(x0, -1)
assert not infeas
assert tightened
infeas, tightened = m.tightenVarUb(x0, 3)
assert not infeas
assert tightened
infeas, tightened = m.tightenVarUbGlobal(x0, 9)
assert not infeas
assert not tightened
infeas, fixed = m.fixVar(z, 7)
assert not infeas
assert fixed
assert m.delVar(z)
m.addCons(r1 >= x0)
m.addCons(r2 >= -x0)
m.addCons(y0 == r1 +r2)
m.setObjective(t)
m.addCons(t >= r1 * (r1 - x0) + r2 * (r2 + x0))
m.optimize()
print("x0", m.getVal(x0))
print("r1", m.getVal(r1))
print("r2", m.getVal(r2))
print("y0", m.getVal(y0))
print("t", m.getVal(t))
def test_vtype():
m = Model()
x = m.addVar(vtype= 'C', lb=-5.5, ub=8)
y = m.addVar(vtype= 'I', lb=-5.2, ub=8)
z = m.addVar(vtype= 'B', lb=-5.2, ub=8)
w = m.addVar(vtype= 'M', lb=-5.2, ub=8)
assert x.vtype() == "CONTINUOUS"
assert y.vtype() == "INTEGER"
assert z.vtype() == "BINARY"
assert w.vtype() == "IMPLINT"
m.chgVarType(x, 'I')
assert x.vtype() == "INTEGER"
m.chgVarType(y, 'M')
assert y.vtype() == "IMPLINT"
if __name__ == "__main__":
test_variablebounds()
test_vtype()
|
480dc5dc0d32f4be2bcd154509aee87253ca7e0b
|
4c44cd6df77589a43f3ad0527f11cb1842d09a3b
|
/scout/server/blueprints/phenomodels/views.py
|
d5c18b42c4622a24198070a2cba5c0b00aa3c459
|
[
"BSD-3-Clause"
] |
permissive
|
Clinical-Genomics/scout
|
b2118a06a534917734ed8575a3bd252691fbf3e4
|
1e6a633ba0a83495047ee7b66db1ebf690ee465f
|
refs/heads/main
| 2023-09-04T06:50:31.518288
| 2023-09-01T13:33:40
| 2023-09-01T13:33:40
| 25,027,539
| 143
| 64
|
BSD-3-Clause
| 2023-09-14T05:49:33
| 2014-10-10T08:44:01
|
HTML
|
UTF-8
|
Python
| false
| false
| 4,530
|
py
|
views.py
|
from bson import ObjectId
from flask import Blueprint, flash, redirect, request, url_for
from flask_login import current_user
from scout.server.extensions import store
from scout.server.utils import institute_and_case, templated
from . import controllers
from .forms import PhenoModelForm, PhenoSubPanelForm
phenomodels_bp = Blueprint(
"phenomodels",
__name__,
static_folder="static",
static_url_path="/phenomodels/static",
template_folder="templates",
)
@phenomodels_bp.route("/<institute_id>/advanced_phenotypes", methods=["GET"])
@templated("phenomodels.html")
def advanced_phenotypes(institute_id):
"""Show institute-level advanced phenotypes"""
institute_obj = institute_and_case(store, institute_id)
# Get a list of all users which are registered to this institute or collaborate with it
collaborators = set()
for inst_id in [institute_id] + institute_obj.get("collaborators", []):
for user in store.users(institute=inst_id):
if (
user["email"] == current_user.email
): # Do not include current user among collaborators
continue
collaborators.add((user["email"], user["name"], inst_id))
pheno_form = PhenoModelForm()
phenomodels = list(store.phenomodels(institute_id=institute_id))
data = {
"institute": institute_obj,
"collaborators": collaborators,
"pheno_form": pheno_form,
"phenomodels": phenomodels,
}
return data
@phenomodels_bp.route("/<institute_id>/create_phenomodel", methods=["POST"])
def create_phenomodel(institute_id):
"""Create a new phenomodel"""
institute_and_case(store, institute_id)
store.create_phenomodel(
institute_id, request.form.get("model_name"), request.form.get("model_desc")
)
return redirect(request.referrer)
@phenomodels_bp.route("/advanced_phenotypes/lock", methods=["POST"])
def lock_phenomodel():
"""Lock or unlock a specific phenomodel for editing"""
form = request.form
model_id = form.get("model_id")
phenomodel_obj = store.phenomodel(model_id)
if phenomodel_obj is None:
return redirect(request.referrer)
phenomodel_obj["admins"] = []
if (
"lock" in form
): # lock phenomodel for all users except current user and specified collaborators
phenomodel_obj["admins"] = [current_user.email] + form.getlist("user_admins")
# update phenomodels admins:
store.update_phenomodel(model_id, phenomodel_obj)
return redirect(request.referrer)
@phenomodels_bp.route("/advanced_phenotypes/remove", methods=["POST"])
def remove_phenomodel():
"""Remove an entire phenomodel using its id"""
model_id = request.form.get("model_id")
model_obj = store.phenomodel_collection.find_one_and_delete({"_id": ObjectId(model_id)})
if model_obj is None:
flash("An error occurred while deleting phenotype model", "warning")
return redirect(request.referrer)
@phenomodels_bp.route("/<institute_id>/phenomodel/<model_id>/edit_subpanel", methods=["POST"])
def checkbox_edit(institute_id, model_id):
"""Add or delete a single checkbox in a phenotyoe subpanel"""
controllers.edit_subpanel_checkbox(model_id, request.form)
return redirect(url_for(".phenomodel", institute_id=institute_id, model_id=model_id))
@phenomodels_bp.route("/<institute_id>/phenomodel-edit/<model_id>", methods=["POST"])
def phenomodel_edit(institute_id, model_id):
"""Edit a phenomodel or a subpanel"""
institute_and_case(store, institute_id)
controllers.update_phenomodel(model_id, request.form)
return redirect(request.referrer)
@phenomodels_bp.route("/<institute_id>/phenomodel/<model_id>", methods=["GET"])
@templated("phenomodel.html")
def phenomodel(institute_id, model_id):
"""View/Edit an advanced phenotype model"""
institute_obj = institute_and_case(store, institute_id)
phenomodel_obj = store.phenomodel(model_id)
if phenomodel_obj is None:
flash(
f"Could not retrieve given phenotype model using the given key '{model_id}'",
"warning",
)
return redirect(request.referrer)
pheno_form = PhenoModelForm()
subpanel_form = PhenoSubPanelForm()
pheno_form.model_name.data = phenomodel_obj["name"]
pheno_form.model_desc.data = phenomodel_obj["description"]
return dict(
institute=institute_obj,
pheno_form=pheno_form,
phenomodel=phenomodel_obj,
subpanel_form=subpanel_form,
)
|
81f5ed5f104b657a46b9a277f3d1d599dfa0a18c
|
99a5229ba31d633b202252e1fda6194c70c83c38
|
/opsdroid/connector/matrix/tests/conftest.py
|
89c88d3ee7461c593cac416770ffe6820d5a9a34
|
[
"Apache-2.0"
] |
permissive
|
opsdroid/opsdroid
|
1f5aeaa9a18e5c268ad7bfb46664f969f243814d
|
41246da2f6f379a889dadd1d3b4e139b65d3c9fb
|
refs/heads/master
| 2023-08-31T11:54:51.735969
| 2023-08-15T12:21:27
| 2023-08-15T12:21:27
| 64,034,523
| 835
| 593
|
Apache-2.0
| 2023-08-27T13:54:59
| 2016-07-23T20:18:56
|
Python
|
UTF-8
|
Python
| false
| false
| 5,054
|
py
|
conftest.py
|
import json
from pathlib import Path
import pytest
import nio
from opsdroid.connector.matrix import ConnectorMatrix
################################################################################
# Connector and config fixtures.
################################################################################
@pytest.fixture
def default_config(mock_api_obj):
return {"homeserver": mock_api_obj.base_url, "rooms": {"main": "#test:localhost"}}
@pytest.fixture
def login_config(mock_api_obj):
return {
"mxid": "@opsdroid:localhost",
"password": "supersecret",
"homeserver": mock_api_obj.base_url,
"rooms": {"main": "#test:localhost"},
}
@pytest.fixture
def token_config(mock_api_obj):
return {
"access_token": "token",
"homeserver": mock_api_obj.base_url,
"rooms": {"main": "#test:localhost"},
}
@pytest.fixture
async def connector(opsdroid, request, mock_api_obj, mocker):
if hasattr(request, "param"):
fix_name = request.param
else:
marker = request.node.get_closest_marker("matrix_connector_config")
fix_name = marker.args[0]
if isinstance(fix_name, str):
config = request.getfixturevalue(fix_name)
elif isinstance(fix_name, dict):
config = fix_name
else:
raise TypeError(
"Config should be a string name for a fixture or a dict for the config"
)
if "homeserver" not in config:
config["homeserver"] = mock_api_obj.base_url
conn = ConnectorMatrix(config, opsdroid=opsdroid)
conn.connection = mocker.MagicMock()
yield conn
if isinstance(conn.connection, nio.AsyncClient):
await conn.disconnect()
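# Usage sketch for the `connector` fixture above (the test body is illustrative; the
# `matrix_connector_config` marker may name a config fixture or carry a dict directly):
#
#   @pytest.mark.matrix_connector_config("token_config")
#   async def test_uses_token_config(connector):
#       assert isinstance(connector, ConnectorMatrix)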
@pytest.fixture
async def connector_connected(
opsdroid,
double_filter_response,
single_message_sync_response,
mock_whoami_join,
mock_api,
):
config = {
"access_token": "token",
"homeserver": mock_api.base_url,
"rooms": {"main": "#test:localhost"},
}
conn = ConnectorMatrix(config, opsdroid=opsdroid)
await conn.connect()
yield conn
await conn.disconnect()
################################################################################
# Sync Factory
################################################################################
def get_matrix_response(name):
return Path(__file__).parent / "responses" / f"{name}.json"
def empty_sync(room_id):
with open(get_matrix_response("empty_sync_response")) as fobj:
empty_sync = json.load(fobj)
room_block = empty_sync["rooms"]["join"].pop("ROOMID")
empty_sync["rooms"]["join"][room_id] = room_block
return empty_sync
def event_factory(event_type, content, sender):
return {
"content": content,
"event_id": "$1234567890987654321234567890",
"origin_server_ts": 9876543210,
"sender": sender,
"type": event_type,
"unsigned": {"age": 100},
}
def message_factory(body, msgtype, sender, **extra_content):
content = {"msgtype": msgtype, "body": body, **extra_content}
return event_factory("m.room.message", content, sender)
def sync_response(events, room_id="!12345:localhost"):
sync = empty_sync(room_id)
sync["rooms"]["join"][room_id]["timeline"]["events"] = events
return sync
@pytest.fixture
def message_sync_response(request):
room_id = "!12345:localhost"
room_id_markers = [
marker for marker in request.node.own_markers if marker.name == "sync_room_id"
]
    if room_id_markers:
        room_id = room_id_markers[0].args[0]
markers = [
marker
for marker in request.node.own_markers
if marker.name == "add_sync_messsage"
]
events = []
for marker in markers:
events.append(message_factory(*marker.args, **marker.kwargs))
return sync_response(events, room_id=room_id)
################################################################################
# Response Fixtures
################################################################################
@pytest.fixture
def double_filter_response(mock_api_obj):
mock_api_obj.add_response(
"/_matrix/client/r0/user/@opsdroid:localhost/filter",
"POST",
{"filter_id": "1234567890"},
)
mock_api_obj.add_response(
"/_matrix/client/r0/user/@opsdroid:localhost/filter",
"POST",
{"filter_id": "0987654321"},
)
@pytest.fixture
def single_message_sync_response(mock_api_obj):
"""Return a sync response with a single test message in it."""
event = message_factory("Test", "m.text", "@stuart:localhost")
sync = sync_response([event])
mock_api_obj.add_response("/_matrix/client/r0/sync", "GET", sync)
@pytest.fixture
def mock_whoami_join(mock_api_obj):
mock_api_obj.add_response(
"/_matrix/client/r0/account/whoami", "GET", {"user_id": "@opsdroid:localhost"}
)
mock_api_obj.add_response(
"/_matrix/client/r0/join/#test:localhost",
"POST",
{"room_id": "!12355:localhost"},
)
|
faae43450301555e58b90bf66bed3af7f427f3ed
|
d767236a45cc8a88ff652411cb96611f256f34c0
|
/keep/commands/cmd_pull.py
|
446b9049a31135818e44eb39ba0e4a3d10fcfb7e
|
[
"MIT"
] |
permissive
|
OrkoHunter/keep
|
99dd90b6c1dd727de4e2b6193b9ac5b0fc02c5da
|
e6eadb926464af1ed41e9c8c69b72882b6bb1d77
|
refs/heads/master
| 2023-05-25T02:22:34.512630
| 2023-05-20T01:49:42
| 2023-05-20T01:49:42
| 46,737,100
| 609
| 69
|
MIT
| 2023-05-20T01:49:43
| 2015-11-23T17:36:10
|
Python
|
UTF-8
|
Python
| false
| false
| 883
|
py
|
cmd_pull.py
|
import click
import os
from keep import cli, utils
from github import Github
@click.command('pull', short_help='Pull commands from saved GitHub gist.')
@cli.pass_context
def cli(ctx):
"""Pull commands from saved GitHub gist."""
commands_file_path = os.path.join(utils.dir_path, 'commands.json')
token = utils.get_github_token()
if not token:
return
hub = Github(token['token'])
gist = hub.get_gist(token['gist'])
gist_url = f"https://gist.github.com/{token['gist']}"
prompt_str = f"[CRITICAL] Replace local commands with GitHub gist\nGist URL : {gist_url} ?"
if click.confirm(prompt_str, abort=True):
pass
"""Using `w+` so it create the file if doesn't exist (Issue #64)"""
with open(commands_file_path, 'w+') as commands_file:
commands_file.write(gist.files['commands.json'].content)
click.echo("Done!")
|
3bb4553bad32ed5d6d017f50107c5b3b22cbc5ca
|
9a2bbc25016326b3b1da275e3b9d9a3c5c5878a6
|
/project/game/ai/defence/yaku_analyzer/atodzuke.py
|
ba9fc78cfa9169b6ca27edd382ce9df7ac879745
|
[
"MIT"
] |
permissive
|
MahjongRepository/tenhou-python-bot
|
3daabf510d58dfe7525bccf0df1575f027b632d2
|
112b08faab08ee862813de06cb5acc5db1c4feb0
|
refs/heads/dev
| 2023-07-20T14:51:02.101557
| 2023-07-08T10:11:47
| 2023-07-08T10:11:47
| 56,445,019
| 217
| 75
|
MIT
| 2023-05-23T02:05:41
| 2016-04-17T15:54:58
|
Python
|
UTF-8
|
Python
| false
| false
| 1,310
|
py
|
atodzuke.py
|
from game.ai.defence.yaku_analyzer.yaku_analyzer import YakuAnalyzer
from game.ai.helpers.defence import TileDanger
from mahjong.utils import is_honor
class AtodzukeAnalyzer(YakuAnalyzer):
id = "atodzuke_yakuhai"
def __init__(self, enemy):
self.enemy = enemy
def serialize(self):
return {"id": self.id}
# we must check atodzuke after all other yaku and only if there are no other yaku
# so activation check is on the caller's side
def is_yaku_active(self):
return True
def melds_han(self):
return 1
def get_safe_tiles_34(self):
safe_tiles = []
for x in range(0, 34):
if not is_honor(x):
safe_tiles.append(x)
elif not self.enemy.valued_honors.count(x):
safe_tiles.append(x)
return safe_tiles
def get_bonus_danger(self, tile_136, number_of_revealed_tiles):
bonus_danger = []
tile_34 = tile_136 // 4
number_of_yakuhai = self.enemy.valued_honors.count(tile_34)
if number_of_yakuhai > 0 and number_of_revealed_tiles < 3:
bonus_danger.append(TileDanger.ATODZUKE_YAKUHAI_HONOR_BONUS_DANGER)
return bonus_danger
def is_absorbed(self, possible_yaku, tile_34=None):
return len(possible_yaku) > 1
|
f0add6b5321654c9a250a353878faa9900a90a04
|
a4ea525e226d6c401fdb87a6e9adfdc5d07e6020
|
/src/azure-cli-core/azure/cli/core/aaz/_command_ctx.py
|
40409230a10c4157f380eee2aa79266466a13d41
|
[
"MIT",
"BSD-3-Clause",
"LGPL-2.0-or-later",
"GPL-1.0-or-later",
"MPL-2.0",
"LGPL-2.1-only",
"Apache-2.0",
"LGPL-2.1-or-later",
"BSD-2-Clause"
] |
permissive
|
Azure/azure-cli
|
13340eeca2e288e66e84d393fa1c8a93d46c8686
|
a40fd14ad0b6e89720a2e58d4d9be3a6ce1535ca
|
refs/heads/dev
| 2023-08-17T06:25:37.431463
| 2023-08-17T06:00:10
| 2023-08-17T06:00:10
| 51,040,886
| 4,018
| 3,310
|
MIT
| 2023-09-14T11:11:05
| 2016-02-04T00:21:51
|
Python
|
UTF-8
|
Python
| false
| false
| 5,401
|
py
|
_command_ctx.py
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# pylint: disable=too-few-public-methods, too-many-instance-attributes, protected-access, not-callable
from azure.cli.core._profile import Profile
from azure.cli.core.azclierror import InvalidArgumentValueError
from ._arg_action import AAZArgActionOperations, AAZGenericUpdateAction
from ._base import AAZUndefined
from ._field_type import AAZObjectType
from ._field_value import AAZObject
from ._selector import AAZSelectors
from .exceptions import AAZInvalidArgValueError
class AAZCommandCtx:
def __init__(self, cli_ctx, schema, command_args, no_wait_arg=None):
self._cli_ctx = cli_ctx
self._profile = Profile(cli_ctx=cli_ctx)
self._subscription_id = None
self.args = schema()
assert self.args._is_patch # make sure self.ctx.args is patch
for dest, cmd_arg in command_args.items():
if hasattr(schema, dest):
if isinstance(cmd_arg, AAZArgActionOperations):
cmd_arg.apply(self.args, dest)
elif cmd_arg != AAZUndefined:
self.args[dest] = cmd_arg
elif dest == "subscription":
                # support specifying the command's subscription when calling AAZCommand directly in code
if isinstance(cmd_arg, str):
self._subscription_id = cmd_arg
self._clients = {}
self._vars_schema = AAZObjectType()
self.vars = AAZObject(schema=self._vars_schema, data={})
self.selectors = AAZSelectors()
self.generic_update_args = command_args.get(AAZGenericUpdateAction.DEST, None)
# support no wait
self.lro_no_wait = command_args.get(no_wait_arg, False) if no_wait_arg else False
# support paging
self.next_link = AAZUndefined
self._aux_subscriptions = set()
self._aux_tenants = set()
def format_args(self):
try:
self.args._schema._fmt(ctx=self, value=self.args)
except AAZInvalidArgValueError as err:
raise InvalidArgumentValueError(str(err))
def get_login_credential(self):
credential, _, _ = self._profile.get_login_credentials(
subscription_id=self.subscription_id,
aux_subscriptions=self.aux_subscriptions,
aux_tenants=self.aux_tenants
)
return credential
def get_http_client(self, client_type):
from ._client import registered_clients
if client_type not in self._clients:
            # if no client instance exists yet, create one
from azure.cli.core.commands.client_factory import _prepare_client_kwargs_track2
assert client_type
client_cls = registered_clients[client_type]
credential = self.get_login_credential()
client_kwargs = _prepare_client_kwargs_track2(self._cli_ctx)
client_kwargs['user_agent'] += " (AAZ)" # Add AAZ label in user agent
self._clients[client_type] = client_cls(self._cli_ctx, credential, **client_kwargs)
return self._clients[client_type]
def set_var(self, name, data, schema_builder=None):
if not hasattr(self._vars_schema, name):
assert schema_builder is not None
self._vars_schema[name] = schema_builder()
self.vars[name] = data
@staticmethod
def get_error_format(name):
if name is None:
return None
from ._error_format import registered_error_formats
return registered_error_formats[name]
@property
def subscription_id(self):
from azure.cli.core.commands.client_factory import get_subscription_id
if self._subscription_id is None:
self._subscription_id = get_subscription_id(cli_ctx=self._cli_ctx)
return self._subscription_id
@property
def profile(self):
return self._profile
def update_aux_subscriptions(self, subscription_id):
if subscription_id == self._subscription_id:
return
self._aux_subscriptions.add(subscription_id)
def update_aux_tenants(self, tenant_id):
self._aux_tenants.add(tenant_id)
@property
def aux_subscriptions(self):
return list(self._aux_subscriptions) or None
@property
def aux_tenants(self):
return list(self._aux_tenants) or None
def get_subscription_locations(ctx: AAZCommandCtx):
from azure.cli.core.commands.parameters import get_subscription_locations as _get_subscription_locations
return _get_subscription_locations(ctx._cli_ctx)
def get_resource_group_location(ctx: AAZCommandCtx, rg_name: str):
from azure.cli.core.commands.client_factory import get_mgmt_service_client
from azure.cli.core.profiles import ResourceType
from azure.core.exceptions import ResourceNotFoundError
resource_client = get_mgmt_service_client(ctx._cli_ctx, ResourceType.MGMT_RESOURCE_RESOURCES)
try:
rg = resource_client.resource_groups.get(rg_name)
except ResourceNotFoundError:
return AAZUndefined
return rg.location
|
7c5e625480db1bd97a02bb0527b4ab7f8ce7f0b2
|
18ad3a6818cd9d8243e2fe41d65bc76530e6dbae
|
/tools/parse_test_res.py
|
fd5b0189d4ef130b0e772d483cf4c1e5643dff06
|
[
"MIT"
] |
permissive
|
KaiyangZhou/deep-person-reid
|
55fe2fa0306847c5447d1c70fecd1f585758ae5e
|
566a56a2cb255f59ba75aa817032621784df546a
|
refs/heads/master
| 2023-09-02T21:55:37.749729
| 2023-02-08T02:52:17
| 2023-02-08T02:52:17
| 124,800,162
| 4,191
| 1,167
|
MIT
| 2023-05-23T11:41:21
| 2018-03-11T21:14:39
|
Python
|
UTF-8
|
Python
| false
| false
| 2,976
|
py
|
parse_test_res.py
|
"""
This script aims to automate the process of calculating average results
stored in the test.log files over multiple splits.
How to use:
For example, you have done evaluation over 20 splits on VIPeR, leading to
the following file structure
log/
eval_viper/
split_0/
test.log-xxxx
split_1/
test.log-xxxx
split_2/
test.log-xxxx
...
You can run the following command in your terminal to get the average performance:
$ python tools/parse_test_res.py log/eval_viper
"""
import os
import re
import glob
import numpy as np
import argparse
from collections import defaultdict
from torchreid.utils import check_isfile, listdir_nohidden
def parse_file(filepath, regex_mAP, regex_r1, regex_r5, regex_r10, regex_r20):
results = {}
with open(filepath, 'r') as f:
lines = f.readlines()
for line in lines:
line = line.strip()
match_mAP = regex_mAP.search(line)
if match_mAP:
mAP = float(match_mAP.group(1))
results['mAP'] = mAP
match_r1 = regex_r1.search(line)
if match_r1:
r1 = float(match_r1.group(1))
results['r1'] = r1
match_r5 = regex_r5.search(line)
if match_r5:
r5 = float(match_r5.group(1))
results['r5'] = r5
match_r10 = regex_r10.search(line)
if match_r10:
r10 = float(match_r10.group(1))
results['r10'] = r10
match_r20 = regex_r20.search(line)
if match_r20:
r20 = float(match_r20.group(1))
results['r20'] = r20
return results
def main(args):
regex_mAP = re.compile(r'mAP: ([\.\deE+-]+)%')
regex_r1 = re.compile(r'Rank-1 : ([\.\deE+-]+)%')
regex_r5 = re.compile(r'Rank-5 : ([\.\deE+-]+)%')
regex_r10 = re.compile(r'Rank-10 : ([\.\deE+-]+)%')
regex_r20 = re.compile(r'Rank-20 : ([\.\deE+-]+)%')
final_res = defaultdict(list)
directories = listdir_nohidden(args.directory, sort=True)
num_dirs = len(directories)
for directory in directories:
fullpath = os.path.join(args.directory, directory)
filepath = glob.glob(os.path.join(fullpath, 'test.log*'))[0]
check_isfile(filepath)
print(f'Parsing {filepath}')
res = parse_file(
filepath, regex_mAP, regex_r1, regex_r5, regex_r10, regex_r20
)
for key, value in res.items():
final_res[key].append(value)
print('Finished parsing')
print(f'The average results over {num_dirs} splits are shown below')
for key, values in final_res.items():
mean_val = np.mean(values)
print(f'{key}: {mean_val:.1f}')
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('directory', type=str, help='Path to directory')
args = parser.parse_args()
main(args)
|
31244b9a94674da0a9adb370d85d19452aade290
|
b40d1a26ea04a19ec0da7bf55db84b7ee36cc898
|
/leetcode.com/python/379_Design_Phone_Directory.py
|
07558d24dd605d46cd06e69f6e812a7943b4e626
|
[
"MIT"
] |
permissive
|
partho-maple/coding-interview-gym
|
5e8af7d404c28d4b9b52e5cffc540fd51d8025cf
|
20ae1a048eddbc9a32c819cf61258e2b57572f05
|
refs/heads/master
| 2022-09-11T16:36:01.702626
| 2022-03-14T08:39:47
| 2022-03-14T08:39:47
| 69,802,909
| 862
| 438
|
MIT
| 2022-08-18T06:42:46
| 2016-10-02T14:51:31
|
Python
|
UTF-8
|
Python
| false
| false
| 1,577
|
py
|
379_Design_Phone_Directory.py
|
import heapq
class PhoneDirectory(object):
def __init__(self, maxNumbers):
"""
Initialize your data structure here
@param maxNumbers - The maximum numbers that can be stored in the phone directory.
:type maxNumbers: int
"""
self.phoneDictSts = {}
self.availableNums = []
for i in range(maxNumbers):
self.phoneDictSts[i] = True
heapq.heappush(self.availableNums, i)
def get(self):
"""
Provide a number which is not assigned to anyone.
@return - Return an available number. Return -1 if none is available.
:rtype: int
"""
if len(self.availableNums) > 0:
num = heapq.heappop(self.availableNums)
self.phoneDictSts[num] = False
return num
else:
return -1
def check(self, number):
"""
Check if a number is available or not.
:type number: int
:rtype: bool
"""
if self.phoneDictSts[number] == True:
return True
else:
return False
def release(self, number):
"""
Recycle or release a number.
:type number: int
:rtype: None
"""
if self.phoneDictSts[number] == False:
self.phoneDictSts[number] = True
heapq.heappush(self.availableNums, number)
# Your PhoneDirectory object will be instantiated and called as such:
# obj = PhoneDirectory(maxNumbers)
# param_1 = obj.get()
# param_2 = obj.check(number)
# obj.release(number)
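# A minimal usage sketch: the min-heap hands out the smallest free number first,
# and released numbers become available for reuse.
if __name__ == "__main__":
    directory = PhoneDirectory(3)
    assert directory.get() == 0   # smallest available number
    assert directory.get() == 1
    assert directory.check(2)     # 2 has not been handed out yet
    directory.release(0)
    assert directory.get() == 0   # released numbers are reused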
|
8f70a02ee9923aef72238cd7d78b42d38196b443
|
98f1a0bfa5b20a0b81e9e555d76e706c62d949c9
|
/tests/python/common/backend/test_tensor.py
|
2f0bed1be25cd30577ffe9d02fe02179ef9bb35d
|
[
"Apache-2.0"
] |
permissive
|
dmlc/dgl
|
3a8fbca3a7f0e9adf6e69679ad62948df48dfc42
|
bbc8ff6261f2e0d2b5982e992b6fbe545e2a4aa1
|
refs/heads/master
| 2023-08-31T16:33:21.139163
| 2023-08-31T07:49:22
| 2023-08-31T07:49:22
| 130,375,797
| 12,631
| 3,482
|
Apache-2.0
| 2023-09-14T15:48:24
| 2018-04-20T14:49:09
|
Python
|
UTF-8
|
Python
| false
| false
| 1,376
|
py
|
test_tensor.py
|
import unittest
import backend as F
import dgl
import dgl.ndarray as nd
import numpy as np
@unittest.skipIf(
dgl.backend.backend_name == "tensorflow",
reason="TF doesn't support inplace update",
)
def test_dlpack():
# test dlpack conversion.
def nd2th():
ans = np.array(
[[1.0, 1.0, 1.0, 1.0], [0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0]]
)
x = nd.array(np.zeros((3, 4), dtype=np.float32))
dl = x.to_dlpack()
y = F.zerocopy_from_dlpack(dl)
y[0] = 1
print(x)
print(y)
assert np.allclose(x.asnumpy(), ans)
def th2nd():
ans = np.array(
[[1.0, 1.0, 1.0, 1.0], [0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0]]
)
x = F.zeros((3, 4))
dl = F.zerocopy_to_dlpack(x)
y = nd.from_dlpack(dl)
x[0] = 1
print(x)
print(y)
assert np.allclose(y.asnumpy(), ans)
def th2nd_incontiguous():
x = F.astype(F.tensor([[0, 1], [2, 3]]), F.int64)
ans = np.array([0, 2])
y = x[:2, 0]
# Uncomment this line and comment the one below to observe error
# dl = dlpack.to_dlpack(y)
dl = F.zerocopy_to_dlpack(y)
z = nd.from_dlpack(dl)
print(x)
print(z)
assert np.allclose(z.asnumpy(), ans)
nd2th()
th2nd()
th2nd_incontiguous()
|
c6ba3191f3612fd54402051d93dded02b5c6e38e
|
ec85250addb7357dfe7bb3e0680d53fc7b0fd8fb
|
/python_modules/dagster/dagster_tests/general_tests/py3_tests/test_inference.py
|
6b0c48334b6ec62313e625db8a4bb5354a22622e
|
[
"Apache-2.0"
] |
permissive
|
dagster-io/dagster
|
6adb5deee8bcf3ea1866a6a64f2ed81e1db5e73a
|
fe21995e0402878437a828c6a4244025eac8c43b
|
refs/heads/master
| 2023-09-05T20:46:08.203794
| 2023-09-05T19:54:52
| 2023-09-05T19:54:52
| 131,619,646
| 8,565
| 1,154
|
Apache-2.0
| 2023-09-14T21:57:37
| 2018-04-30T16:30:04
|
Python
|
UTF-8
|
Python
| false
| false
| 14,694
|
py
|
test_inference.py
|
# ruff: noqa: D416
from typing import Any, Dict, List, Optional, Tuple
import pytest
from dagster import (
DagsterInvalidDefinitionError,
DagsterType,
In,
Int,
graph,
job,
make_python_type_usable_as_dagster_type,
op,
usable_as_dagster_type,
)
from dagster._core.definitions.inference import infer_input_props, infer_output_props
from dagster._core.types.dagster_type import DagsterTypeKind
from dagster._utils.test import wrap_op_in_graph_and_execute
def test_infer_op_description_from_docstring():
@op
def my_op(_):
"""Here is some docstring."""
assert my_op.description == "Here is some docstring."
def test_infer_op_description_no_docstring():
@op
def my_op(_):
pass
assert my_op.description is None
def test_docstring_does_not_override():
@op(description="abc")
def my_op(_):
"""Here is some docstring."""
assert my_op.description == "abc"
def test_single_typed_input():
@op
def add_one_infer(_context, num: int):
return num + 1
@op(ins={"num": In(Int)})
def add_one_ex(_context, num):
return num + 1
assert len(add_one_infer.input_defs) == 1
assert add_one_ex.input_defs[0].name == add_one_infer.input_defs[0].name
assert (
add_one_ex.input_defs[0].dagster_type.unique_name
== add_one_infer.input_defs[0].dagster_type.unique_name
)
def test_precedence():
@op(ins={"num": In(Int)})
def add_one(_context, num: Any):
return num + 1
assert add_one.input_defs[0].dagster_type.unique_name == "Int"
def test_double_typed_input():
@op
def subtract(_context, num_one: int, num_two: int):
return num_one + num_two
assert subtract
assert len(subtract.input_defs) == 2
assert subtract.input_defs[0].name == "num_one"
assert subtract.input_defs[0].dagster_type.unique_name == "Int"
assert subtract.input_defs[1].name == "num_two"
assert subtract.input_defs[1].dagster_type.unique_name == "Int"
def test_single_typed_input_and_output():
@op
def add_one(_context, num: int) -> int:
return num + 1
assert add_one
assert len(add_one.input_defs) == 1
assert add_one.input_defs[0].name == "num"
assert add_one.input_defs[0].dagster_type.unique_name == "Int"
assert len(add_one.output_defs) == 1
assert add_one.output_defs[0].dagster_type.unique_name == "Int"
def test_single_typed_input_and_output_lambda():
@op
def add_one(num: int) -> int:
return num + 1
assert add_one
assert len(add_one.input_defs) == 1
assert add_one.input_defs[0].name == "num"
assert add_one.input_defs[0].dagster_type.unique_name == "Int"
assert len(add_one.output_defs) == 1
assert add_one.output_defs[0].dagster_type.unique_name == "Int"
def test_string_typed_input_and_output():
@op
def add_one(_context, num: "Optional[int]") -> "int":
return num + 1 if num else 1
assert add_one
assert len(add_one.input_defs) == 1
assert add_one.input_defs[0].name == "num"
assert add_one.input_defs[0].dagster_type.display_name == "Int?"
assert len(add_one.output_defs) == 1
assert add_one.output_defs[0].dagster_type.unique_name == "Int"
def _make_foo():
class Foo:
pass
def foo(x: "Foo") -> "Foo":
return x
return foo
def test_invalid_string_typed_input():
with pytest.raises(
DagsterInvalidDefinitionError, match='Failed to resolve type annotation "Foo"'
):
op(_make_foo())
def test_wrapped_input_and_output_lambda():
@op
def add_one(nums: List[int]) -> Optional[List[int]]:
return [num + 1 for num in nums]
assert add_one
assert len(add_one.input_defs) == 1
assert add_one.input_defs[0].name == "nums"
assert add_one.input_defs[0].dagster_type.kind == DagsterTypeKind.LIST
assert add_one.input_defs[0].dagster_type.inner_type.unique_name == "Int"
assert len(add_one.output_defs) == 1
assert add_one.output_defs[0].dagster_type.kind == DagsterTypeKind.NULLABLE
assert add_one.output_defs[0].dagster_type.inner_type.kind == DagsterTypeKind.LIST
def test_kitchen_sink():
@usable_as_dagster_type
class Custom:
pass
@op
def sink(
n: int,
f: float,
b: bool,
s: str,
x: Any,
o: Optional[str],
m: List[str],
c: Custom,
):
pass
assert sink.input_defs[0].name == "n"
assert sink.input_defs[0].dagster_type.unique_name == "Int"
assert sink.input_defs[1].name == "f"
assert sink.input_defs[1].dagster_type.unique_name == "Float"
assert sink.input_defs[2].name == "b"
assert sink.input_defs[2].dagster_type.unique_name == "Bool"
assert sink.input_defs[3].name == "s"
assert sink.input_defs[3].dagster_type.unique_name == "String"
assert sink.input_defs[4].name == "x"
assert sink.input_defs[4].dagster_type.unique_name == "Any"
assert sink.input_defs[5].name == "o"
assert sink.input_defs[5].dagster_type.kind == DagsterTypeKind.NULLABLE
assert sink.input_defs[6].name == "m"
assert sink.input_defs[6].dagster_type.kind == DagsterTypeKind.LIST
assert sink.input_defs[7].name == "c"
assert sink.input_defs[7].dagster_type.unique_name == "Custom"
def test_composites():
@op
def emit_one() -> int:
return 1
@op
def subtract(n1: int, n2: int) -> int:
return n1 - n2
@graph
def add_one(a: int) -> int:
return subtract(a, emit_one())
assert add_one.input_mappings
def test_emit_dict():
@op
def emit_dict() -> dict:
return {"foo": "bar"}
solid_result = wrap_op_in_graph_and_execute(emit_dict)
assert solid_result.output_value() == {"foo": "bar"}
def test_dict_input():
@op
def intake_dict(inp: dict) -> str:
return inp["foo"]
solid_result = wrap_op_in_graph_and_execute(intake_dict, input_values={"inp": {"foo": "bar"}})
assert solid_result.output_value() == "bar"
def test_emit_dagster_dict():
@op
def emit_dagster_dict() -> Dict:
return {"foo": "bar"}
solid_result = wrap_op_in_graph_and_execute(emit_dagster_dict)
assert solid_result.output_value() == {"foo": "bar"}
def test_dict_dagster_input():
@op
def intake_dagster_dict(inp: Dict) -> str:
return inp["foo"]
solid_result = wrap_op_in_graph_and_execute(
intake_dagster_dict, input_values={"inp": {"foo": "bar"}}
)
assert solid_result.output_value() == "bar"
def test_python_tuple_input():
@op
def intake_tuple(inp: tuple) -> int:
return inp[1]
assert (
wrap_op_in_graph_and_execute(intake_tuple, input_values={"inp": (3, 4)}).output_value() == 4
)
def test_python_tuple_output():
@op
def emit_tuple() -> tuple:
return (4, 5)
assert wrap_op_in_graph_and_execute(emit_tuple).output_value() == (4, 5)
def test_nested_kitchen_sink():
@op
def no_execute() -> Optional[List[Tuple[List[int], str, Dict[str, Optional[List[str]]]]]]:
pass
assert (
no_execute.output_defs[0].dagster_type.display_name
== "[Tuple[[Int],String,Dict[String,[String]?]]]?"
)
assert (
no_execute.output_defs[0].dagster_type.typing_type
== Optional[List[Tuple[List[int], str, Dict[str, Optional[List[str]]]]]]
)
def test_infer_input_description_from_docstring_failure():
    # docstring is invalid because it has a dash instead of a colon to delimit the argument type and
    # description
@op
def my_op(_arg1):
"""
Args:
_arg1 - description of arg.
""" # noqa: D212
assert my_op
def test_infer_input_description_from_docstring_rest():
@op
def rest(_context, hello: str, optional: int = 5):
"""
:param str hello: hello world param
:param int optional: optional param, defaults to 5.
""" # noqa: D212
return hello + str(optional)
defs = infer_input_props(rest.compute_fn.decorated_fn, context_arg_provided=True)
assert len(defs) == 2
hello_param = defs[0]
assert hello_param.name == "hello"
assert hello_param.annotation == str
optional_param = defs[1]
assert optional_param.name == "optional"
assert optional_param.annotation == int
assert optional_param.default_value == 5
def test_infer_descriptions_from_docstring_numpy():
@op
def good_numpy(_context, hello: str, optional: int = 5):
"""
Test.
Parameters
----------
hello:
hello world param
optional:
optional param, default 5
""" # noqa: D212
return hello + str(optional)
defs = infer_input_props(good_numpy.compute_fn.decorated_fn, context_arg_provided=True)
assert len(defs) == 2
hello_param = defs[0]
assert hello_param.name == "hello"
assert hello_param.annotation == str
assert hello_param.description == "hello world param"
optional_param = defs[1]
assert optional_param.name == "optional"
assert optional_param.annotation == int
assert optional_param.default_value == 5
assert optional_param.description == "optional param, default 5"
def test_infer_descriptions_from_docstring_google():
@op
def good_google(_context, hello: str, optional: int = 5):
"""Test.
Args:
hello (str): hello world param
optional (int, optional): optional param. Defaults to 5.
"""
return hello + str(optional)
defs = infer_input_props(good_google.compute_fn.decorated_fn, context_arg_provided=True)
assert len(defs) == 2
hello_param = defs[0]
assert hello_param.name == "hello"
assert hello_param.annotation == str
assert hello_param.description == "hello world param"
optional_param = defs[1]
assert optional_param.name == "optional"
assert optional_param.annotation == int
assert optional_param.default_value == 5
assert optional_param.description == "optional param. Defaults to 5."
def test_infer_output_description_from_docstring_failure():
    # docstring is invalid because it has a dash instead of a colon to delimit the return type and
    # description
@op
def google() -> int:
"""
Returns:
int - a number
""" # noqa: D212, D415
return 1
assert google
def test_infer_output_description_from_docstring_numpy():
@op
def numpy(_context) -> int:
"""
Returns
-------
int
a number.
""" # noqa: D212
return 1
props = infer_output_props(numpy.compute_fn.decorated_fn)
assert props.description == "a number."
assert props.annotation == int
def test_infer_output_description_from_docstring_rest():
@op
def rest(_context) -> int:
"""
:return int: a number.
""" # noqa: D212
return 1
props = infer_output_props(rest.compute_fn.decorated_fn)
assert props.description == "a number."
assert props.annotation == int
def test_infer_output_description_from_docstring_google():
@op
def google(_context) -> int:
"""
Returns:
int: a number.
""" # noqa: D212
return 1
props = infer_output_props(google.compute_fn.decorated_fn)
assert props.description == "a number."
assert props.annotation == int
def test_job_api_stability():
@job
def empty() -> None:
pass
# assert definition does not error
assert empty
def test_unregistered_type_annotation_output():
class MyClass:
pass
@op
def my_op(_) -> MyClass:
return MyClass()
assert my_op.output_defs[0].dagster_type.display_name == "MyClass"
assert my_op.output_defs[0].dagster_type.typing_type == MyClass
@job
def my_job():
my_op()
my_job.execute_in_process()
def test_unregistered_type_annotation_input():
class MyClass:
pass
@op
def op1(_):
return MyClass()
@op
def op2(_, _input1: MyClass):
pass
@job
def my_job():
op2(op1())
assert op2.input_defs[0].dagster_type.display_name == "MyClass"
my_job.execute_in_process()
def test_unregistered_type_annotation_input_op():
class MyClass:
pass
@op
def op2(_, _input1: MyClass):
pass
assert op2.input_defs[0].dagster_type.display_name == "MyClass"
def test_unregistered_type_annotation_input_op_merge():
class MyClass:
pass
@op(ins={"_input1": In()})
def op2(_input1: MyClass):
pass
assert op2.input_defs[0].dagster_type.display_name == "MyClass"
def test_use_auto_type_twice():
class MyClass:
pass
@op
def my_op(_) -> MyClass:
return MyClass()
@op
def my_op_2(_) -> MyClass:
return MyClass()
@job
def my_job():
my_op()
my_op_2()
my_job.execute_in_process()
def test_register_after_op_definition():
class MyClass:
pass
@op
def _my_op(_) -> MyClass:
return MyClass()
my_dagster_type = DagsterType(name="aaaa", type_check_fn=lambda _, _a: True)
with pytest.raises(DagsterInvalidDefinitionError):
make_python_type_usable_as_dagster_type(MyClass, my_dagster_type)
def test_same_name_different_modules():
class MyClass:
pass
from dagster_tests.general_tests.py3_tests.other_module import MyClass as OtherModuleMyClass
@op
def my_op(_) -> MyClass:
return MyClass()
@op
def my_op_2(_) -> OtherModuleMyClass:
return OtherModuleMyClass()
@job
def my_job():
my_op()
my_op_2()
my_job.execute_in_process()
def test_fan_in():
class MyClass:
pass
@op
def upstream_op(_):
return MyClass()
@op
def downstream_op(_, _input: List[MyClass]):
pass
@job
def my_job():
downstream_op([upstream_op.alias("a")(), upstream_op.alias("b")()])
assert downstream_op.input_defs[0].dagster_type.display_name == "[MyClass]"
assert downstream_op.input_defs[0].dagster_type.typing_type == List[MyClass]
my_job.execute_in_process()
def test_composites_user_defined_type():
class MyClass:
pass
@op
def emit_one() -> MyClass:
return MyClass()
@op
def subtract(_n1: MyClass, _n2: MyClass) -> MyClass:
return MyClass()
@graph
def add_one(a: MyClass) -> MyClass:
return subtract(a, emit_one())
assert add_one.input_mappings
|
29b604a453abe583614fdc6f768194827caf1b83
|
4506d81df5ae98078e5cbe79f613514ad12b1c83
|
/nipype/interfaces/niftyfit/tests/test_auto_FitAsl.py
|
14093322cc1661dda219bdcfb87124986ada7706
|
[
"BSD-3-Clause",
"Apache-2.0"
] |
permissive
|
nipy/nipype
|
d52eba1b98fda68e24d006ac0d5701fc8a531b9c
|
03a236320fa229299d637ff9af97865a6ae76aca
|
refs/heads/master
| 2023-08-28T10:36:07.020541
| 2023-08-25T13:40:09
| 2023-08-25T13:40:09
| 791,477
| 692
| 569
|
NOASSERTION
| 2023-09-11T06:04:51
| 2010-07-22T17:06:49
|
Python
|
UTF-8
|
Python
| false
| false
| 3,924
|
py
|
test_auto_FitAsl.py
|
# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT
from ..asl import FitAsl
def test_FitAsl_inputs():
input_map = dict(
args=dict(
argstr="%s",
),
cbf_file=dict(
argstr="-cbf %s",
extensions=None,
name_source=["source_file"],
name_template="%s_cbf.nii.gz",
),
dpld=dict(
argstr="-dPLD %f",
),
dt_inv2=dict(
argstr="-dTinv2 %f",
),
eff=dict(
argstr="-eff %f",
),
environ=dict(
nohash=True,
usedefault=True,
),
error_file=dict(
argstr="-error %s",
extensions=None,
name_source=["source_file"],
name_template="%s_error.nii.gz",
),
gm_plasma=dict(
argstr="-gmL %f",
),
gm_t1=dict(
argstr="-gmT1 %f",
),
gm_ttt=dict(
argstr="-gmTTT %f",
),
ir_output=dict(
argstr="-IRoutput %s",
extensions=None,
),
ir_volume=dict(
argstr="-IRvolume %s",
extensions=None,
),
ldd=dict(
argstr="-LDD %f",
),
m0map=dict(
argstr="-m0map %s",
extensions=None,
),
m0mape=dict(
argstr="-m0mape %s",
extensions=None,
),
mask=dict(
argstr="-mask %s",
extensions=None,
position=2,
),
mul=dict(
argstr="-mul %f",
),
mulgm=dict(
argstr="-sig",
),
out=dict(
argstr="-out %f",
),
pasl=dict(
argstr="-pasl",
),
pcasl=dict(
argstr="-pcasl",
),
plasma_coeff=dict(
argstr="-L %f",
),
pld=dict(
argstr="-PLD %f",
),
pv0=dict(
argstr="-pv0 %d",
),
pv2=dict(
argstr="-pv2 %d",
),
pv3=dict(
argstr="-pv3 %d %d %d",
),
pv_threshold=dict(
argstr="-pvthreshold",
),
seg=dict(
argstr="-seg %s",
extensions=None,
),
segstyle=dict(
argstr="-segstyle",
),
sig=dict(
argstr="-sig",
),
source_file=dict(
argstr="-source %s",
extensions=None,
mandatory=True,
position=1,
),
syn_file=dict(
argstr="-syn %s",
extensions=None,
name_source=["source_file"],
name_template="%s_syn.nii.gz",
),
t1_art_cmp=dict(
argstr="-T1a %f",
),
t1map=dict(
argstr="-t1map %s",
extensions=None,
),
t_inv1=dict(
argstr="-Tinv1 %f",
),
t_inv2=dict(
argstr="-Tinv2 %f",
),
wm_plasma=dict(
argstr="-wmL %f",
),
wm_t1=dict(
argstr="-wmT1 %f",
),
wm_ttt=dict(
argstr="-wmTTT %f",
),
)
inputs = FitAsl.input_spec()
for key, metadata in list(input_map.items()):
for metakey, value in list(metadata.items()):
assert getattr(inputs.traits()[key], metakey) == value
def test_FitAsl_outputs():
output_map = dict(
cbf_file=dict(
extensions=None,
),
error_file=dict(
extensions=None,
),
syn_file=dict(
extensions=None,
),
)
outputs = FitAsl.output_spec()
for key, metadata in list(output_map.items()):
for metakey, value in list(metadata.items()):
assert getattr(outputs.traits()[key], metakey) == value
|
3a54f4062d414fdef36612445770870fdc1e0d0f
|
7c91ff850f81bf8759b055971d592a71ef025732
|
/tools/gengl.py
|
cb2afea8619c916796d174b66d0fc2bf0ba610ca
|
[
"BSD-3-Clause",
"LicenseRef-scancode-free-unknown"
] |
permissive
|
pyglet/pyglet
|
d9da2cccd52a6bc5c09548536876602f6e1412f0
|
094c638f0529fecab4e74556487b92453a78753c
|
refs/heads/master
| 2023-08-17T15:08:09.192350
| 2023-08-17T01:51:50
| 2023-08-17T01:51:50
| 191,043,601
| 1,687
| 427
|
BSD-3-Clause
| 2023-09-14T08:51:31
| 2019-06-09T18:55:00
|
Python
|
UTF-8
|
Python
| false
| false
| 7,237
|
py
|
gengl.py
|
"""
Generate gl.py and gl_compat.py
See : tools/requirements.txt
We are using the opengl-registry project to extract this information from
https://raw.githubusercontent.com/KhronosGroup/OpenGL-Registry/master/xml/gl.xml
A local version gl.xml can also be used.
Usage:
# Fetch gl.xml from Khronos Github repo
python gengl.py
python gengl.py --source url
# Use local gl.xml
python gengl.py --source local
"""
import sys
from argparse import ArgumentParser
from pathlib import Path
from opengl_registry import Registry, RegistryReader
REPO_ROOT = Path(__file__).parent.parent.resolve()
DEST_PATH = REPO_ROOT / "pyglet" / "gl"
def main():
values = parse_args(sys.argv[1:])
if values.source == "url":
# Fetch gl.xml from Khronos Github repo
reader = RegistryReader.from_url()
else:
# Use the local gl.xml file
reader = RegistryReader.from_file(Path(REPO_ROOT / "tools" / "gl.xml"))
registry = reader.read()
# OpenGL extensions we want to include
extensions = [
"GL_ARB_multisample",
"EXT_texture_compression_s3tc", # For pyglet.image.codecs.dds
"GL_EXT_framebuffer_object", # Needed for GL_FRAMEBUFFER_INCOMPLETE_DIMENSIONS
"GL_ARB_bindless_texture",
"GL_ARB_gpu_shader_int64",
"GL_NV_mesh_shader",
]
core_profile = registry.get_profile(
api="gl",
profile="core",
version="4.6",
extensions=extensions,
)
compat_profile = registry.get_profile(
api="gl",
profile="compat",
version="4.6",
extensions=extensions,
)
# es_profile = registry.get_profile(
# api="gles2",
# profile="core",
# version="3.2",
# extensions=extensions,
# )
core_writer = PygletGLWriter(registry=core_profile, out_file=DEST_PATH / "gl.py")
core_writer.run()
compat_writer = PygletGLWriter(registry=compat_profile, out_file=DEST_PATH / "gl_compat.py")
compat_writer.run()
# es_writer = PygletGLWriter(registry=es_profile, out_file=DEST_PATH / "gl_es.py")
# es_writer.run()
def parse_args(args):
parser = ArgumentParser()
parser.add_argument("--source", choices=["local", "url"], default="url")
return parser.parse_args(args)
class PygletGLWriter:
"""Writes gl.py, gl_compat.py, and gl_es.py"""
# All gl types manually matched to ctypes.
# Inspect registry.types
types = {
"GLenum": "c_uint",
"GLboolean": "c_ubyte",
"GLbitfield": "c_uint",
"GLvoid": "None",
"GLbyte": "c_char",
"GLubyte": "c_ubyte",
"GLshort": "c_short",
"GLushort": "c_ushort",
"GLint": "c_int",
"GLuint": "c_uint",
"GLclampx": "c_uint",
"GLsizei": "c_int",
"GLfloat": "c_float",
"GLclampf": "c_float",
"GLdouble": "c_double",
"GLclampd": "c_double",
"GLchar": "c_char",
"GLintptr": "c_ptrdiff_t",
"GLsizeiptr": "c_ptrdiff_t",
"GLint64": "c_int64",
"GLuint64": "c_uint64",
"GLuint64EXT": "c_uint64",
"GLsync": "POINTER(struct___GLsync)",
"GLDEBUGPROC": "CFUNCTYPE(None, GLenum, GLenum, GLuint, GLenum, GLsizei, POINTER(GLchar), POINTER(GLvoid))",
}
exclude_commands = set()
def __init__(self, *, registry: Registry, out_file: Path):
self._registry = registry
self._out_file = out_file
self._out = None
self._all = [] # Entries for __all__
def run(self):
"""Write the file and close"""
self._out = open(self._out_file, mode='w')
self.write_header()
self.write_types()
self.write_enums()
self.write_commands()
self.write_footer()
self._out.close()
def write(self, content: str):
"""Write out a string to the out file"""
self._out.write(content)
def write_lines(self, lines: str):
"""Write one or several lines to the out file"""
for line in lines:
self._out.write(line)
self._out.write("\n")
def write_header(self):
"""Write the header"""
with open(REPO_ROOT / "tools" / "gl.template") as fd:
self.write(fd.read())
def write_types(self):
"""Write all types"""
self.write_lines(["# GL type definitions"])
self.write_lines([f"{k} = {v}" for k, v in self.types.items()])
self.write_lines([""])
self._all.extend(self.types.keys())
def write_enums(self):
"""Write all enums"""
self.write_lines(["# GL enumerant (token) definitions"])
self.write_lines([
f"{e.name} = {e.value_int}"
for e in sorted(self._registry.enums.values())
])
self.write_lines([""])
self._all.extend(self._registry.enums.keys())
def write_commands(self):
"""Write all commands"""
self.write_lines(["# GL command definitions"])
# _link_function params : name, restype, argtypes, requires=None, suggestions=None
for cmd in sorted(self._registry.commands.values()):
if cmd.name in self.exclude_commands:
continue
# Return type: If the function returns a pointer type ...
if "*" in cmd.proto:
restype = f"POINTER({cmd.ptype})"
else:
restype = cmd.ptype or "None"
# Arguments can be pointer and pointer-pointer
arguments = []
for param in cmd.params:
# print(cmd.name, param.name, param.ptype, "|", param.value)
# Detect void pointers. They don't have a ptype set
if "void" in param.value:
arguments.append("POINTER(GLvoid)")
else:
# Ensure we actually know what the type is
if not self.types.get(param.ptype):
raise ValueError(f"ptype {param.ptype} not a known type")
# Handle pointer-pointer and pointers: *, **, *const*
if param.value.count("*") == 2:
arguments.append(f"POINTER(POINTER({param.ptype}))")
elif param.value.count("*") == 1:
arguments.append(f"POINTER({param.ptype})")
else:
arguments.append(param.ptype)
argtypes = ", ".join(arguments)
requires = f"OpenGL {cmd.requires}" if cmd.requires else "None"
# NOTE: PROCs are optional
# proc_name = f"PFN{cmd.name.upper()}PROC"
self.write_lines([
f"{cmd.name} = _link_function('{cmd.name}', {restype}, [{argtypes}], requires='{requires}')",
# f"{proc_name} = CFUNCTYPE({restype}, {argtypes})",
])
self._all.append(cmd.name)
# self._all.append(proc_name)
self.write_lines([""])
def write_footer(self):
"""Write __all__ section"""
self.write_lines([
"",
"__all__ = [",
*[f" '{name}'," for name in self._all],
"]",
])
if __name__ == "__main__":
main()
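# For reference, each command emitted by write_commands() has this general shape
# (glClear here is only an illustrative example; the real set comes from gl.xml):
#   glClear = _link_function('glClear', None, [GLbitfield], requires='OpenGL 1.0')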
|
95849bf36327dcb6c0dbec002ae9f2368594bb3b
|
e07885a1e08db9e335a0c88d578948aaf90b2af6
|
/genomepy/providers/ensembl.py
|
5bf592418cbddc11818b698c70e63563a793657e
|
[
"MIT"
] |
permissive
|
vanheeringen-lab/genomepy
|
79b1af88e2f292d8ea1662ca296d48b26ed5d229
|
17f1ec9622488721d97dda4bdc6a01938c023177
|
refs/heads/master
| 2023-06-25T03:47:41.354519
| 2023-06-14T13:36:03
| 2023-06-14T13:36:03
| 76,658,708
| 260
| 29
|
MIT
| 2023-05-30T12:17:38
| 2016-12-16T14:11:17
|
Python
|
UTF-8
|
Python
| false
| false
| 11,629
|
py
|
ensembl.py
|
import re
import requests
from loguru import logger
from genomepy.caching import cache_exp_genomes, cache_exp_other, disk_cache, lock
from genomepy.exceptions import GenomeDownloadError
from genomepy.online import check_url, retry
from genomepy.providers.base import BaseProvider
from genomepy.utils import safe
class EnsemblProvider(BaseProvider):
"""
Ensembl genome provider.
Will search both ensembl.org as well as ensemblgenomes.org.
The bacteria division is not yet supported.
"""
name = "Ensembl"
accession_fields = ["assembly_accession"]
taxid_fields = ["taxonomy_id"]
description_fields = [
"name",
"scientific_name",
"url_name",
"display_name",
]
_cli_install_options = {
"toplevel": {
"long": "toplevel",
"help": "always download toplevel-genome",
"flag_value": True,
},
"version": {
"long": "version",
"help": "select release version",
"type": int,
"default": None,
},
}
_url = "https://rest.ensembl.org/"
def __init__(self):
self._provider_status()
# Populate on init, so that methods can be cached
self.genomes = get_genomes(self._url)
@staticmethod
def ping():
"""Can the provider be reached?"""
api_online = bool(check_url("https://rest.ensembl.org/info/ping?"))
vertebrate_url_online = bool(check_url("http://ftp.ensembl.org"))
other_url_online = bool(check_url("http://ftp.ensemblgenomes.org"))
return api_online and vertebrate_url_online and other_url_online
def _genome_info_tuple(self, name, size=False):
"""tuple with assembly metadata"""
accession = self.assembly_accession(name)
taxid = self.genome_taxid(name)
annotations = bool(self.annotation_links(name))
species = self.genomes[name].get("scientific_name")
other = self.genomes[name].get("genebuild")
if size:
length = self.genomes[name]["base_count"]
return name, accession, taxid, annotations, species, length, other
return name, accession, taxid, annotations, species, other
def get_version(self, name: str, version=None) -> int:
"""
Retrieve the latest Ensembl or EnsemblGenomes release version,
or check if the requested release version exists.
"""
division, is_vertebrate = self.get_division(name)
if version is None:
latest_version = self.get_release(is_vertebrate)
return latest_version
if not str(version).isdecimal():
raise TypeError("Version must be a number")
version = int(version)
all_versions = self.get_releases(is_vertebrate)
ensembl = f"Ensembl{'' if is_vertebrate else 'Genomes'}"
if version not in all_versions:
raise ValueError(
f"{ensembl} release version {version} "
f"not found. Available versions: {all_versions}"
)
releases = self.releases_with_assembly(name)
if version not in releases:
raise FileNotFoundError(
f"{name} not found on {ensembl} release {version}. "
f"Available on release versions: {releases}"
)
return version
def get_division(self, name: str):
"""Retrieve the division of a genome."""
genome = self.genomes[safe(name)]
division = str(genome["division"]).lower().replace("ensembl", "")
if division == "bacteria":
raise NotImplementedError("Bacteria from Ensembl not supported.")
is_vertebrate = division == "vertebrates"
return division, is_vertebrate
@disk_cache.memoize(
expire=cache_exp_other, tag="get_release-ensembl", ignore={"self"}
)
def get_release(self, is_vertebrate: bool) -> int:
"""Retrieve current Ensembl or EnsemblGenomes release version."""
ext = "/info/data/?" if is_vertebrate else "/info/eg_version?"
ret = retry(request_json, 3, self._url, ext)
return int(ret["releases"][0] if is_vertebrate else ret["version"])
@staticmethod
@disk_cache.memoize(expire=cache_exp_other)
def get_releases(is_vertebrate: bool):
"""Retrieve all Ensembl or EnsemblGenomes release versions."""
url = "http://ftp.ensemblgenomes.org/pub?"
if is_vertebrate:
url = "http://ftp.ensembl.org/pub?"
ret = retry(requests.get, 3, url)
# sort releases new to old
releases = sorted(
[int(i) for i in re.findall(r'"release-(\d+)/"', ret.text)],
reverse=True,
)
if is_vertebrate:
# ignore immature releases
releases = [r for r in releases if r > 46]
return releases
@lock
@disk_cache.memoize(
expire=cache_exp_other, tag="get_releases-ensembl", ignore={"self"}
)
def releases_with_assembly(self, name: str):
"""List all Ensembl or EnsemblGenomes release versions with the specified genome."""
genome = self.genomes[safe(name)]
lwr_name = genome["name"]
asm_name = re.sub(r"\.p\d+$", "", safe(genome["assembly_name"]))
division, is_vertebrate = self.get_division(name)
# all releases with the genome fasta
releases = self.get_releases(is_vertebrate)
releases_with_assembly = []
for release in releases:
url = f"http://ftp.ensemblgenomes.org/pub/release-{release}/{division}/fasta/{lwr_name}/dna/"
if is_vertebrate:
url = f"https://ftp.ensembl.org/pub/release-{release}/fasta/{lwr_name}/dna/"
ret = retry(requests.get, 3, url)
if asm_name in ret.text: # 404 error has text too, so this always works
releases_with_assembly.append(release)
else:
break
return releases_with_assembly
def get_genome_download_link(self, name, mask="soft", **kwargs):
"""
Return http link to the genome sequence
Parameters
----------
name : str
Genome name. Current implementation will fail if exact
name is not found.
mask : str , optional
Masking level. Options: soft, hard or none. Default is soft.
Returns
        -------
str with the http download link.
"""
genome = self.genomes[safe(name)]
division, is_vertebrate = self.get_division(name)
# base directory of the genome
ftp = "http://ftp.ensemblgenomes.org"
if is_vertebrate:
ftp = "http://ftp.ensembl.org"
version = self.get_version(name, kwargs.get("version"))
div_path = "" if is_vertebrate else f"/{division}"
lwr_name = genome["name"]
ftp_directory = f"{ftp}/pub/release-{version}{div_path}/fasta/{lwr_name}/dna"
# this assembly has its own directory
if name == "GRCh37":
ftp_directory = genome["genome"].format(version)
# specific fasta file
cap_name = lwr_name.capitalize()
asm_name = re.sub(r"\.p\d+$", "", safe(genome["assembly_name"]))
mask_lvl = {"soft": "_sm", "hard": "_rm", "none": ""}[mask]
asm_lvl = "toplevel" if kwargs.get("toplevel") else "primary_assembly"
version_tag = "" if version > 30 else f".{version}"
ftp_file = f"{cap_name}.{asm_name}{version_tag}.dna{mask_lvl}.{asm_lvl}.fa.gz"
# combine
link = f"{ftp_directory}/{ftp_file}"
if check_url(link, 2):
return link
# primary assemblies do not always exist
if asm_lvl == "primary_assembly":
link = link.replace("primary_assembly", "toplevel")
if check_url(link, 2):
return link
raise GenomeDownloadError(
f"Could not download genome {name} from {self.name}.\n"
"URL is broken. Select another genome or provider.\n"
f"Broken URL: {link}"
)
def get_annotation_download_links(self, name, **kwargs):
"""
Retrieve functioning gene annotation download link(s).
Parameters
----------
name : str
genome name
**kwargs: dict, optional:
version : Ensembl version to use. By default the latest version is used
Returns
-------
list
http link(s)
"""
genome = self.genomes[safe(name)]
division, is_vertebrate = self.get_division(name)
# base directory of the genome
ftp = "http://ftp.ensemblgenomes.org"
if is_vertebrate:
ftp = "http://ftp.ensembl.org"
version = self.get_version(name, kwargs.get("version"))
div_path = "" if is_vertebrate else f"/{division}"
lwr_name = genome["name"]
ftp_directory = f"{ftp}/pub/release-{version}{div_path}/gtf/{lwr_name}"
# specific gtf file
cap_name = lwr_name.capitalize()
asm_name = re.sub(r"\.p\d+$", "", safe(genome["assembly_name"]))
ftp_file = f"{cap_name}.{asm_name}.{version}.gtf.gz"
# combine
link = f"{ftp_directory}/{ftp_file}"
if name == "GRCh37":
link = genome["annotation"].format(version)
return [link] if check_url(link, max_tries=2) else []
def request_json(rest_url, ext):
"""Make a REST request and return as json."""
if rest_url.endswith("/") and ext.startswith("/"):
ext = ext[1:]
r = requests.get(rest_url + ext, headers={"Content-Type": "application/json"})
if not r.ok:
r.raise_for_status()
return r.json()
def add_grch37(genomes):
genomes["GRCh37"] = {
"genome": (
"http://ftp.ensembl.org/pub/grch37/release-{}/fasta/homo_sapiens/dna"
),
"annotation": (
"http://ftp.ensembl.org/pub/grch37/release-{}/gtf/"
"homo_sapiens/Homo_sapiens.GRCh37.87.gtf.gz"
),
"assembly_accession": "GCA_000001405.14",
"taxonomy_id": 9606,
"name": "Homo_sapiens",
"scientific_name": "Homo sapiens",
"url_name": "human",
"assembly_name": "GRCh37",
"division": "vertebrates",
"base_count": "3137144693",
"display_name": "",
"genebuild": "",
}
return genomes
@lock
@disk_cache.memoize(expire=cache_exp_genomes, tag="get_genomes-ensembl")
def get_genomes(rest_url):
logger.info("Downloading assembly summaries from Ensembl")
genomes = {}
divisions = retry(request_json, 3, rest_url, "info/divisions?")
for division in divisions:
if division == "EnsemblBacteria":
continue
division_genomes = retry(
request_json, 3, rest_url, f"info/genomes/division/{division}?"
)
# filter summaries to these keys (to reduce the size of the cached data)
summary_keys_to_keep = [
"assembly_name",
"assembly_accession",
"taxonomy_id",
"name",
"scientific_name",
"url_name",
"display_name",
"genebuild",
"division",
"base_count",
]
for genome in division_genomes:
if "_gca_" in genome["name"]:
continue # ~1600 mislabeled protists and fungi
name = safe(genome["assembly_name"])
genomes[name] = {k: genome[k] for k in summary_keys_to_keep}
genomes = add_grch37(genomes)
return genomes
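# A minimal usage sketch (assumes network access to the Ensembl REST API and FTP
# servers; "GRCh37" is the hard-coded human assembly added by add_grch37 above):
#
#   provider = EnsemblProvider()
#   fasta_link = provider.get_genome_download_link("GRCh37", mask="soft")
#   gtf_links = provider.get_annotation_download_links("GRCh37")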
|
f127ab5c15e1b3f30389dc35e781598a77738aa3
|
efcd21234f3291e8fc561f49a7c88fc57a63e952
|
/tests/functional/validators/test_validators_directives_are_defined.py
|
d4b7dceb2eb386bec5d9f870f155c080d78a90d8
|
[
"MIT"
] |
permissive
|
tartiflette/tartiflette
|
146214a43847d2f423bf74594643c1fdefc746f1
|
421c1e937f553d6a5bf2f30154022c0d77053cfb
|
refs/heads/master
| 2023-09-01T02:40:05.974025
| 2022-01-20T14:55:31
| 2022-01-20T14:55:31
| 119,035,565
| 586
| 39
|
MIT
| 2023-09-11T07:49:27
| 2018-01-26T09:56:10
|
Python
|
UTF-8
|
Python
| false
| false
| 1,096
|
py
|
test_validators_directives_are_defined.py
|
import pytest
@pytest.mark.parametrize(
"query,expected",
[
(
"""
query{
catOrDog(id: 1) @dontExists { ... on Dog{ name @skip(if: false) } }
}
""",
{
"data": None,
"errors": [
{
"message": "Unknow Directive < @dontExists >.",
"path": ["catOrDog"],
"locations": [{"line": 3, "column": 33}],
"extensions": {
"rule": "5.7.1",
"spec": "June 2018",
"details": "https://graphql.github.io/graphql-spec/June2018/#sec-Directives-Are-Defined",
"tag": "directives-are-defined",
},
}
],
},
)
],
)
@pytest.mark.asyncio
@pytest.mark.ttftt_engine
async def test_validators_directives_are_defined(query, expected, engine):
assert await engine.execute(query) == expected
|
bfc55223a9b3ac406c0b9b8cd686fe7e947de348
|
1559f2458e0b3a8319bca67b3041377e8d0ebb65
|
/graphql_ws/django/routing.py
|
395e0df0796690bac29bc7679af77c76965a4ec7
|
[
"MIT"
] |
permissive
|
graphql-python/graphql-ws
|
c6fe4e598fb101aa654e778710cc0b4749f6f815
|
7ef25ecfaf5390bf1a0cb4023272d8cb074368c2
|
refs/heads/master
| 2022-11-11T02:16:31.805096
| 2021-08-24T09:05:56
| 2021-08-24T09:05:56
| 108,325,171
| 286
| 89
|
MIT
| 2022-10-26T22:53:52
| 2017-10-25T20:56:30
|
Python
|
UTF-8
|
Python
| false
| false
| 1,109
|
py
|
routing.py
|
from channels import __version__ as channels_version
from channels.routing import ProtocolTypeRouter, URLRouter
from channels.sessions import SessionMiddlewareStack
from django.utils.version import get_version_tuple
from django.apps import apps
from django.urls import path
from .consumers import GraphQLSubscriptionConsumer
if apps.is_installed("django.contrib.auth"):
from channels.auth import AuthMiddlewareStack
else:
AuthMiddlewareStack = None
channels_version_tuple = get_version_tuple(channels_version)
if channels_version_tuple > (3, 0, 0):
websocket_urlpatterns = [
path("subscriptions", GraphQLSubscriptionConsumer.as_asgi())
]
else:
websocket_urlpatterns = [path("subscriptions", GraphQLSubscriptionConsumer)]
application = ProtocolTypeRouter({"websocket": URLRouter(websocket_urlpatterns)})
session_application = ProtocolTypeRouter(
{"websocket": SessionMiddlewareStack(URLRouter(websocket_urlpatterns))}
)
if AuthMiddlewareStack:
auth_application = ProtocolTypeRouter(
{"websocket": AuthMiddlewareStack(URLRouter(websocket_urlpatterns))}
)
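# A minimal wiring sketch (assuming a Django project; the exact hook depends on
# your Channels version):
#
#   # Channels 2.x: point ASGI_APPLICATION in settings.py at one of the routers above
#   ASGI_APPLICATION = "graphql_ws.django.routing.application"
#   # Channels 3.x: reference this module's router from your project's asgi.py instead.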
|
7e070ef586854c3392d028a3b752657635cdb346
|
45e2f5288afd65cd6b2213117e7df4fdc72847c1
|
/integration_tests/src/main/python/get_json_test.py
|
a6c0e00db0b0be5acb492ea2fe984751c174eb58
|
[
"LicenseRef-scancode-warranty-disclaimer",
"Zlib",
"BSD-3-Clause",
"LicenseRef-scancode-free-unknown",
"Apache-2.0",
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
NVIDIA/spark-rapids
|
49cc2fb633f488dd48337c02b227502fcd473a12
|
5d5b3570eab2c8bb8d77d99613b19197b630a453
|
refs/heads/branch-23.10
| 2023-09-03T19:16:46.422726
| 2023-09-01T13:16:45
| 2023-09-01T13:16:45
| 264,043,501
| 600
| 212
|
Apache-2.0
| 2023-09-13T23:00:22
| 2020-05-14T22:56:44
|
Scala
|
UTF-8
|
Python
| false
| false
| 2,758
|
py
|
get_json_test.py
|
# Copyright (c) 2021-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from asserts import assert_gpu_and_cpu_are_equal_collect, assert_gpu_fallback_collect
from data_gen import *
from pyspark.sql.types import *
from marks import *
def mk_json_str_gen(pattern):
return StringGen(pattern).with_special_case('').with_special_pattern('.{0,10}')
@pytest.mark.parametrize('json_str_pattern', [r'\{"store": \{"fruit": \[\{"weight":\d,"type":"[a-z]{1,9}"\}\], ' \
r'"bicycle":\{"price":[1-9]\d\.\d\d,"color":"[a-z]{0,4}"\}\},' \
r'"email":"[a-z]{1,5}\@[a-z]{3,10}\.com","owner":"[a-z]{3,8}"\}',
r'\{"a": "[a-z]{1,3}"\}'], ids=idfn)
def test_get_json_object(json_str_pattern):
gen = mk_json_str_gen(json_str_pattern)
scalar_json = '{"store": {"fruit": [{"name": "test"}]}}'
assert_gpu_and_cpu_are_equal_collect(
lambda spark: unary_op_df(spark, gen, length=10).selectExpr(
'get_json_object(a,"$.a")',
'get_json_object(a, "$.owner")',
'get_json_object(a, "$.store.fruit[0]")',
'get_json_object(\'%s\', "$.store.fruit[0]")' % scalar_json,
),
conf={'spark.sql.parser.escapedStringLiterals': 'true'})
@allow_non_gpu('ProjectExec')
@pytest.mark.parametrize('json_str_pattern', [r'\{"store": \{"fruit": \[\{"weight":\d,"type":"[a-z]{1,9}"\}\], ' \
r'"bicycle":\{"price":[1-9]\d\.\d\d,"color":"[a-z]{0,4}"\}\},' \
r'"email":"[a-z]{1,5}\@[a-z]{3,10}\.com","owner":"[a-z]{3,8}"\}',
r'\{"a": "[a-z]{1,3}"\}'], ids=idfn)
def test_unsupported_fallback_get_json_object(json_str_pattern):
gen = mk_json_str_gen(json_str_pattern)
scalar_json = '{"store": {"fruit": "test"}}'
pattern = StringGen(pattern=r'\$\.[a-z]{1,9}')
def assert_gpu_did_fallback(sql_text):
assert_gpu_fallback_collect(lambda spark:
gen_df(spark, [('a', gen), ('b', pattern)], length=10).selectExpr(sql_text),
'GetJsonObject',
conf={'spark.sql.parser.escapedStringLiterals': 'true'})
assert_gpu_did_fallback('get_json_object(a, b)')
assert_gpu_did_fallback('get_json_object(\'%s\', b)' % scalar_json)
|
9842151d89177b7590642af20c13dd33bfec6ec9
|
ea0c5e1cac9f6a212ed2700316370da8f7d5ebd9
|
/attributionpriors/ops.py
|
bd89cda77ac392af807c2ea2b588d2feb50b960e
|
[
"MIT"
] |
permissive
|
suinleelab/attributionpriors
|
61906e009e8706d865b9bf11b2752da4680da30f
|
487bd64842f341c918ed08121dc90718eb564789
|
refs/heads/master
| 2021-06-20T08:43:18.695052
| 2021-03-19T19:43:51
| 2021-03-19T19:43:51
| 193,598,358
| 129
| 12
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 12,596
|
py
|
ops.py
|
import tensorflow
# If using tensorflow v1, proceed as normal
if int(tensorflow.__version__[0])<2:
tf = tensorflow
# If using v2, use the compat module and disable tf2 behavior
else:
tf = tensorflow.compat.v1
tf.disable_v2_behavior()
import numpy as np
class AttributionPriorExplainer(object):
def __init__(self, random_alpha=True):
'''
Returns an object of type AttributionPriorExplainer.
Args:
random_alpha: Whether or not the interpolation constant
should be drawn uniformly at random from U[0, 1], or
evenly spaced between [0, 1] with k points, where
                k is the number of background references used during training.
Returns:
The newly created object.'''
self.random_alpha = random_alpha
return
def _permuted_references(self, input_tensor, k, shuffle=False):
'''
Given a tensor of shape [None, ...] where ... indicates input dimensions,
returns a random shuffling of that input.
Args:
input_tensor: An input tensor of shape [None, ...], where ...
indicates the input dimensions.
            k: The number of background reference samples to draw per input.
shuffle: If true, shuffles randomly along the batch axis. If false,
rolls deterministically along the batch axis.
Returns:
A tensor of shape [None, k, ...]. A background reference operation.
'''
if shuffle:
shuffle_list = [tf.random.shuffle(input_tensor)] * k
else:
shuffle_list = [tf.manip.roll(input_tensor, shift=i, axis=0) for i in range(1, k+1)]
return tf.stack(shuffle_list, axis=1, name='background_ref_op')
def _grad_across_multi_output(self, output_tensor, input_tensor, sparse_labels_op=None):
'''
Calculates the gradients for each output with respect to each input.
Args:
input_tensor: An input tensor of shape [None, ...] where ...
indicates the input dimensions. This function will throw an
error if input_tensor is of type list.
output_tensor: A tensor indicating the output of the model. Should be shaped as
[None, num_classes] where None indicates the batch dimension
and num_classes is the number of output classes of the model.
Returns:
A tensor of shape [None, num_classes, ...], where the ... indicates the input dimensions.
'''
if sparse_labels_op is not None:
sample_indices = tf.range(tf.shape(output_tensor)[0])
indices_tensor = tf.stack([sample_indices, tf.cast(sparse_labels_op, tf.int32)], axis=1)
class_selected_output_tensor = tf.gather_nd(output_tensor, indices_tensor)
class_selected_output_tensor.set_shape([None])
grad_tensor = tf.gradients(class_selected_output_tensor, input_tensor)[0]
return grad_tensor
else:
output_class_tensors = tf.unstack(output_tensor, axis=1)
grad_tensors = []
for output_class_tensor in output_class_tensors:
grad_tensor = tf.gradients(output_class_tensor, input_tensor)[0]
grad_tensors.append(grad_tensor)
multi_grad_tensor = tf.stack(grad_tensors, axis=1)
return multi_grad_tensor
def input_to_samples_delta(self, batch_input_op, background_ref_op='roll', k=1):
'''
Creates graph operations to switch between normal input and
input-reference interpolations.
Args:
batch_input_op: Tensor of shape (None, ...), where ... indicates
the input dimensions.
background_ref_op: A tensor of shape (None, k, ...) where ... indicates
the input dimensions, and k represents the number of
background reference samples to draw per input in the batch,
or a function that returns such a tensor when called. Alternatively,
if set to the string 'shuffle' or 'roll',
uses a shuffled/rolled version of the batch input as a background reference.
k: An integer specifying the number of background references if background_ref_op is 'roll' or
'shuffle'. Ignored if background_ref_op is a tensor.
Returns:
cond_input_op: A tensor of shape (None, ...). Use this tensor to build your model.
train_eg: A placeholder with default value False. Set to true to switch from normal inputs
to interpolated reference inputs.
'''
input_dims = batch_input_op.get_shape().as_list()[1:]
num_input_dims = len(input_dims)
def samples_input_fn():
if isinstance(background_ref_op, (tf.Tensor, tf.SparseTensor, tf.Variable)):
# if tf.contrib.framework.is_tensor(background_ref_op):
self.background_ref_op = background_ref_op
if k is not None:
print("Warning: value `{}` of parameter k will be ignored because background_ref_op was an input tensor".format(k))
elif callable(background_ref_op):
self.background_ref_op = background_ref_op()
if k is not None:
print("Warning: value `{}` of parameter k will be ignored because background_ref_op was callable".format(k))
elif isinstance(background_ref_op, str):
if background_ref_op == 'shuffle':
self.background_ref_op = self._permuted_references(batch_input_op, k, shuffle=True)
elif background_ref_op == 'roll':
self.background_ref_op = self._permuted_references(batch_input_op, k, shuffle=False)
else:
raise ValueError('Unrecognized string value `{}` for parameter `background_ref_op` (must be one of `shuffle`, `roll`)'.format(background_ref_op))
else:
raise ValueError('Unrecognized value `{}` for parameter `background_ref_op`'.format(background_ref_op))
batch_size = tf.shape(self.background_ref_op)[0]
k_ = self.background_ref_op.shape[1]
#Grab a [batch_size, k]-sized interpolation sample
#Note that evaluating t_tensor will evaluate background_ref_op implicitly for shape information
if self.random_alpha:
t_tensor = tf.random_uniform(shape=[batch_size, k_], name='t_tensor')
t_tensor = tf.cast(t_tensor, dtype=batch_input_op.dtype)
t_tensor.set_shape([None, k_])
else:
t_tensor = tf.linspace(start=0.0, stop=1.0, num=k_, name='linspace_t')
t_tensor = tf.expand_dims(t_tensor, axis=0)
t_tensor = tf.tile(t_tensor, [batch_size, 1], name='t_tensor')
t_tensor = tf.cast(t_tensor, dtype=batch_input_op.dtype)
t_tensor.set_shape([None, k_])
interp_coef = tf.reshape(t_tensor, [batch_size, k_] + [1] * num_input_dims, name='interp_coef')
#Evaluate the end points
end_point_ref = tf.multiply(1.0 - interp_coef, self.background_ref_op, name='end_point_ref')
input_expand_mult = tf.expand_dims(batch_input_op, axis=1)
end_point_input = tf.multiply(interp_coef, input_expand_mult, name='end_point_input')
#Add the end points together, because, you know, interpolation
samples_input = tf.add(end_point_input, end_point_ref, name='samples_input')
return samples_input
#Define operations to switch between interpolation and normal input
train_eg = tf.placeholder_with_default(False, shape=(), name='train_eg')
cond_input_op = tf.cond(train_eg, samples_input_fn, lambda: batch_input_op)
cond_input_op = tf.reshape(cond_input_op, shape=[-1] + input_dims, name='cond_input_op')
self.samples_delta = tf.subtract(tf.expand_dims(batch_input_op, axis=1),
self.background_ref_op, name='samples_delta')
return cond_input_op, train_eg
def shap_value_op(self, output_op, cond_input_op, sparse_labels_op=None):
'''
Creates a tensor operation to calculate expected gradients with respect to the provided input operation.
This will throw an error if you haven't first called input_to_samples_delta.
Args:
output_op: The output layer of your model, or the layer to take the expected gradients with respect to.
cond_input_op: An operation that returns interpolations between inputs and background reference samples.
The output from input_to_samples_delta.
sparse_labels_op: For multi-class problems, the true labels of your input data. Assumes that there is a discrete
set of classes from 0 to num_classes and that this tensor provides the integer label (NOT ONE-HOT ENCODED)
for each batch of input. This tensor is used to index into the gradient operation such that the
explanations returned for each sample are of its true class. You can also manipulate
this operation to get explanations for a specific class, or set it to
None to get an operation that returns explanations for multiple classes.
Returns:
expected_grads: A tensor of shape (None, ...), the same shape as an input batch. The expected gradients with
respect to the input batch.
'''
multi_output = True
if len(output_op.shape) == 1:
multi_output = False
refs_per_input = self.samples_delta.get_shape().as_list()[1]
if multi_output:
if sparse_labels_op is None:
print('You have requested multi-class values, but have not provided a labels tensor. This may be memory intensive...')
#Of shape (None, num_classes, ...)
gradient_tensor = self._grad_across_multi_output(output_op, cond_input_op)
gradient_tensor = tf.reshape(gradient_tensor, shape=[-1, refs_per_input, output_op.shape[-1]] + cond_input_op.get_shape().as_list()[1:], name='gradient_tensor')
mult_grads = tf.expand_dims(self.samples_delta, axis=2)
mult_grads = tf.multiply(mult_grads, gradient_tensor, name='mult_grads')
expected_grads = tf.reduce_mean(mult_grads, axis=1)
return expected_grads
with tf.device('/cpu:0'):
sparse_labels_op = tf.tile(tf.expand_dims(sparse_labels_op, axis=1), (1, refs_per_input))
sparse_labels_op = tf.reshape(sparse_labels_op, (tf.shape(cond_input_op)[0], ), name='sparse_labels_op')
#Gradients will be of shape (None, ...)
gradient_tensor = self._grad_across_multi_output(output_op, cond_input_op, sparse_labels_op)
else:
assert sparse_labels_op is None, 'You have passed in a sparse_labels_op, but your model is not multi-output'
#Gradients will be of shape (None, ...) - same shape as input
gradient_tensor = tf.gradients(output_op, cond_input_op)[0]
#Reshape the gradient tensor into (None, k, ...) so that we can average across the k.
#Note: it is important that the axis ordering is (None, k, ...). Keeping the k before
#the feature dimensions allows the reshaping after applying the model output to behave correctly.
#This is because the right-most dimensions are squashed first when reshaping.
gradient_tensor = tf.reshape(gradient_tensor, shape=[-1, refs_per_input] + gradient_tensor.get_shape().as_list()[1:], name='gradient_tensor')
#Multiply gradients and input-reference difference
mult_grads = tf.multiply(self.samples_delta, gradient_tensor, name='mult_grads')
#Average over k, the background references
expected_grads = tf.reduce_mean(mult_grads, axis=1, name='expected_grads')
return expected_grads
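# --- Illustrative usage sketch (not part of the original module) ---
# A minimal wiring of the two methods above under the TF1-style graph workflow this
# module already uses. `AttributionExplainer` is a hypothetical name for the class
# defined earlier in this file (its real name and constructor arguments are not shown
# here); the sampling method is referred to in the docstrings as input_to_samples_delta.
# Placeholder shapes and the toy dense head are made up for illustration only.
def _expected_gradients_usage_sketch():
    explainer = AttributionExplainer()  # hypothetical constructor
    x = tf.placeholder(tf.float32, [None, 10], name='x')
    # Interpolate each input against k=8 rolled copies of the batch.
    cond_x, train_eg = explainer.input_to_samples_delta(x, background_ref_op='roll', k=8)
    logits = tf.layers.dense(cond_x, units=3)  # toy multi-class head
    labels = tf.placeholder(tf.int32, [None], name='labels')
    attributions = explainer.shap_value_op(logits, cond_x, sparse_labels_op=labels)
    # At attribution time, feed train_eg=True so the interpolated references are used:
    # sess.run(attributions, feed_dict={x: batch_x, labels: batch_y, train_eg: True})
    return attributions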
|
2d8841f10146e135b0011d5259cf01004f61e78c
|
568fa58296378fa129ab3349adf010daa44ed45b
|
/tests/st/composite/test_graph_kernel_feature.py
|
6c436eacd9ee27777781d23c86c231ba6d3a2a79
|
[
"Apache-2.0",
"BSD-3-Clause",
"NCSA",
"X11-distribute-modifications-variant",
"Zlib",
"MIT",
"LicenseRef-scancode-unknown-license-reference",
"Unlicense",
"LLVM-exception",
"BSD-2-Clause"
] |
permissive
|
mindspore-ai/akg
|
37f471badc66de6a831f1f45ad84344f34d23ef2
|
99f33858d6972741748cbfc9ab0bf9600428fef7
|
refs/heads/master
| 2023-07-25T23:03:17.672665
| 2023-07-11T07:33:57
| 2023-07-11T07:33:57
| 274,077,856
| 319
| 36
|
Apache-2.0
| 2021-12-30T13:43:08
| 2020-06-22T08:09:05
|
Python
|
UTF-8
|
Python
| false
| false
| 1,865
|
py
|
test_graph_kernel_feature.py
|
# Copyright 2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import subprocess
import pytest
from tests.common.base import get_splitted_cases
from tests.st.composite.test_composite_json import test_single_file
@pytest.mark.skip
def test_feature(dir, level, split_nums=1, split_idx=0):
pwd = os.path.dirname(os.path.abspath(__file__))
script_file = os.path.join(pwd, "run_composite_json.py")
def prepare_script(pwd, script_file):
if os.path.isfile(script_file):
return
src = os.path.join(pwd, "../composite/test_composite_json.py")
subprocess.call("cp %s %s" % (src, script_file), shell=True)
prepare_script(pwd, script_file)
output = os.path.join(pwd, "output" + "_" + dir, level)
if not os.path.isdir(output):
os.makedirs(output, exist_ok=True)
files_path = os.path.join(pwd, dir, level)
all_files = os.listdir(files_path)
all_files.sort()
files = get_splitted_cases(all_files, split_nums, split_idx)
for item in files:
file_path = os.path.join(files_path, item)
poly = True
attrs = None
test_single_file(file_path, attrs, poly, profiling=False)
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_elemany_gpu_level0():
test_feature("elemany", "level0")
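# --- Illustrative sketch (not part of the original test) ---
# test_feature shards the sorted case list across CI workers via get_splitted_cases.
# The helper below is a hypothetical, simplified equivalent shown only to illustrate
# the (cases, split_nums, split_idx) contract; the real behaviour lives in
# tests.common.base and may differ (e.g. contiguous chunks instead of round-robin).
def _split_cases_sketch(cases, split_nums, split_idx):
    # Round-robin assignment: worker `split_idx` out of `split_nums` takes
    # every split_nums-th case starting at its own index.
    return [case for i, case in enumerate(sorted(cases)) if i % split_nums == split_idx]
# Example: _split_cases_sketch(["a.json", "b.json", "c.json"], 2, 0) -> ["a.json", "c.json"]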
|
c112bf9f8838d091e2687ac8359484e304d54f97
|
444a9480bce2035565332d4d4654244c0b5cd47b
|
/research/cv/tall/get_310_eval_dataset.py
|
e1bda3cdceeb4bafdb7b8f22c179be4b6ca5f9b6
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference",
"LicenseRef-scancode-proprietary-license"
] |
permissive
|
mindspore-ai/models
|
7ede9c6454e77e995e674628204e1c6e76bd7b27
|
eab643f51336dbf7d711f02d27e6516e5affee59
|
refs/heads/master
| 2023-07-20T01:49:34.614616
| 2023-07-17T11:43:18
| 2023-07-17T11:43:18
| 417,393,380
| 301
| 92
|
Apache-2.0
| 2023-05-17T11:22:28
| 2021-10-15T06:38:37
|
Python
|
UTF-8
|
Python
| false
| false
| 3,119
|
py
|
get_310_eval_dataset.py
|
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""
Use this file to generate ascend310 inference datasets
"""
import argparse
import os
import numpy as np
from src.dataset import TestingDataSet
def get_args():
""" get args"""
parser = argparse.ArgumentParser(description='Train CTRL')
parser.add_argument('--eval_data_dir', type=str, default="/home/yuanyibo/dataset",
help='the directory of train data.')
return parser.parse_args()
args = get_args()
if __name__ == '__main__':
img_path = os.path.join(args.eval_data_dir, "Interval128_256_overlap0.8_c3d_fc6/")
csv_path = os.path.join(args.eval_data_dir, "exp_data/TACoS/test_clip-sentvec.pkl")
dataset = TestingDataSet(img_path, csv_path, 1)
target_path = './310_eval_dataset'
if not os.path.exists(target_path):
os.makedirs(target_path)
for movie_name in dataset.movie_names:
batch = 128
print("loading movie data: " + movie_name + "....")
movie_clip_featmaps, movie_clip_sentences = dataset.load_movie_slidingclip(movie_name, 16)
for k in range(0, len(movie_clip_sentences), batch):
sent_vec = [x[1] for x in movie_clip_sentences[k:k + batch]]
length_k = len(sent_vec)
sent_vec = np.array(sent_vec)
if length_k < batch:
padding = np.zeros(shape=[batch - length_k, sent_vec.shape[1]], dtype=np.float32)
sent_vec = np.concatenate((sent_vec, padding), axis=0)
batch = 128
for t in range(0, len(movie_clip_featmaps), batch):
featmap = [x[1] for x in movie_clip_featmaps[t:t + batch]]
length_t = len(featmap)
featmap = np.array(featmap)
visual_clip_name = [x[0] for x in movie_clip_featmaps[t:t + batch]]
if length_t < batch:
padding = np.zeros(shape=[batch - length_t, featmap.shape[1]], dtype=np.float32)
featmap = np.concatenate((featmap, padding), axis=0)
start = np.array([int(x.split("_")[1]) for x in visual_clip_name])
end = np.array([int(x.split("_")[2].split("_")[0]) for x in visual_clip_name])
input_feat = np.concatenate((featmap, sent_vec), axis=1)
name = movie_name+'_sent_'+str(k)+'_clip_'+str(t)
input_feat.tofile(os.path.join(target_path, f'{name}.data'))
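# --- Illustrative read-back sketch (not part of the original export script) ---
# Each exported .data file holds one padded matrix of shape
# (128, visual_feature_dim + sentence_feature_dim). The concatenated width depends on
# the TACoS preprocessing, so `feature_dim` is a placeholder here, and float32 is an
# assumption (the padding arrays above are float32).
def _load_310_input_sketch(path, feature_dim):
    flat = np.fromfile(path, dtype=np.float32)
    return flat.reshape(128, feature_dim)  # batch of 128 padded rows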
|
7fb5317c362d73895c81261aff31ac36f2133f1b
|
168f6e03f82b9a6530ce174659c218a162248b4d
|
/minidump/streams/Memory64ListStream.py
|
a07682ee9167b7a9401e9845d8d2d7d8c0e3e496
|
[
"MIT"
] |
permissive
|
skelsec/minidump
|
c35a07846540c345d73e72c7e39525e42ba99662
|
069422590a5856eec8bfcf174b55a45442b8942f
|
refs/heads/master
| 2023-04-07T18:58:30.406970
| 2023-02-21T15:35:03
| 2023-02-21T15:35:03
| 134,586,242
| 226
| 50
|
MIT
| 2023-03-20T17:39:12
| 2018-05-23T15:02:19
|
Python
|
UTF-8
|
Python
| false
| false
| 3,211
|
py
|
Memory64ListStream.py
|
#!/usr/bin/env python3
#
# Author:
# Tamas Jos (@skelsec)
#
import io
from minidump.common_structs import *
# https://msdn.microsoft.com/en-us/library/windows/desktop/ms680387(v=vs.85).aspx
class MINIDUMP_MEMORY64_LIST:
def __init__(self):
self.NumberOfMemoryRanges = None
self.BaseRva = None
self.MemoryRanges = []
def get_size(self):
return 8 + 8 + len(self.MemoryRanges) * MINIDUMP_MEMORY_DESCRIPTOR64().get_size()
def to_bytes(self):
t = len(self.MemoryRanges).to_bytes(8, byteorder = 'little', signed = False)
t += self.BaseRva.to_bytes(8, byteorder = 'little', signed = False)
for memrange in self.MemoryRanges:
t += memrange.to_bytes()
return t
@staticmethod
def parse(buff):
mml = MINIDUMP_MEMORY64_LIST()
mml.NumberOfMemoryRanges = int.from_bytes(buff.read(8), byteorder = 'little', signed = False)
mml.BaseRva = int.from_bytes(buff.read(8), byteorder = 'little', signed = False)
for _ in range(mml.NumberOfMemoryRanges):
mml.MemoryRanges.append(MINIDUMP_MEMORY_DESCRIPTOR64.parse(buff))
return mml
def __str__(self):
t = '== MINIDUMP_MEMORY64_LIST ==\n'
t += 'NumberOfMemoryRanges: %s\n' % self.NumberOfMemoryRanges
t += 'BaseRva: %s\n' % self.BaseRva
for i in range(self.NumberOfMemoryRanges):
t += str(self.MemoryRanges[i]) + '\n'
return t
# https://msdn.microsoft.com/en-us/library/windows/desktop/ms680384(v=vs.85).aspx
class MINIDUMP_MEMORY_DESCRIPTOR64:
def __init__(self):
self.StartOfMemoryRange = None
self.DataSize = None
def get_size(self):
return 16
def to_bytes(self):
t = self.StartOfMemoryRange.to_bytes(8, byteorder = 'little', signed = False)
t += self.DataSize.to_bytes(8, byteorder = 'little', signed = False)
return t
@staticmethod
def parse(buff):
md = MINIDUMP_MEMORY_DESCRIPTOR64()
md.StartOfMemoryRange = int.from_bytes(buff.read(8), byteorder = 'little', signed = False)
md.DataSize = int.from_bytes(buff.read(8), byteorder = 'little', signed = False)
return md
def __str__(self):
t = 'Start: %s' % hex(self.StartOfMemoryRange)
t += 'Size: %s' % self.DataSize
return t
class MinidumpMemory64List:
def __init__(self):
self.memory_segments = []
@staticmethod
def parse(dir, buff):
t = MinidumpMemory64List()
buff.seek(dir.Location.Rva)
chunk = io.BytesIO(buff.read(dir.Location.DataSize))
mtl = MINIDUMP_MEMORY64_LIST.parse(chunk)
rva = mtl.BaseRva
for mod in mtl.MemoryRanges:
t.memory_segments.append(MinidumpMemorySegment.parse_full(mod, rva))
rva += mod.DataSize
return t
@staticmethod
async def aparse(dir, buff):
mml = MinidumpMemory64List()
await buff.seek(dir.Location.Rva)
chunk_data = await buff.read(dir.Location.DataSize)
chunk = io.BytesIO(chunk_data)
mtl = MINIDUMP_MEMORY64_LIST.parse(chunk)
rva = mtl.BaseRva
for mod in mtl.MemoryRanges:
ms = MinidumpMemorySegment.parse_full(mod, rva)
mml.memory_segments.append(ms)
rva += mod.DataSize
return mml
def to_table(self):
t = []
t.append(MinidumpMemorySegment.get_header())
for mod in self.memory_segments:
t.append(mod.to_row())
return t
def __str__(self):
return '== MinidumpMemory64List ==\n' + construct_table(self.to_table())
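# --- Illustrative round-trip sketch (not part of the original module) ---
# Builds a two-range MINIDUMP_MEMORY64_LIST, serialises it with to_bytes() and parses
# it back, using only classes defined above. The addresses and sizes are made up.
def _memory64_list_roundtrip_sketch():
	lst = MINIDUMP_MEMORY64_LIST()
	lst.BaseRva = 0x1000
	for start, size in ((0x00400000, 0x2000), (0x7ffe0000, 0x1000)):
		desc = MINIDUMP_MEMORY_DESCRIPTOR64()
		desc.StartOfMemoryRange = start
		desc.DataSize = size
		lst.MemoryRanges.append(desc)
	data = lst.to_bytes()
	parsed = MINIDUMP_MEMORY64_LIST.parse(io.BytesIO(data))
	assert parsed.NumberOfMemoryRanges == 2 and parsed.BaseRva == 0x1000
	return parsed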
|
5b17c82955670edaad5db39b28e3933e1f6716bf
|
bb90ad20468f9fe2039b8c16858bd8eae8bbc050
|
/tests/e2e_logging.py
|
4b4b374c78302d8da0b8f76524dd9b0679e76528
|
[
"Apache-2.0"
] |
permissive
|
microsoft/CCF
|
0997fd81a924d36d775b219720b26b4ff196b18a
|
2fbf87840b9e8334c141f4a9c9b25aae979b0540
|
refs/heads/main
| 2023-09-05T15:39:37.265089
| 2023-09-05T15:27:25
| 2023-09-05T15:27:25
| 180,112,558
| 687
| 229
|
Apache-2.0
| 2023-09-14T14:28:39
| 2019-04-08T09:13:04
|
C++
|
UTF-8
|
Python
| false
| false
| 66,423
|
py
|
e2e_logging.py
|
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the Apache 2.0 License.
import infra.network
import suite.test_requirements as reqs
import infra.logging_app as app
import infra.e2e_args
from infra.tx_status import TxStatus
import infra.checker
import infra.jwt_issuer
import infra.proc
import http
from http.client import HTTPResponse
import ssl
import socket
import os
from collections import defaultdict
import time
import json
import hashlib
import infra.clients
from infra.log_capture import flush_info
import ccf.receipt
from ccf.tx_id import TxID
from cryptography.x509 import load_pem_x509_certificate
from cryptography.hazmat.backends import default_backend
from cryptography.exceptions import InvalidSignature
from cryptography.x509 import ObjectIdentifier
import urllib.parse
import random
import re
import infra.crypto
from infra.runner import ConcurrentRunner
from hashlib import sha256
from infra.member import AckException
import e2e_common_endpoints
from types import MappingProxyType
import threading
import copy
from loguru import logger as LOG
def show_cert(name, cert):
from OpenSSL.crypto import dump_certificate, FILETYPE_TEXT
dc = dump_certificate(FILETYPE_TEXT, cert).decode("unicode_escape")
LOG.info(f"{name} cert: {dc}")
def verify_endorsements_openssl(service_cert, receipt):
from OpenSSL.crypto import (
load_certificate,
FILETYPE_PEM,
X509,
X509Store,
X509StoreContext,
)
store = X509Store()
# pyopenssl does not support X509_V_FLAG_NO_CHECK_TIME. For recovery of expired
# services and historical receipt, we want to ignore the validity time. 0x200000
# is the bitmask for this option in more recent versions of OpenSSL.
X509_V_FLAG_NO_CHECK_TIME = 0x200000
store.set_flags(X509_V_FLAG_NO_CHECK_TIME)
store.add_cert(X509.from_cryptography(service_cert))
chain = None
if "service_endorsements" in receipt:
chain = []
for endo in receipt["service_endorsements"]:
chain.append(load_certificate(FILETYPE_PEM, endo.encode()))
node_cert_pem = receipt["cert"].encode()
ctx = X509StoreContext(store, load_certificate(FILETYPE_PEM, node_cert_pem), chain)
ctx.verify_certificate() # (throws on error)
def verify_receipt(
receipt, service_cert, claims=None, generic=True, skip_endorsement_check=False
):
"""
Raises an exception on failure
"""
node_cert = load_pem_x509_certificate(receipt["cert"].encode(), default_backend())
if not skip_endorsement_check:
service_endorsements = []
if "service_endorsements" in receipt:
service_endorsements = [
load_pem_x509_certificate(endo.encode(), default_backend())
for endo in receipt["service_endorsements"]
]
ccf.receipt.check_endorsements(node_cert, service_cert, service_endorsements)
verify_endorsements_openssl(service_cert, receipt)
if claims is not None:
assert "leaf_components" in receipt
assert "commit_evidence" in receipt["leaf_components"]
commit_evidence_digest = sha256(
receipt["leaf_components"]["commit_evidence"].encode()
).digest()
if not generic:
assert "claims_digest" not in receipt["leaf_components"]
claims_digest = sha256(claims).digest()
leaf = (
sha256(
bytes.fromhex(receipt["leaf_components"]["write_set_digest"])
+ commit_evidence_digest
+ claims_digest
)
.digest()
.hex()
)
else:
assert "leaf_components" in receipt, receipt
assert "write_set_digest" in receipt["leaf_components"]
write_set_digest = bytes.fromhex(receipt["leaf_components"]["write_set_digest"])
assert "commit_evidence" in receipt["leaf_components"]
commit_evidence_digest = sha256(
receipt["leaf_components"]["commit_evidence"].encode()
).digest()
claims_digest = (
bytes.fromhex(receipt["leaf_components"]["claims_digest"])
if "claims_digest" in receipt["leaf_components"]
else b""
)
leaf = (
sha256(write_set_digest + commit_evidence_digest + claims_digest)
.digest()
.hex()
)
root = ccf.receipt.root(leaf, receipt["proof"])
ccf.receipt.verify(root, receipt["signature"], node_cert)
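# --- Illustrative leaf-digest sketch (not part of the original helpers) ---
# Shows, on made-up inputs, the digest construction that verify_receipt checks:
# leaf = sha256(write_set_digest || sha256(commit_evidence) || claims_digest).
# The values below are placeholders, not real ledger contents.
def _leaf_digest_sketch():
    write_set_digest = bytes.fromhex("aa" * 32)           # placeholder digest
    commit_evidence = "ce:2.42:deadbeef"                   # placeholder string
    claims_digest = sha256(b'{"some":"claim"}').digest()   # placeholder claims
    commit_evidence_digest = sha256(commit_evidence.encode()).digest()
    return sha256(write_set_digest + commit_evidence_digest + claims_digest).hexdigest()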
@reqs.description("Running transactions against logging app")
@reqs.supports_methods("/app/log/private", "/app/log/public")
@reqs.at_least_n_nodes(2)
@app.scoped_txs(verify=False)
def test(network, args):
network.txs.issue(
network=network,
number_txs=1,
)
# HTTP2 doesn't support forwarding
if not args.http2:
network.txs.issue(
network=network,
number_txs=1,
on_backup=True,
)
network.txs.verify()
return network
@reqs.description("Protocol-illegal traffic")
@reqs.supports_methods("/app/log/private", "/app/log/public")
@reqs.at_least_n_nodes(2)
def test_illegal(network, args):
primary, _ = network.find_primary()
cafile = os.path.join(network.common_dir, "service_cert.pem")
context = ssl.create_default_context(cafile=cafile)
context.load_cert_chain(
certfile=os.path.join(network.common_dir, "user0_cert.pem"),
keyfile=os.path.join(network.common_dir, "user0_privk.pem"),
)
def get_main_interface_metrics():
with primary.client() as c:
return c.get("/node/metrics").body.json()["sessions"]["interfaces"][
infra.interfaces.PRIMARY_RPC_INTERFACE
]
def send_raw_content(content):
# Send malformed HTTP traffic and check the connection is closed
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
conn = context.wrap_socket(
sock, server_side=False, server_hostname=primary.get_public_rpc_host()
)
conn.connect((primary.get_public_rpc_host(), primary.get_public_rpc_port()))
LOG.info(f"Sending: {content}")
conn.sendall(content)
response = HTTPResponse(conn)
response.begin()
return response
additional_parsing_errors = 0
def send_bad_raw_content(content):
nonlocal additional_parsing_errors
try:
response = send_raw_content(content)
except http.client.RemoteDisconnected:
assert args.http2, "HTTP/2 interface should close session without error"
additional_parsing_errors += 1
return
else:
assert not args.http2, "HTTP/1.1 interface should return valid error"
response_body = response.read()
LOG.warning(response_body)
# If request parsing error, the interface metrics should report it
if response_body.startswith(b"Unable to parse data as a HTTP request."):
additional_parsing_errors += 1
if response.status == http.HTTPStatus.BAD_REQUEST:
assert content in response_body, response_body
else:
assert response.status in {http.HTTPStatus.NOT_FOUND}, (
response.status,
response_body,
)
initial_parsing_errors = get_main_interface_metrics()["errors"]["parsing"]
send_bad_raw_content(b"\x01")
send_bad_raw_content(b"\x01\x02\x03\x04")
send_bad_raw_content(b"NOTAVERB ")
send_bad_raw_content(b"POST / HTTP/42.42")
send_bad_raw_content(json.dumps({"hello": "world"}).encode())
# Tests non-UTF8 encoding in OData
send_bad_raw_content(b"POST /node/\xff HTTP/2.0\r\n\r\n")
for _ in range(40):
content = bytes(random.randint(0, 255) for _ in range(random.randrange(1, 2)))
# If we've accidentally produced something that might look like a valid HTTP request prefix, mangle it further
first_byte = content[0]
if (
first_byte >= ord("A")
and first_byte <= ord("Z")
or first_byte == ord("\r")
or first_byte == ord("\n")
):
content = b"\00" + content
send_bad_raw_content(content)
def send_corrupt_variations(content):
for i in range(len(content) - 1):
for replacement in (b"\x00", b"\x01", bytes([(content[i] + 128) % 256])):
corrupt_content = content[:i] + replacement + content[i + 1 :]
send_bad_raw_content(corrupt_content)
assert (
get_main_interface_metrics()["errors"]["parsing"]
== initial_parsing_errors + additional_parsing_errors
)
if not args.http2:
good_content = b"GET /node/state HTTP/1.1\r\n\r\n"
response = send_raw_content(good_content)
assert response.status == http.HTTPStatus.OK, (response.status, response.read())
send_corrupt_variations(good_content)
# Valid transactions are still accepted
network.txs.issue(
network=network,
number_txs=1,
)
# HTTP/2 does not support forwarding
if not args.http2:
network.txs.issue(
network=network,
number_txs=1,
on_backup=True,
)
network.txs.verify()
return network
@reqs.description("Alternative protocols")
@reqs.supports_methods("/log/private", "/log/public")
@reqs.at_least_n_nodes(2)
def test_protocols(network, args):
primary, _ = network.find_primary()
primary_root = (
f"https://{primary.get_public_rpc_host()}:{primary.get_public_rpc_port()}"
)
url = f"{primary_root}/node/state"
ca_path = os.path.join(network.common_dir, "service_cert.pem")
common_options = [
url,
"-sS",
"--cacert",
ca_path,
"-w",
"\\n%{http_code}\\n%{http_version}",
]
def parse_result_out(r):
assert r.returncode == 0, r.returncode
body = r.stdout.decode()
return body.rsplit("\n", 2)
# Call without any extra args to get golden response
res = infra.proc.ccall(
"curl",
*common_options,
)
expected_response_body, status_code, http_version = parse_result_out(res)
assert status_code == "200", status_code
assert http_version == "2" if args.http2 else "1.1", http_version
protocols = {
# WebSockets upgrade request is ignored
"websockets": {
"extra_args": [
"-H",
"Upgrade: websocket",
"-H",
"Connection: Upgrade",
],
"http_status": "200",
"http_version": "1.1",
},
# HTTP3 is not supported by curl _or_ CCF
"--http3": {
"errors": [
"the installed libcurl version doesn't support this",
"option --http3: is unknown",
]
},
}
if args.http2:
protocols.update(
{
# HTTP/1.x requests fail with closed connection, as HTTP/2
"--http1.0": {"errors": ["Empty reply from server"]},
"--http1.1": {"errors": ["Empty reply from server"]},
# TLS handshake negotiates HTTP/2
"--http2": {"http_status": "200", "http_version": "1.1"},
"--http2-prior-knowledge": {
"http_status": "200",
"http_version": "1.1",
},
}
)
else: # HTTP/1.1
protocols.update(
{
# HTTP/1.x requests succeed, as HTTP/1.1
"--http1.0": {"http_status": "200", "http_version": "1.1"},
"--http1.1": {"http_status": "200", "http_version": "1.1"},
# TLS handshake negotiates HTTP/1.1
"--http2": {"http_status": "200", "http_version": "1.1"},
"--http2-prior-knowledge": {
"http_status": "200",
"http_version": "1.1",
},
}
)
# Test additional protocols with curl
for protocol, expected_result in protocols.items():
LOG.debug(protocol)
cmd = ["curl", *common_options]
if "extra_args" in expected_result:
cmd.extend(expected_result["extra_args"])
else:
cmd.append(protocol)
res = infra.proc.ccall(*cmd)
if "errors" not in expected_result:
response_body, status_code, http_version = parse_result_out(res)
assert (
response_body == expected_response_body
), f"{response_body}\n !=\n{expected_response_body}"
assert status_code == "200", status_code
assert http_version == "2" if args.http2 else "1.1", http_version
else:
assert res.returncode != 0, res.returncode
err = res.stderr.decode()
expected_errors = expected_result["errors"]
assert any(expected_error in err for expected_error in expected_errors), err
# Valid transactions are still accepted
network.txs.issue(
network=network,
number_txs=1,
)
# HTTP/2 does not support forwarding
if not args.http2:
network.txs.issue(
network=network,
number_txs=1,
on_backup=True,
)
network.txs.verify()
return network
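# --- Illustrative sketch (not part of the original tests) ---
# test_protocols relies on curl's `-w "\n%{http_code}\n%{http_version}"` write-out,
# which appends the status code and protocol version on two extra lines after the
# body; rsplit("\n", 2) then recovers the three fields. The sample output is made up.
def _parse_curl_writeout_sketch():
    stdout = '{"state":"PartOfNetwork"}\n200\n1.1'  # hypothetical curl output
    body, status_code, http_version = stdout.rsplit("\n", 2)
    assert (status_code, http_version) == ("200", "1.1")
    return body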
@reqs.description("Write/Read/Delete messages on primary")
@reqs.supports_methods("/app/log/private")
def test_remove(network, args):
check = infra.checker.Checker()
for priv in [True, False]:
txid = network.txs.issue(network, send_public=not priv, send_private=priv)
_, log_id = network.txs.get_log_id(txid)
network.txs.delete(log_id, priv=priv)
r = network.txs.request(log_id, priv=priv)
if args.package in ["libjs_generic"]:
check(r, result={"error": "No such key"})
else:
check(
r,
error=lambda status, msg: status == http.HTTPStatus.BAD_REQUEST.value
and msg.json()["error"]["code"] == "ResourceNotFound",
)
return network
@reqs.description("Write/Read/Clear messages on primary")
@reqs.supports_methods("/app/log/private/all", "/app/log/public/all")
@app.scoped_txs()
def test_clear(network, args):
primary, _ = network.find_primary()
with primary.client() as nc:
check_commit = infra.checker.Checker(nc)
check = infra.checker.Checker()
start_log_id = 7
with primary.client("user0") as c:
log_ids = list(range(start_log_id, start_log_id + 10))
msg = "Will be deleted"
for table in ["private", "public"]:
resource = f"/app/log/{table}"
for log_id in log_ids:
check_commit(
c.post(resource, {"id": log_id, "msg": msg}),
result=True,
)
check(c.get(f"{resource}?id={log_id}"), result={"msg": msg})
check(
c.delete(f"{resource}/all"),
result=None,
)
for log_id in log_ids:
get_r = c.get(f"{resource}?id={log_id}")
if args.package in ["libjs_generic"]:
check(
get_r,
result={"error": "No such key"},
)
else:
check(
get_r,
error=lambda status, msg: status
== http.HTTPStatus.BAD_REQUEST.value,
)
# Make sure no-one else is still looking for these
network.txs.clear()
return network
@reqs.description("Count messages on primary")
@reqs.supports_methods("/app/log/private/count", "/app/log/public/count")
@app.scoped_txs()
def test_record_count(network, args):
primary, _ = network.find_primary()
with primary.client() as nc:
check_commit = infra.checker.Checker(nc)
check = infra.checker.Checker()
with primary.client("user0") as c:
msg = "Will be deleted"
def get_count(resource):
r_get = c.get(f"{resource}/count")
assert r_get.status_code == http.HTTPStatus.OK
return int(r_get.body.json())
for table in ["private", "public"]:
resource = f"/app/log/{table}"
count = get_count(resource)
# Add several new IDs
start_log_id = 7
for i in range(10):
log_id = start_log_id + i
check_commit(
c.post(resource, {"id": log_id, "msg": msg}),
result=True,
)
new_count = get_count(resource)
assert (
new_count == count + 1
), f"Added one ID after {count}, but found {new_count} resulting IDs"
count = new_count
# Clear all IDs
check(
c.delete(f"{resource}/all"),
result=None,
)
new_count = get_count(resource)
assert new_count == 0, f"Found {new_count} remaining IDs after clear"
# Make sure no-one else is still looking for these
network.txs.clear()
return network
@reqs.description("Write/Read with cert prefix")
@reqs.supports_methods("/app/log/private/prefix_cert", "/app/log/private")
def test_cert_prefix(network, args):
msg = "This message will be prefixed"
log_id = 7
for user in network.users:
network.txs.issue(
network,
idx=log_id,
msg=msg,
send_public=False,
url_suffix="prefix_cert",
user=user.local_id,
)
r = network.txs.request(log_id, priv=True, user=user.local_id)
prefixed_msg = f"{user.service_id}: {msg}"
network.txs.priv[log_id][-1]["msg"] = prefixed_msg
assert prefixed_msg in r.body.json()["msg"], r
return network
@reqs.description("Write as anonymous caller")
@reqs.supports_methods("/app/log/private/anonymous", "/app/log/private")
@app.scoped_txs()
def test_anonymous_caller(network, args):
# Create a new user but do not record its identity
network.create_user("user5", args.participants_curve, record=False)
log_id = 7
msg = "This message is anonymous"
network.txs.issue(
network,
1,
idx=log_id,
send_public=False,
msg=msg,
user="user5",
url_suffix="anonymous",
)
prefixed_msg = f"Anonymous: {msg}"
network.txs.priv[log_id][-1]["msg"] = prefixed_msg
r = network.txs.request(log_id, priv=True, user="user5")
assert r.status_code == http.HTTPStatus.UNAUTHORIZED.value, r
r = network.txs.request(log_id, priv=True)
assert msg in r.body.json()["msg"], r
return network
@reqs.description("Use multiple auth types on the same endpoint")
@reqs.supports_methods("/app/multi_auth")
def test_multi_auth(network, args):
primary, _ = network.find_primary()
user = network.users[0]
member = network.consortium.members[0]
with primary.client(user.local_id) as c:
response_bodies = set()
def require_new_response(r):
assert r.status_code == http.HTTPStatus.OK.value, r.status_code
r_body = r.body.text()
assert r_body not in response_bodies, r_body
response_bodies.add(r_body)
LOG.info("Anonymous, no auth")
with primary.client() as c:
r = c.post("/app/multi_auth")
require_new_response(r)
LOG.info("Authenticate as a user, via TLS cert")
with primary.client(user.local_id) as c:
r = c.post("/app/multi_auth")
require_new_response(r)
LOG.info("Authenticate as same user, now with user data")
network.consortium.set_user_data(
primary, user.service_id, {"some": ["interesting", "data", 42]}
)
with primary.client(user.local_id) as c:
r = c.post("/app/multi_auth")
require_new_response(r)
LOG.info("Authenticate as a different user, via TLS cert")
with primary.client("user1") as c:
r = c.post("/app/multi_auth")
require_new_response(r)
LOG.info("Authenticate as a member, via TLS cert")
with primary.client(member.local_id) as c:
r = c.post("/app/multi_auth")
require_new_response(r)
LOG.info("Authenticate as same member, now with user data")
network.consortium.set_member_data(
primary, member.service_id, {"distinct": {"arbitrary": ["data"]}}
)
with primary.client(member.local_id) as c:
r = c.post("/app/multi_auth")
require_new_response(r)
LOG.info("Authenticate as a different member, via TLS cert")
with primary.client("member1") as c:
r = c.post("/app/multi_auth")
require_new_response(r)
LOG.info("Authenticate via JWT token")
jwt_issuer = infra.jwt_issuer.JwtIssuer()
jwt_issuer.register(network)
jwt = jwt_issuer.issue_jwt(claims={"user": "Alice"})
with primary.client() as c:
r = c.post("/app/multi_auth", headers={"authorization": "Bearer " + jwt})
require_new_response(r)
LOG.info("Authenticate via second JWT token")
jwt2 = jwt_issuer.issue_jwt(claims={"user": "Bob"})
with primary.client(common_headers={"authorization": "Bearer " + jwt2}) as c:
r = c.post("/app/multi_auth")
require_new_response(r)
LOG.info("Authenticate via COSE Sign1 payload")
with primary.client(None, None, "user1") as c:
r = c.post("/app/multi_auth", body={"some": "content"})
require_new_response(r)
return network
@reqs.description("Call an endpoint with a custom auth policy")
@reqs.supports_methods("/app/custom_auth")
def test_custom_auth(network, args):
primary, other = network.find_primary_and_any_backup()
nodes = (primary, other)
if args.http2:
# HTTP2 doesn't support forwarding
nodes = (primary,)
for node in nodes:
with node.client() as c:
LOG.info("Request without custom headers is refused")
r = c.get("/app/custom_auth")
assert r.status_code == http.HTTPStatus.UNAUTHORIZED.value, r.status_code
name_header = "x-custom-auth-name"
age_header = "x-custom-auth-age"
LOG.info("Requests with partial headers are refused")
r = c.get("/app/custom_auth", headers={name_header: "Bob"})
assert r.status_code == http.HTTPStatus.UNAUTHORIZED.value, r.status_code
r = c.get("/app/custom_auth", headers={age_header: "42"})
assert r.status_code == http.HTTPStatus.UNAUTHORIZED.value, r.status_code
LOG.info("Requests with unacceptable header contents are refused")
r = c.get("/app/custom_auth", headers={name_header: "", age_header: "42"})
assert r.status_code == http.HTTPStatus.UNAUTHORIZED.value, r.status_code
r = c.get(
"/app/custom_auth", headers={name_header: "Bob", age_header: "12"}
)
assert r.status_code == http.HTTPStatus.UNAUTHORIZED.value, r.status_code
LOG.info("Request which meets all requirements is accepted")
r = c.get(
"/app/custom_auth", headers={name_header: "Alice", age_header: "42"}
)
assert r.status_code == http.HTTPStatus.OK.value, r.status_code
response = r.body.json()
assert response["name"] == "Alice", response
assert response["age"] == 42, response
return network
@reqs.description("Call an endpoint with a custom auth policy which throws")
@reqs.supports_methods("/app/custom_auth")
def test_custom_auth_safety(network, args):
primary, other = network.find_primary_and_any_backup()
nodes = (primary, other)
if args.http2:
# HTTP2 doesn't support forwarding
nodes = (primary,)
for node in nodes:
with node.client() as c:
r = c.get(
"/app/custom_auth",
headers={"x-custom-auth-explode": "Boom goes the dynamite"},
)
assert (
r.status_code == http.HTTPStatus.INTERNAL_SERVER_ERROR.value
), r.status_code
return network
def get_metrics(r, path, method, default=None):
try:
return next(
v
for v in r.body.json()["metrics"]
if v["path"] == path and v["method"] == method
)
except StopIteration:
return default
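# --- Illustrative sketch (not part of the original helpers) ---
# get_metrics() picks one endpoint's entry out of the /app/api/metrics payload.
# The fake response below mimics only the shape this file relies on (a top-level
# "metrics" list with "path"/"method"/"calls"/"errors" fields); the numbers are made up.
def _get_metrics_usage_sketch():
    class _FakeBody:
        def json(self):
            return {"metrics": [{"path": "log/public", "method": "POST", "calls": 3, "errors": 0}]}
    class _FakeResponse:
        body = _FakeBody()
    m = get_metrics(_FakeResponse(), "log/public", "POST")
    assert m["calls"] == 3
    return m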
@reqs.description("Write non-JSON body")
@reqs.supports_methods("/app/log/private/raw_text/{id}", "/app/log/private")
@app.scoped_txs()
def test_raw_text(network, args):
log_id = 7
msg = "This message is not in JSON"
r = network.txs.post_raw_text(log_id, msg)
assert r.status_code == http.HTTPStatus.OK.value
r = network.txs.request(log_id, priv=True)
assert msg in r.body.json()["msg"], r
primary, _ = network.find_primary()
with primary.client("user0") as c:
r = c.get("/app/api/metrics")
assert get_metrics(r, "log/private/raw_text/{id}", "POST")["calls"] > 0
return network
@reqs.description("Read metrics")
@reqs.supports_methods("/app/api/metrics")
def test_metrics(network, args):
primary, _ = network.find_primary()
calls = 0
errors = 0
with primary.client("user0") as c:
r = c.get("/app/api/metrics")
m = get_metrics(r, "api/metrics", "GET")
calls = m["calls"]
errors = m["errors"]
with primary.client("user0") as c:
r = c.get("/app/api/metrics")
assert get_metrics(r, "api/metrics", "GET")["calls"] == calls + 1
r = c.get("/app/api/metrics")
assert get_metrics(r, "api/metrics", "GET")["calls"] == calls + 2
with primary.client() as c:
r = c.get("/app/api/metrics", headers={"accept": "nonsense"})
assert r.status_code == http.HTTPStatus.BAD_REQUEST.value
with primary.client() as c:
r = c.get("/app/api/metrics")
assert get_metrics(r, "api/metrics", "GET")["errors"] == errors + 1
calls = 0
with primary.client("user0") as c:
r = c.get("/app/api/metrics")
calls = get_metrics(r, "log/public", "POST", {"calls": 0})["calls"]
network.txs.issue(
network=network,
number_txs=1,
)
with primary.client("user0") as c:
r = c.get("/app/api/metrics")
assert get_metrics(r, "log/public", "POST")["calls"] == calls + 1
with primary.client("user0") as c:
r = c.get("/app/no_such_endpoint")
assert r.status_code == http.HTTPStatus.NOT_FOUND.value
r = c.get("/app/api/metrics")
assert (
get_metrics(
r,
"no_such_endpoint",
"GET",
)
is None
)
return network
@reqs.description("Read historical state")
@reqs.supports_methods("/app/log/private", "/app/log/private/historical")
@app.scoped_txs()
def test_historical_query(network, args):
network.txs.issue(network, number_txs=2)
network.txs.issue(network, number_txs=2, repeat=True)
network.txs.verify()
primary, _ = network.find_nodes()
with primary.client("user0") as c:
r = c.get(
"/app/log/private/historical",
headers={infra.clients.CCF_TX_ID_HEADER: "99999.1"},
)
assert r.status_code == http.HTTPStatus.NOT_FOUND, r
assert r.body.json()["error"]["code"] == "TransactionInvalid", r
primary, _ = network.find_nodes()
with primary.client("user0") as c:
r = c.get(
"/app/log/private/historical",
headers={infra.clients.CCF_TX_ID_HEADER: "99999.999999"},
)
assert r.status_code == http.HTTPStatus.NOT_FOUND, r
assert r.body.json()["error"]["code"] == "TransactionPendingOrUnknown", r
return network
@reqs.description("Read historical receipts")
@reqs.supports_methods("/app/log/private", "/app/log/private/historical_receipt")
def test_historical_receipts(network, args):
primary, backups = network.find_nodes()
TXS_COUNT = 5
start_idx = network.txs.idx + 1
network.txs.issue(network, number_txs=TXS_COUNT)
for idx in range(start_idx, TXS_COUNT + start_idx):
for node in [primary, backups[0]]:
first_msg = network.txs.priv[idx][0]
first_receipt = network.txs.get_receipt(
node, idx, first_msg["seqno"], first_msg["view"]
)
r = first_receipt.json()["receipt"]
verify_receipt(r, network.cert)
# receipt.verify() and ccf.receipt.check_endorsement() raise if they fail, but do not return anything
verified = True
try:
ccf.receipt.verify(
hashlib.sha256(b"").hexdigest(), r["signature"], network.cert
)
except InvalidSignature:
verified = False
assert not verified
return network
@reqs.description("Read historical receipts with claims")
@reqs.supports_methods("/app/log/public", "/app/log/public/historical_receipt")
def test_historical_receipts_with_claims(network, args):
primary, backups = network.find_nodes()
TXS_COUNT = 5
start_idx = network.txs.idx + 1
network.txs.issue(network, number_txs=TXS_COUNT, record_claim=True)
for idx in range(start_idx, TXS_COUNT + start_idx):
for node in [primary, backups[0]]:
first_msg = network.txs.pub[idx][0]
first_receipt = network.txs.get_receipt(
node, idx, first_msg["seqno"], first_msg["view"], domain="public"
)
r = first_receipt.json()["receipt"]
verify_receipt(r, network.cert, first_receipt.json()["msg"].encode())
# receipt.verify() and ccf.receipt.check_endorsement() raise if they fail, but do not return anything
verified = True
try:
ccf.receipt.verify(
hashlib.sha256(b"").hexdigest(), r["signature"], network.cert
)
except InvalidSignature:
verified = False
assert not verified
return network
@reqs.description("Read range of historical state")
@reqs.supports_methods("/app/log/public", "/app/log/public/historical/range")
def test_historical_query_range(network, args):
id_a = 142
id_b = 143
id_c = 144
first_seqno = None
last_seqno = None
primary, _ = network.find_primary()
with primary.client("user0") as c:
# Submit many transactions, overwriting the same IDs
# Need to submit through network.txs so these can be verified at shutdown, but also need to submit one at a
# time to retrieve the submitted transactions
msgs = {}
n_entries = 100
def id_for(i):
if i == n_entries // 2:
return id_c
else:
return id_b if i % 3 == 0 else id_a
for i in range(n_entries):
idx = id_for(i)
network.txs.issue(
network,
repeat=True,
idx=idx,
wait_for_sync=False,
log_capture=[],
)
_, tx = network.txs.get_last_tx(idx=idx, priv=False)
msg = tx["msg"]
seqno = tx["seqno"]
view = tx["view"]
msgs[seqno] = msg
if first_seqno is None:
first_seqno = seqno
last_seqno = seqno
infra.commit.wait_for_commit(c, seqno=last_seqno, view=view, timeout=3)
entries_a, _ = network.txs.verify_range_for_idx(id_a, node=primary)
entries_b, _ = network.txs.verify_range_for_idx(id_b, node=primary)
entries_c, _ = network.txs.verify_range_for_idx(id_c, node=primary)
# Fetching A and B should take a similar amount of time, C (which was only written to in a brief window in the history) should be much faster
# NB: With larger page size, this is not necessarily true! Small range means _all_ responses fit in a single response page
# assert duration_c < duration_a
# assert duration_c < duration_b
# Confirm that we can retrieve these with more specific queries, and we end up with the same result
alt_a, _ = network.txs.verify_range_for_idx(
id_a, node=primary, from_seqno=first_seqno
)
assert alt_a == entries_a
alt_a, _ = network.txs.verify_range_for_idx(
id_a, node=primary, to_seqno=last_seqno
)
assert alt_a == entries_a
alt_a, _ = network.txs.verify_range_for_idx(
id_a, node=primary, from_seqno=first_seqno, to_seqno=last_seqno
)
assert alt_a == entries_a
actual_len = len(entries_a) + len(entries_b) + len(entries_c)
assert (
n_entries == actual_len
), f"Expected {n_entries} total entries, got {actual_len}"
# Iterate through both lists, by i, checking retrieved entries match expectations
for i in range(n_entries):
expected_id = id_for(i)
entries = (
entries_a
if expected_id == id_a
else (entries_b if expected_id == id_b else entries_c)
)
entry = entries.pop(0)
assert entry["id"] == expected_id
assert entry["msg"] == msgs[entry["seqno"]]
# Make sure this has checked every entry
assert len(entries_a) == 0
assert len(entries_b) == 0
assert len(entries_c) == 0
return network
@reqs.description("Read state at multiple distinct historical points")
@reqs.supports_methods("/app/log/private", "/app/log/private/historical/sparse")
def test_historical_query_sparse(network, args):
idx = 142
seqnos = []
primary, _ = network.find_primary()
with primary.client("user0") as c:
# Submit many transactions, overwriting the same ID
# Need to submit through network.txs so these can be verified at shutdown, but also need to submit one at a
# time to retrieve the submitted transactions
msgs = {}
n_entries = 100
for _ in range(n_entries):
network.txs.issue(
network,
repeat=True,
idx=idx,
wait_for_sync=False,
log_capture=[],
send_public=False,
)
_, tx = network.txs.get_last_tx(idx=idx)
msg = tx["msg"]
seqno = tx["seqno"]
view = tx["view"]
msgs[seqno] = msg
seqnos.append(seqno)
infra.commit.wait_for_commit(c, seqno=seqnos[-1], view=view, timeout=3)
def get_sparse(client, target_id, seqnos, timeout=3):
seqnos_s = ",".join(str(n) for n in seqnos)
LOG.info(f"Getting historical entries: {seqnos_s}")
logs = []
start_time = time.time()
end_time = start_time + timeout
entries = {}
path = (
f"/app/log/private/historical/sparse?id={target_id}&seqnos={seqnos_s}"
)
while time.time() < end_time:
r = client.get(path, log_capture=logs)
if r.status_code == http.HTTPStatus.OK:
j_body = r.body.json()
for entry in j_body["entries"]:
assert entry["id"] == target_id, entry
entries[entry["seqno"]] = entry["msg"]
duration = time.time() - start_time
LOG.info(
f"Done! Fetched {len(entries)} entries in {duration:0.2f}s"
)
return entries, duration
elif r.status_code == http.HTTPStatus.ACCEPTED:
# Ignore retry-after header, retry soon
time.sleep(0.1)
continue
else:
LOG.error("Printing historical/sparse logs on unexpected status")
flush_info(logs, None)
raise ValueError(
f"Unexpected status code from historical sparse query: {r.status_code}"
)
LOG.error("Printing historical/sparse logs on timeout")
flush_info(logs, None)
raise TimeoutError(
f"Historical sparse query not available after {timeout}s"
)
entries_all, _ = get_sparse(c, idx, seqnos)
seqnos_a = [s for s in seqnos if random.random() < 0.7]
entries_a, _ = get_sparse(c, idx, seqnos_a)
seqnos_b = [s for s in seqnos if random.random() < 0.5]
entries_b, _ = get_sparse(c, idx, seqnos_b)
small_range = len(seqnos) // 20
seqnos_c = seqnos[:small_range] + seqnos[-small_range:]
entries_c, _ = get_sparse(c, idx, seqnos_c)
def check_presence(expected, entries, seqno):
if seqno in expected:
assert seqno in entries, f"Missing result for {seqno}"
assert (
entries[seqno] == msgs[seqno]
), f"{entries[seqno]} != {msgs[seqno]}"
for seqno in seqnos:
check_presence(seqnos, entries_all, seqno)
check_presence(seqnos_a, entries_a, seqno)
check_presence(seqnos_b, entries_b, seqno)
check_presence(seqnos_c, entries_c, seqno)
return network
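# --- Illustrative sketch (not part of the original tests) ---
# The historical queries above all follow the same polling contract: 200 OK means the
# entry is ready, 202 Accepted means "retry shortly". A condensed, hypothetical version
# of that loop (client and path are placeholders, not a helper the tests actually use):
def _poll_historical_sketch(client, path, timeout=3):
    end_time = time.time() + timeout
    while time.time() < end_time:
        r = client.get(path)
        if r.status_code == http.HTTPStatus.OK:
            return r.body.json()
        if r.status_code == http.HTTPStatus.ACCEPTED:
            time.sleep(0.1)
            continue
        raise ValueError(f"Unexpected status {r.status_code}")
    raise TimeoutError(f"Historical entry at {path} not available after {timeout}s")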
def escaped_query_tests(c, endpoint):
samples = [
{"this": "that"},
{"this": "that", "other": "with spaces"},
{"this with spaces": "with spaces"},
{"arg": 'This has many@many many \\% " AWKWARD :;-=?!& characters %20%20'},
]
for query in samples:
unescaped_query = "&".join([f"{k}={v}" for k, v in query.items()])
query_to_send = unescaped_query
if os.getenv("CURL_CLIENT"):
query_to_send = urllib.parse.urlencode(query)
r = c.get(f"/app/log/{endpoint}?{query_to_send}")
assert r.body.text() == unescaped_query, (
r.body.text(),
unescaped_query,
)
all_chars = list(range(0, 255))
max_args = 50
for ichars in [
all_chars[i : i + max_args] for i in range(0, len(all_chars), max_args)
]:
encoded, raw = [], []
for ichar in ichars:
char = chr(ichar)
encoded.append(urllib.parse.urlencode({"arg": char}))
raw.append(f"arg={char}")
r = c.get(f"/app/log/{endpoint}?{'&'.join(encoded)}")
assert r.body.data() == "&".join(raw).encode(), r.body.data()
encoded, raw = [], []
for ichar in ichars:
char = chr(ichar)
encoded.append(urllib.parse.urlencode({f"arg{char}": "value"}))
raw.append(f"arg{char}=value")
r = c.get(f"/app/log/{endpoint}?{'&'.join(encoded)}")
assert r.body.data() == "&".join(raw).encode(), r.body.data()
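# --- Illustrative sketch (not part of the original tests) ---
# escaped_query_tests round-trips both raw and percent-encoded query strings. A few
# standalone urllib examples of the encoding it exercises:
def _query_escaping_sketch():
    assert urllib.parse.urlencode({"this": "that"}) == "this=that"
    assert urllib.parse.urlencode({"arg": "with spaces"}) == "arg=with+spaces"
    assert urllib.parse.urlencode({"arg": "&"}) == "arg=%26"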
@reqs.description("Testing forwarding on member and user frontends")
@reqs.supports_methods("/app/log/private")
@reqs.at_least_n_nodes(2)
@reqs.no_http2()
@app.scoped_txs()
def test_forwarding_frontends(network, args):
backup = network.find_any_backup()
try:
with backup.client() as c:
check_commit = infra.checker.Checker(c)
ack = network.consortium.get_any_active_member().ack(backup)
check_commit(ack)
except AckException as e:
assert args.http2 is True
assert e.response.status_code == http.HTTPStatus.NOT_IMPLEMENTED
r = e.response.body.json()
assert (
r["error"]["message"]
== "Request cannot be forwarded to primary on HTTP/2 interface."
), r
else:
assert args.http2 is False
try:
msg = "forwarded_msg"
log_id = 7
network.txs.issue(
network,
number_txs=1,
on_backup=True,
idx=log_id,
send_public=False,
msg=msg,
)
except infra.logging_app.LoggingTxsIssueException as e:
assert args.http2 is True
assert e.response.status_code == http.HTTPStatus.NOT_IMPLEMENTED
r = e.response.body.json()
assert (
r["error"]["message"]
== "Request cannot be forwarded to primary on HTTP/2 interface."
), r
else:
assert args.http2 is False
if args.package == "samples/apps/logging/liblogging" and not args.http2:
with backup.client("user0") as c:
escaped_query_tests(c, "request_query")
return network
@reqs.description("Testing forwarding on user frontends without actor app prefix")
@reqs.at_least_n_nodes(2)
@reqs.no_http2()
def test_forwarding_frontends_without_app_prefix(network, args):
msg = "forwarded_msg"
log_id = 7
network.txs.issue(
network,
number_txs=1,
on_backup=True,
idx=log_id,
send_public=False,
msg=msg,
private_url="/log/private",
)
return network
@reqs.description("Testing forwarding on long-lived connection")
@reqs.supports_methods("/app/log/private")
@reqs.at_least_n_nodes(2)
@reqs.no_http2()
def test_long_lived_forwarding(network, args):
primary, _ = network.find_primary()
# Create a new node
new_node = network.create_node("local://localhost")
# Message limit must be high enough that the hard limit will not be reached
# by the combined work of all threads. Note that each thread produces multiple
# node-to-node messages - a forwarded write and response, Raft AEs. If these
# arrive too fast, they will trigger the hard cap and the node-to-node keys
# will be reset, potentially invalidating in-flight messages and causing client
# requests to time out.
n_threads = 5
message_limit = 30
new_node_args = copy.deepcopy(args)
new_node_args.node_to_node_message_limit = message_limit
network.join_node(new_node, args.package, new_node_args)
network.trust_node(new_node, new_node_args)
# Send many messages to new node over long-lived connections,
# to confirm that forwarding continues to work during
# node-to-node channel key rotations
def fn(worker_id, request_count, should_log):
with new_node.client("user0") as c:
msg = "Will be forwarded"
log_id = 42
for i in range(request_count):
logs = []
if should_log and i % 10 == 0:
LOG.info(f"Sending {i} / {request_count}")
logs = None
r = c.post(
f"/app/log/private?scope=long-lived-forwarding-{worker_id}",
{"id": log_id, "msg": msg},
log_capture=logs,
)
assert r.status_code == http.HTTPStatus.OK, r
threads = []
current_thread_name = threading.current_thread().name
for i in range(n_threads):
threads.append(
threading.Thread(
target=fn,
args=(i, 3 * message_limit, i == 0),
name=f"{current_thread_name}:worker-{i}",
)
)
for thread in threads:
thread.start()
for thread in threads:
thread.join()
# Remove temporary new node
network.retire_node(primary, new_node)
new_node.stop()
return network
@reqs.description("Test user-data used for access permissions")
@reqs.supports_methods("/app/log/private/admin_only")
def test_user_data_ACL(network, args):
primary, _ = network.find_primary()
user = network.users[0]
def by_set_user_data(user_data):
network.consortium.set_user_data(primary, user.service_id, user_data=user_data)
def by_set_user(user_data):
network.consortium.add_user(primary, user.local_id, user_data=user_data)
for set_user_data in (by_set_user_data, by_set_user):
# Give isAdmin permissions to a single user
set_user_data(user_data={"isAdmin": True})
log_id = network.txs.find_max_log_id() + 1
# Confirm that user can now use this endpoint
with primary.client(user.local_id) as c:
r = c.post(
"/app/log/private/admin_only", {"id": log_id, "msg": "hello world"}
)
assert r.status_code == http.HTTPStatus.OK.value, r.status_code
# Remove permission
set_user_data(user_data={"isAdmin": False})
# Confirm that user is now forbidden on this endpoint
with primary.client(user.local_id) as c:
r = c.post(
"/app/log/private/admin_only", {"id": log_id, "msg": "hello world"}
)
assert r.status_code == http.HTTPStatus.FORBIDDEN.value, r.status_code
return network
@reqs.description("Check for commit of every prior transaction")
def test_view_history(network, args):
check = infra.checker.Checker()
previous_node = None
previous_tx_ids = ""
for node in network.get_joined_nodes():
with node.client("user0") as c:
r = c.get("/node/commit")
check(c)
commit_tx_id = TxID.from_str(r.body.json()["transaction_id"])
# Retrieve status for all possible Tx IDs
seqno_to_views = {}
for seqno in range(1, commit_tx_id.seqno + 1):
views = []
for view in range(1, commit_tx_id.view + 1):
r = c.get(f"/node/tx?transaction_id={view}.{seqno}", log_capture=[])
check(r)
status = TxStatus(r.body.json()["status"])
if status == TxStatus.Committed:
views.append(view)
seqno_to_views[seqno] = views
# Check we have exactly one Tx ID for each seqno
txs_ok = True
for seqno, views in seqno_to_views.items():
if len(views) != 1:
txs_ok = False
LOG.error(
f"Node {node.node_id}: Found {len(views)} committed Tx IDs for seqno {seqno}"
)
tx_ids_condensed = ", ".join(
" OR ".join(f"{view}.{seqno}" for view in views or ["UNKNOWN"])
for seqno, views in seqno_to_views.items()
)
if txs_ok:
LOG.success(
f"Node {node.node_id}: Found a valid sequence of Tx IDs:\n{tx_ids_condensed}"
)
else:
LOG.error(
f"Node {node.node_id}: Invalid sequence of Tx IDs:\n{tx_ids_condensed}"
)
raise RuntimeError(
f"Node {node.node_id}: Incomplete or inconsistent view history"
)
# Compare view history between nodes
if previous_tx_ids:
# Some nodes may have a slightly longer view history so only compare the common prefix
min_tx_ids_len = min(len(previous_tx_ids), len(tx_ids_condensed))
assert (
tx_ids_condensed[:min_tx_ids_len]
== previous_tx_ids[:min_tx_ids_len]
), f"Tx IDs don't match between node {node.node_id} and node {previous_node.node_id}: {tx_ids_condensed[:min_tx_ids_len]} and {previous_tx_ids[:min_tx_ids_len]}"
previous_tx_ids = tx_ids_condensed
previous_node = node
return network
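# --- Illustrative sketch (not part of the original tests) ---
# The condensed view-history string built in test_view_history joins, per seqno, every
# view in which that seqno was found committed, or UNKNOWN if none was. Made-up example:
def _condense_tx_ids_sketch():
    seqno_to_views = {1: [2], 2: [2, 3], 3: []}  # hypothetical poll results
    condensed = ", ".join(
        " OR ".join(f"{view}.{seqno}" for view in views or ["UNKNOWN"])
        for seqno, views in seqno_to_views.items()
    )
    assert condensed == "2.1, 2.2 OR 3.2, UNKNOWN.3"
    return condensed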
class SentTxs:
# view -> seqno -> status
txs = defaultdict(lambda: defaultdict(lambda: TxStatus.Unknown))
@staticmethod
def update_status(view, seqno, status=None):
current_status = SentTxs.txs[view][seqno]
if status is None:
# If no status was provided, exit here. Since we have already
# accessed the value in the defaultdict, this tx id is now recorded
# and will be returned by future calls to get_all_tx_ids()
return
if status != current_status:
valid = False
# Only valid transitions from Unknown to any, or Pending to Committed/Invalid
if current_status == TxStatus.Unknown:
valid = True
elif current_status == TxStatus.Pending and (
status == TxStatus.Committed or status == TxStatus.Invalid
):
valid = True
if valid:
SentTxs.txs[view][seqno] = status
else:
raise ValueError(
f"Transaction {view}.{seqno} making invalid transition from {current_status} to {status}"
)
@staticmethod
def get_all_tx_ids():
return [
(view, seqno)
for view, view_txs in SentTxs.txs.items()
for seqno, status in view_txs.items()
]
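# --- Illustrative sketch (not part of the original tests) ---
# SentTxs only allows the transitions Unknown -> anything and Pending -> Committed or
# Invalid; anything else raises. A made-up walk-through (never called anywhere, since
# SentTxs keeps class-level state that test_tx_statuses also uses):
def _sent_txs_transition_sketch():
    SentTxs.update_status(2, 10)                        # record only, stays Unknown
    SentTxs.update_status(2, 10, TxStatus.Pending)      # Unknown -> Pending: allowed
    SentTxs.update_status(2, 10, TxStatus.Committed)    # Pending -> Committed: allowed
    try:
        SentTxs.update_status(2, 10, TxStatus.Pending)  # Committed -> Pending: rejected
    except ValueError:
        pass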
@reqs.description("Build a list of Tx IDs, check they transition states as expected")
@reqs.supports_methods("/app/log/private")
@app.scoped_txs()
def test_tx_statuses(network, args):
primary, _ = network.find_primary()
with primary.client("user0") as c:
check = infra.checker.Checker()
r = network.txs.issue(network, 1, idx=0, send_public=False, msg="Ignored")
# Until this tx is committed, poll for the status of this and some other
# related transactions around it (and also any historical transactions we're tracking)
target_view = r.view
target_seqno = r.seqno
SentTxs.update_status(target_view, target_seqno)
SentTxs.update_status(target_view, target_seqno + 1)
SentTxs.update_status(target_view - 1, target_seqno)
end_time = time.time() + 10
while True:
if time.time() > end_time:
raise TimeoutError(
f"Took too long waiting for commit of {target_view}.{target_seqno}"
)
done = False
for view, seqno in SentTxs.get_all_tx_ids():
r = c.get(f"/node/tx?transaction_id={view}.{seqno}")
check(r)
status = TxStatus(r.body.json()["status"])
SentTxs.update_status(view, seqno, status)
if (
status == TxStatus.Committed
and target_view == view
and target_seqno == seqno
):
done = True
if done:
break
time.sleep(0.1)
return network
@reqs.description("Running transactions against logging app")
@reqs.supports_methods("/app/receipt", "/app/log/private")
@reqs.at_least_n_nodes(2)
@app.scoped_txs()
def test_receipts(network, args):
primary, _ = network.find_primary_and_any_backup()
msg = "Hello world"
LOG.info("Write/Read on primary")
with primary.client("user0") as c:
for j in range(10):
idx = j + 10000
r = network.txs.issue(network, 1, idx=idx, send_public=False, msg=msg)
start_time = time.time()
while time.time() < (start_time + 3.0):
rc = c.get(f"/app/receipt?transaction_id={r.view}.{r.seqno}")
if rc.status_code == http.HTTPStatus.OK:
receipt = rc.body.json()
verify_receipt(receipt, network.cert)
break
elif rc.status_code == http.HTTPStatus.ACCEPTED:
time.sleep(0.5)
else:
assert False, rc
return network
@reqs.description("Validate random receipts")
@reqs.supports_methods("/app/receipt", "/app/log/private")
@reqs.at_least_n_nodes(2)
def test_random_receipts(
network,
args,
lts=True,
additional_seqnos=MappingProxyType({}),
node=None,
log_capture=None,
):
if node is None:
node, _ = network.find_primary_and_any_backup()
common = os.listdir(network.common_dir)
cert_paths = [
os.path.join(network.common_dir, path)
for path in common
if re.compile(r"^\d+\.pem$").match(path)
]
certs = {}
for path in cert_paths:
with open(path, encoding="utf-8") as c:
cert = c.read()
certs[infra.crypto.compute_public_key_der_hash_hex_from_pem(cert)] = cert
with node.client("user0") as c:
r = c.get("/app/commit")
max_view, max_seqno = [
int(e) for e in r.body.json()["transaction_id"].split(".")
]
view = 2
genesis_seqno = 1
likely_first_sig_seqno = 2
last_sig_seqno = max_seqno
interesting_prefix = [genesis_seqno, likely_first_sig_seqno]
seqnos = range(len(interesting_prefix) + 1, max_seqno)
random_sample_count = 20 if lts else 50
for s in (
interesting_prefix
+ sorted(
random.sample(seqnos, min(random_sample_count, len(seqnos)))
+ list(additional_seqnos.keys())
)
+ [last_sig_seqno]
):
start_time = time.time()
while time.time() < (start_time + 3.0):
rc = c.get(
f"/app/receipt?transaction_id={view}.{s}", log_capture=log_capture
)
if rc.status_code == http.HTTPStatus.OK:
receipt = rc.body.json()
if "leaf" in receipt:
if not lts:
assert "proof" in receipt, receipt
assert len(receipt["proof"]) == 0, receipt
# Legacy signature receipt
LOG.warning(
f"Skipping verification of signature receipt at {view}.{s}"
)
else:
if lts and not receipt.get("cert"):
receipt["cert"] = certs[receipt["node_id"]]
verify_receipt(
receipt,
network.cert,
claims=additional_seqnos.get(s),
generic=True,
skip_endorsement_check=lts,
)
break
elif rc.status_code == http.HTTPStatus.ACCEPTED:
time.sleep(0.1)
else:
view += 1
if view > max_view:
assert False, rc
return network
@reqs.description("Test basic app liveness")
@reqs.at_least_n_nodes(1)
@app.scoped_txs()
def test_liveness(network, args):
network.txs.issue(
network=network,
number_txs=3,
)
network.txs.verify()
return network
@reqs.description("Rekey the ledger once")
@reqs.at_least_n_nodes(1)
def test_rekey(network, args):
primary, _ = network.find_primary()
network.consortium.trigger_ledger_rekey(primary)
return network
@reqs.description("Test empty URI behaviour")
def test_empty_path(network, args):
primary, _ = network.find_primary()
with primary.client() as c:
r = c.get("/")
assert r.status_code == http.HTTPStatus.NOT_FOUND
r = c.post("/")
assert r.status_code == http.HTTPStatus.NOT_FOUND
@reqs.description("Test UDP echo endpoint")
@reqs.at_least_n_nodes(1)
def test_udp_echo(network, args):
# For now, only test UDP on primary
primary, _ = network.find_primary()
udp_interface = primary.host.rpc_interfaces["udp_interface"]
host = udp_interface.public_host
port = udp_interface.public_port
LOG.info(f"Testing UDP echo server at {host}:{port}")
server_address = (host, port)
buffer_size = 1024
test_string = b"Some random text"
attempts = 10
attempt = 1
while attempt <= attempts:
LOG.info(f"Testing UDP echo server sending '{test_string}'")
with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as s:
s.settimeout(3)
s.sendto(test_string, server_address)
recv = s.recvfrom(buffer_size)
text = recv[0]
LOG.info(f"Testing UDP echo server received '{text}'")
assert text == test_string
attempt = attempt + 1
@reqs.description("Check post-local-commit failure handling")
@reqs.supports_methods("/app/log/private/anonymous/v2")
def test_post_local_commit_failure(network, args):
primary, _ = network.find_primary()
with primary.client() as c:
r = c.post(
"/app/log/private/anonymous/v2?fail=false", {"id": 100, "msg": "hello"}
)
assert r.status_code == http.HTTPStatus.OK.value, r.status_code
assert r.body.json()["success"] is True
TxID.from_str(r.body.json()["tx_id"])
r = c.post(
"/app/log/private/anonymous/v2?fail=true", {"id": 101, "msg": "world"}
)
assert (
r.status_code == http.HTTPStatus.INTERNAL_SERVER_ERROR.value
), r.status_code
txid_header_key = "x-ms-ccf-transaction-id"
# check we can parse the txid from the header
# this gets set since the post-commit handler threw
TxID.from_str(r.headers[txid_header_key])
assert r.body.json() == {
"error": {
"code": "InternalError",
"message": "Failed to execute local commit handler func: didn't set user_data!",
}
}, r.body.json()
@reqs.description(
"Check that the committed index gets populated with creates and deletes"
)
@reqs.supports_methods("/app/log/private/committed", "/app/log/private")
def test_committed_index(network, args, timeout=5):
def get_strategies(client):
# Also test /node/index/strategies here, since this test already adds and
# removes indexing strategies
res = client.get("/node/index/strategies")
assert res.status_code == http.HTTPStatus.OK
# Dictify here for easy lookup
return {o["name"]: o for o in res.body.json()}
remote_node, _ = network.find_primary()
strategy_name = "CommittedRecords records"
with remote_node.client() as c:
strategies = get_strategies(c)
assert strategy_name not in strategies
res = c.post("/app/log/private/install_committed_index")
assert res.status_code == http.HTTPStatus.OK
strategies = get_strategies(c)
assert strategy_name in strategies
txid = network.txs.issue(network, number_txs=1, send_public=False)
_, log_id = network.txs.get_log_id(txid)
start_time = time.time()
end_time = start_time + timeout
while time.time() < end_time:
r = network.txs.request(log_id, priv=True, url_suffix="committed")
if r.status_code == http.HTTPStatus.OK.value:
break
current_tx_id = TxID.from_str(r.body.json()["error"]["current_txid"])
LOG.info(f"Current Tx ID ({current_tx_id}) - Tx ID ({txid})")
if current_tx_id >= txid:
break
LOG.warning("Current Tx ID is behind, retrying...")
time.sleep(1)
assert r.status_code == http.HTTPStatus.OK.value, r.status_code
assert r.body.json() == {"msg": f"Private message at idx {log_id} [0]"}
network.txs.delete(log_id, priv=True)
r = network.txs.request(log_id, priv=True)
assert r.status_code == http.HTTPStatus.BAD_REQUEST.value, r.status_code
assert r.body.json()["error"]["message"] == f"No such record: {log_id}."
assert r.body.json()["error"]["code"] == "ResourceNotFound"
r = network.txs.request(log_id, priv=True, url_suffix="committed")
assert r.status_code == http.HTTPStatus.BAD_REQUEST.value, r.status_code
assert r.body.json()["error"]["message"] == f"No such record: {log_id}."
assert r.body.json()["error"]["code"] == "ResourceNotFound"
# Uninstall index before proceeding
with remote_node.client() as c:
res = c.post("/app/log/private/uninstall_committed_index")
assert res.status_code == http.HTTPStatus.OK
strategies = get_strategies(c)
assert strategy_name not in strategies
@reqs.description(
"Check BasicConstraints are set correctly on network and node certificates"
)
def test_basic_constraints(network, args):
primary, _ = network.find_primary()
ca_path = os.path.join(network.common_dir, "service_cert.pem")
with open(ca_path, encoding="utf-8") as ca:
ca_pem = ca.read()
ca_cert = load_pem_x509_certificate(ca_pem.encode(), default_backend())
basic_constraints = ca_cert.extensions.get_extension_for_oid(
ObjectIdentifier("2.5.29.19")
)
assert basic_constraints.critical is True
assert basic_constraints.value.ca is True
assert basic_constraints.value.path_length == 0
node_pem = primary.get_tls_certificate_pem()
node_cert = load_pem_x509_certificate(node_pem.encode(), default_backend())
basic_constraints = node_cert.extensions.get_extension_for_oid(
ObjectIdentifier("2.5.29.19")
)
assert basic_constraints.critical is True
assert basic_constraints.value.ca is False
def run_udp_tests(args):
    # Register secondary interface as a UDP socket on all nodes
udp_interface = infra.interfaces.make_secondary_interface("udp", "udp_interface")
udp_interface["udp_interface"].app_protocol = "QUIC"
for node in args.nodes:
node.rpc_interfaces.update(udp_interface)
txs = app.LoggingTxs("user0")
with infra.network.network(
args.nodes,
args.binary_dir,
args.debug_nodes,
args.perf_nodes,
pdb=args.pdb,
txs=txs,
) as network:
network.start(args)
test_udp_echo(network, args)
def run(args):
# Listen on two additional RPC interfaces for each node
def additional_interfaces(local_node_id):
return {
"first_interface": f"127.{local_node_id}.0.1",
"second_interface": f"127.{local_node_id}.0.2",
}
for local_node_id, node_host in enumerate(args.nodes):
for interface_name, host in additional_interfaces(local_node_id).items():
node_host.rpc_interfaces[interface_name] = infra.interfaces.RPCInterface(
host=host,
app_protocol="HTTP2" if args.http2 else "HTTP1",
)
txs = app.LoggingTxs("user0")
with infra.network.network(
args.nodes,
args.binary_dir,
args.debug_nodes,
args.perf_nodes,
pdb=args.pdb,
txs=txs,
) as network:
network.start_and_open(args)
test_basic_constraints(network, args)
test(network, args)
test_remove(network, args)
test_clear(network, args)
test_record_count(network, args)
# HTTP2 doesn't support forwarding
if not args.http2:
test_forwarding_frontends(network, args)
test_forwarding_frontends_without_app_prefix(network, args)
test_long_lived_forwarding(network, args)
test_user_data_ACL(network, args)
test_cert_prefix(network, args)
test_anonymous_caller(network, args)
test_multi_auth(network, args)
test_custom_auth(network, args)
test_custom_auth_safety(network, args)
test_raw_text(network, args)
test_historical_query(network, args)
test_historical_query_range(network, args)
test_view_history(network, args)
test_metrics(network, args)
test_empty_path(network, args)
if args.package == "samples/apps/logging/liblogging":
# Local-commit lambda is currently only supported in C++
test_post_local_commit_failure(network, args)
# Custom indexers currently only supported in C++
test_committed_index(network, args)
test_liveness(network, args)
test_rekey(network, args)
test_liveness(network, args)
test_random_receipts(network, args, False)
if args.package == "samples/apps/logging/liblogging":
test_receipts(network, args)
test_historical_query_sparse(network, args)
test_historical_receipts(network, args)
test_historical_receipts_with_claims(network, args)
def run_parsing_errors(args):
txs = app.LoggingTxs("user0")
with infra.network.network(
args.nodes,
args.binary_dir,
args.debug_nodes,
args.perf_nodes,
pdb=args.pdb,
txs=txs,
) as network:
network.start_and_open(args)
test_illegal(network, args)
test_protocols(network, args)
if __name__ == "__main__":
cr = ConcurrentRunner()
cr.add(
"js",
run,
package="libjs_generic",
nodes=infra.e2e_args.max_nodes(cr.args, f=0),
initial_user_count=4,
initial_member_count=2,
)
cr.add(
"cpp",
run,
package="samples/apps/logging/liblogging",
js_app_bundle=None,
nodes=infra.e2e_args.max_nodes(cr.args, f=0),
initial_user_count=4,
initial_member_count=2,
)
cr.add(
"common",
e2e_common_endpoints.run,
package="samples/apps/logging/liblogging",
nodes=infra.e2e_args.max_nodes(cr.args, f=0),
)
# Run illegal traffic tests in separate runners, to reduce total serial runtime
cr.add(
"js_illegal",
run_parsing_errors,
package="libjs_generic",
nodes=infra.e2e_args.max_nodes(cr.args, f=0),
)
cr.add(
"cpp_illegal",
run_parsing_errors,
package="samples/apps/logging/liblogging",
nodes=infra.e2e_args.max_nodes(cr.args, f=0),
)
# This is just for the UDP echo test for now
cr.add(
"udp",
run_udp_tests,
package="samples/apps/logging/liblogging",
nodes=infra.e2e_args.max_nodes(cr.args, f=0),
)
cr.run()
|
6b989bd0f62b751ce67b50ff5a1fd5bdf6ffd4eb
|
7653ddbbc2256fae9cc62251f0241d0e9696df7d
|
/pyshtools/expand/__init__.py
|
f1057aa9d6ab4879bdacb8f04d610698fe77c15a
|
[
"BSD-3-Clause"
] |
permissive
|
SHTOOLS/SHTOOLS
|
c3415b38da290805ecdfd59699587e5ac5233cc8
|
93e77dcc6b36b2363f07d79d07ec47d86e6cba65
|
refs/heads/master
| 2023-08-31T01:35:49.211882
| 2023-08-28T10:50:08
| 2023-08-28T10:50:08
| 24,725,612
| 315
| 117
|
BSD-3-Clause
| 2023-08-28T10:50:10
| 2014-10-02T15:53:36
|
Python
|
UTF-8
|
Python
| false
| false
| 4,591
|
py
|
__init__.py
|
"""
pyshtools Spherical Harmonic Expansion Routines.
This subpackage of pyshtools defines the following functions:
Equally sampled (N by N) and equally spaced (N by 2N) Grids
-----------------------------------------------------
SHExpandDH Expand an equally sampled or equally spaced map into spherical
harmonics using Driscoll and Healy's (1994) sampling theorem.
MakeGridDH Create a 2D map from a set of spherical harmonic coefficients
that conforms with Driscoll and Healy's (1994) sampling theorem.
SHExpandDHC Expand an equally sampled or equally spaced complex map into
complex spherical harmonics using Driscoll and Healy's (1994)
sampling theorem.
MakeGridDHC Create a 2D complex map from a set of complex spherical harmonic
coefficients that conforms with Driscoll and Healy's (1994)
sampling theorem.
MakeGradientDH Compute the gradient of a scalar function and return grids of
the two horizontal components that conform with Driscoll and
Healy's (1994) sampling theorem.
Gauss-Legendre quadrature grids
-------------------------------
SHGLQ Precompute the weights and nodes used in the GLQ-based spherical
harmonics routines.
SHExpandGLQ Expand a 2D map sampled on the Gauss-Legendre quadrature nodes
into spherical harmonics.
MakeGridGLQ Create a 2D map from a set of spherical harmonic coefficients
                  sampled on the Gauss-Legendre quadrature nodes.
SHExpandGLQC Expand a 2D complex map sampled on the Gauss-Legendre quadrature
nodes into complex spherical harmonics.
MakeGridGLQC Create a 2D complex map from a set of complex spherical harmonic
                  coefficients sampled on the Gauss-Legendre quadrature nodes.
GLQGridCoord Compute the latitude and longitude coordinates used in Gauss-
Legendre quadrature grids.
Other
-----
SHExpandLSQ Expand a set of irregularly sampled data points into spherical
harmonics using a least squares inversion.
SHExpandWLSQ Expand a set of irregularly sampled data points into spherical
harmonics using a weighted least squares inversion.
MakeGrid2D Create a 2D cylindrical map with arbitrary grid spacing from a
set of spherical harmonic coefficients.
MakeGridPoint Evaluate a real function expressed in real spherical harmonics
at a single point.
MakeGridPointC Evaluate a complex function expressed in complex spherical
harmonics at a single point.
SHMultiply Multiply two functions and determine the spherical harmonic
coefficients of the resulting function.
spharm Compute all the spherical harmonic functions up to a maximum
degree and order.
spharm_lm Compute the spherical harmonic function for a specific degree l
and order m.
"""
from ..backends.shtools import SHGLQ
from ..backends.shtools import GLQGridCoord
from ..backends.shtools import SHExpandLSQ
from ..backends.shtools import SHExpandWLSQ
from ..backends.shtools import MakeGrid2D
from ..backends.shtools import MakeGridPoint
from ..backends.shtools import MakeGridPointC
from ..backends.shtools import SHMultiply
from .spharm_functions import spharm
from .spharm_functions import spharm_lm
from ..backends import backend_module, select_preferred_backend
del spharm_functions # noqa: F821
def inject_backend_specific_functions_for_expand():
mod = backend_module()
global SHExpandGLQ
SHExpandGLQ = mod.SHExpandGLQ
global SHExpandGLQC
SHExpandGLQC = mod.SHExpandGLQC
global SHExpandDH
SHExpandDH = mod.SHExpandDH
global SHExpandDHC
SHExpandDHC = mod.SHExpandDHC
global MakeGridGLQ
MakeGridGLQ = mod.MakeGridGLQ
global MakeGridGLQC
MakeGridGLQC = mod.MakeGridGLQC
global MakeGridDH
MakeGridDH = mod.MakeGridDH
global MakeGridDHC
MakeGridDHC = mod.MakeGridDHC
global MakeGradientDH
MakeGradientDH = mod.MakeGradientDH
# trigger the injection of the backend-specific functions
select_preferred_backend()
# ---- Define __all__ for use with: from pyshtools import * ----
__all__ = ['SHExpandDH', 'MakeGridDH', 'SHExpandDHC', 'MakeGridDHC',
'SHGLQ', 'SHExpandGLQ', 'MakeGridGLQ', 'SHExpandGLQC',
'MakeGridGLQC', 'GLQGridCoord', 'SHExpandLSQ', 'SHExpandWLSQ',
'MakeGrid2D', 'MakeGridPoint', 'MakeGridPointC', 'SHMultiply',
'MakeGradientDH', 'spharm', 'spharm_lm']
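# Minimal usage sketch (kept as a comment; the `grid` and `cilm` variables and the
# `sampling` argument below are illustrative assumptions, not defined in this module):
# after select_preferred_backend() has run, the injected names above dispatch to the
# chosen backend, e.g.
# cilm = SHExpandDH(grid, sampling=2)
# grid_out = MakeGridDH(cilm, sampling=2)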
|
c6762b0f93661be2d613ad4d67d8c8f6c4c44919
|
2727d25453482392dccb9a96d22df35fb53d5ce8
|
/hypernetx/algorithms/__init__.py
|
61ff87494404150478c3dd0378bf8578cfc21d11
|
[
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
pnnl/HyperNetX
|
a3c9ed016b04b7c37780ca38a8ddc64427aaf597
|
97ccb93ff9f0cc6ee887805e6974ebc2fd9718b3
|
refs/heads/master
| 2023-08-09T17:56:47.051977
| 2023-07-25T21:47:50
| 2023-07-25T22:04:42
| 154,165,172
| 378
| 76
|
NOASSERTION
| 2023-08-20T23:06:50
| 2018-10-22T15:10:44
|
Python
|
UTF-8
|
Python
| false
| false
| 2,596
|
py
|
__init__.py
|
from hypernetx.algorithms.homology_mod2 import (
kchainbasis,
bkMatrix,
swap_rows,
swap_columns,
add_to_row,
add_to_column,
logical_dot,
logical_matmul,
matmulreduce,
logical_matadd,
smith_normal_form_mod2,
reduced_row_echelon_form_mod2,
boundary_group,
chain_complex,
betti,
betti_numbers,
homology_basis,
hypergraph_homology_basis,
interpret,
)
from hypernetx.algorithms.s_centrality_measures import (
s_betweenness_centrality,
s_harmonic_closeness_centrality,
s_harmonic_centrality,
s_closeness_centrality,
s_eccentricity,
)
from hypernetx.algorithms.contagion import (
contagion_animation,
collective_contagion,
individual_contagion,
threshold,
majority_vote,
discrete_SIR,
discrete_SIS,
Gillespie_SIR,
Gillespie_SIS,
)
from hypernetx.algorithms.laplacians_clustering import (
prob_trans,
get_pi,
norm_lap,
spec_clus,
)
from hypernetx.algorithms.generative_models import (
erdos_renyi_hypergraph,
chung_lu_hypergraph,
dcsbm_hypergraph,
)
from hypernetx.algorithms.hypergraph_modularity import (
dict2part,
part2dict,
precompute_attributes,
linear,
majority,
strict,
modularity,
two_section,
kumar,
last_step,
)
__all__ = [
# homology_mod2 API's
"kchainbasis",
"bkMatrix",
"swap_rows",
"swap_columns",
"add_to_row",
"add_to_column",
"logical_dot",
"logical_matmul",
"matmulreduce",
"logical_matadd",
"smith_normal_form_mod2",
"reduced_row_echelon_form_mod2",
"boundary_group",
"chain_complex",
"betti",
"betti_numbers",
"homology_basis",
"hypergraph_homology_basis",
"interpret",
# contagion API's
"contagion_animation",
"collective_contagion",
"individual_contagion",
"threshold",
"majority_vote",
"discrete_SIR",
"discrete_SIS",
"Gillespie_SIR",
"Gillespie_SIS",
# laplacians_clustering API's
"prob_trans",
"get_pi",
"norm_lap",
"spec_clus",
# generative_models API's
"erdos_renyi_hypergraph",
"chung_lu_hypergraph",
"dcsbm_hypergraph",
    # s_centrality_measures API's
"s_betweenness_centrality",
"s_harmonic_closeness_centrality",
"s_harmonic_centrality",
"s_closeness_centrality",
"s_eccentricity",
# hypergraph_modularity API's
"dict2part",
"part2dict",
"precompute_attributes",
"linear",
"majority",
"strict",
"modularity",
"two_section",
"kumar",
"last_step",
]
|
873d8ca1f57c0d76980c229ebf2f2a47b77e9864
|
503256764aec0d4eaecc73e325bcd909e0c41764
|
/python/ctypes/treenumbers.py
|
a6b3940971e88d8c11f85fe433a80ef13cf8eba3
|
[] |
no_license
|
hexchat/hexchat-addons
|
7b967ae33a8a7735304af82485ae71503f970210
|
16f6d169a2e46e10f4edac698ff7cbd476be5c7c
|
refs/heads/master
| 2023-06-17T15:34:53.304526
| 2023-05-02T18:12:34
| 2023-05-02T18:12:34
| 5,244,181
| 254
| 122
| null | 2023-05-02T18:09:16
| 2012-07-31T09:56:25
|
Perl
|
UTF-8
|
Python
| false
| false
| 12,824
|
py
|
treenumbers.py
|
from ctypes import *
import xchat
import traceback
import sys
__module_name__ = "treenumbers"
__module_version__ = "1.0"
__module_description__ = "Display tab numbers and an unread messages counter"
# from gtype.h
class GTypeClass(Structure):
_fields_ = [("g_type", c_uint)]
class GTypeInstance(Structure):
_fields_ = [("g_class", POINTER(GTypeClass))]
class GtkTreeIter(Structure):
_fields_ = [("stamp", c_int)
,("user_data", c_void_p)
,("user_data2", c_void_p)
,("user_data3", c_void_p)
]
# gparamspecs.h
class GParamSpec(Structure):
_fields_ = [("g_type_instance", GTypeInstance)
,("name", c_char_p)
,("flags", c_uint)
,("value_type", c_ulong)
,("owner_type", c_ulong)
]
class User(Structure):
_fields_ = [("nick", c_char * 30)
,("hostname", c_char_p)
,("realname", c_char_p)
,("servername", c_char_p)
]
# hexchat.h
class MSProxyState(Structure):
_fields_ = [("clientid", c_int)
,("serverid", c_int)
,("seq_recv", c_char)
,("seq_sent", c_char)
]
class Server(Structure):
_fields_ = [("f01", c_void_p) # stupid function pointers are at beginning
,("f02", c_void_p)
,("f03", c_void_p)
,("f04", c_void_p)
,("f05", c_void_p)
,("f06", c_void_p)
,("f07", c_void_p)
,("f08", c_void_p)
,("f09", c_void_p)
,("f10", c_void_p)
,("f11", c_void_p)
,("f12", c_void_p)
,("f13", c_void_p)
,("f14", c_void_p)
,("f15", c_void_p)
,("f16", c_void_p)
,("f17", c_void_p)
,("f18", c_void_p)
,("f19", c_void_p)
,("f20", c_void_p)
,("f21", c_void_p)
,("f22", c_void_p)
,("f23", c_void_p)
,("f24", c_void_p)
,("f25", c_void_p)
,("f26", c_void_p)
,("f27", c_void_p)
,("f28", c_void_p)
,("f29", c_void_p)
,("f30", c_void_p)
,("f31", c_void_p)
,("f32", c_void_p)
,("f33", c_void_p)
,("f34", c_void_p)
,("f35", c_void_p)
,("f36", c_void_p)
,("f37", c_void_p)
,("port", c_int)
,("sok", c_int)
,("sok4", c_int)
,("sok6", c_int)
,("proxy_type", c_int)
,("proxy_sok", c_int)
,("proxy_sok4", c_int)
,("proxy_sok6", c_int)
,("id", c_int)
# uncomment/comment per your config as appropriate
#ifdef USE_OPENSSL
,("ctx", c_void_p)
,("ssl", c_void_p)
,("ssl_do_connect_tag", c_int)
#else
#,("ssl_", c_void_p)
#endif
,("childread", c_int)
,("childwrite", c_int)
,("childpid", c_int)
,("iotag", c_int)
,("recondelay_tag", c_int)
,("joindelay_tag", c_int)
,("hostname", c_char * 128)
,("servername", c_char * 128)
]
class Session(Structure):
_fields_ = [("alert_beep", c_byte)
,("alert_taskbar", c_byte)
,("alert_tray", c_byte)
,("text_hidejoinpart", c_byte)
,("text_logging", c_byte)
,("text_scrollback", c_byte)
,("text_strip", c_byte)
,("server", POINTER(Server))
,("usertree", c_void_p)
,("me", POINTER(User))
,("channel", c_char * 300)
]
# chanview.c
class Chan(Structure):
_fields_ = [("chanview", c_void_p)
,("iter", GtkTreeIter)
,("userdata", POINTER(Session))
,("family", c_void_p)
,("impl", c_void_p)
,("icon", c_void_p)
,("allow_closure", c_short)
,("tag", c_short)
]
# since 2.9.6b1, gtkwin_ptr is properly set as the address in a hex string
gtkwin = int(xchat.get_info("gtkwin_ptr"), 16)
# TODO detect platform and appropriately load the DLL or SO
gtk = cdll.LoadLibrary("gtk-win32-2.0.dll")
gobject = cdll.LoadLibrary("gobject-2.0.dll")
glib = cdll.LoadLibrary("glib-2.0.dll")
GTKCALLBACK = CFUNCTYPE(None, POINTER(GTypeInstance), c_void_p)
gobject.g_type_check_instance_is_a.argtypes = [c_void_p, c_void_p]
gobject.g_type_check_instance_is_a.restype = c_bool
gobject.g_object_class_list_properties.argtypes = [c_void_p, POINTER(c_int)]
gobject.g_object_class_list_properties.restype = POINTER(POINTER(GParamSpec))
gobject.g_type_name.restype = c_char_p
gtk.gtk_tree_view_get_type.restype = POINTER(GTypeClass)
gtk.gtk_tree_model_iter_next.restype = c_bool
gtk.gtk_tree_path_to_string.restype = c_char_p
gtk.gtk_container_foreach.argtypes = [c_void_p, GTKCALLBACK, c_void_p]
gtk.gtk_tree_view_get_model.argtypes = [c_void_p]
gtk.gtk_tree_view_get_model.restype = c_void_p
gtk.gtk_tree_store_get_type.restype = POINTER(GTypeClass)
gtk.gtk_tree_model_get_iter_first.argtypes = [c_void_p, c_void_p]
gtk.gtk_tree_model_get_iter_first.restype = c_bool
gtk.gtk_tree_model_get.argtypes = [c_void_p, c_void_p, c_int, c_void_p, c_int, c_void_p, c_int]
gtk.gtk_tree_store_set.argtypes = [c_void_p, c_void_p, c_int, c_char_p, c_int]
gtk.gtk_tree_model_iter_children.argtypes = [c_void_p, c_void_p, c_void_p]
gtk.gtk_tree_model_iter_children.restype = c_bool
gtk.gtk_tree_model_iter_next.argtypes = [c_void_p, GtkTreeIter]
# can't mutate global data from C callbacks
class TreeNumerator:
treestore = None
activity = {}
prev_channels = []
timerhook = None
def __init__(self):
self.get_tree_store()
def get_tree_store(self):
if self.treestore:
return self.treestore
gtk.gtk_container_foreach(gtkwin,
GTKCALLBACK(self.get_tree_store_cb), None)
return self.treestore
def get_tree_store_cb(self, gobj, data):
if gobject.g_type_check_instance_is_a(gobj,
gtk.gtk_tree_view_get_type()):
gtk.gtk_tree_view_set_show_expanders(gobj, 0)
gtk.gtk_tree_view_set_enable_tree_lines(gobj, 0)
gtk.gtk_tree_view_set_level_indentation(gobj, 0)
store = gtk.gtk_tree_view_get_model(gobj)
if gobject.g_type_check_instance_is_a(
cast(store, POINTER(GTypeInstance)),
gtk.gtk_tree_store_get_type()):
self.treestore = store
return
gtk.gtk_container_foreach(gobj, GTKCALLBACK(self.get_tree_store_cb), None)
def process_tab(self, iter, n, seek=True):
v = c_char_p()
chan = POINTER(Chan)()
gtk.gtk_tree_model_get(self.treestore, byref(iter),
0, byref(v), # COL_NAME
1, byref(chan), # COL_CHAN
-1)
oldname = v.value
idx = v.value.find(" ")
if idx > 0:
oldname = v.value[idx:].strip()
glib.g_free(v)
activity = 0
if chan:
oldname = chan.contents.userdata.contents.channel
server = chan.contents.userdata.contents.server
if server:
servername = server.contents.servername
if servername:
activity = self.get_activity(servername, oldname)
if oldname:
newlabel = "(%d) %s" % (n, oldname.strip())
if activity != 0:
newlabel = "%s : %d" % (newlabel, activity)
gtk.gtk_tree_store_set(self.treestore, byref(iter),
0, c_char_p(newlabel), # COL_NAME
-1)
if seek:
return gtk.gtk_tree_model_iter_next(self.treestore, iter)
else:
return True
# prevent concurrent execution
enumerating = False
def enumerate_tabs(self):
if self.enumerating:
            return
        self.enumerating = True
        store = self.get_tree_store()
number = 1
iter = GtkTreeIter()
if not store:
return
has_next = gtk.gtk_tree_model_get_iter_first(
store, byref(iter))
while has_next:
self.process_tab(iter, number, False)
child = GtkTreeIter()
has_children = gtk.gtk_tree_model_iter_children(
store, byref(child), byref(iter))
number = number + 1
while has_children and self.process_tab(child, number):
number = number + 1
if has_children:
number = number + 1
has_next = gtk.gtk_tree_model_iter_next(self.treestore, iter)
self.enumerating = False
def enumerate_cb(self, data):
try:
self.enumerate_tabs()
except:
pass
if self.timerhook:
xchat.unhook(self.timerhook)
self.timerhook = None
def get_activity(self, server, channel):
key = server + ":" + channel
if key not in self.activity:
self.activity[key] = 0
return self.activity[key]
def add_activity(self, server, channel):
key = server + ":" + channel
if key not in self.activity:
self.activity[key] = 0
self.activity[key] = self.activity[key] + 1
if not self.timerhook:
self.timerhook = xchat.hook_timer(250, self.enumerate_cb)
def reset_activity_cb(self, word=None, word_eol=None, data=None):
channel = xchat.get_context().get_info("channel")
server = xchat.get_context().get_info("server")
if server and channel:
key = server + ":" + channel
self.activity[key] = 0
return xchat.EAT_NONE
def activity_cb(self, word=None, word_eol=None, data=None):
# enable after 2.9.6beta3
# see:
# https://github.com/hexchat/hexchat/commit/855c20501baba9e0bcda546b6c07f20dc5648659
# http://forum.xchat.org/viewtopic.php?f=5&t=7558
if xchat.get_context() != xchat.find_context():
channel = xchat.get_info("channel")
server = xchat.get_info("server")
self.add_activity(server, channel)
return xchat.EAT_NONE
def update_timer_cb(self, ignore_data):
try:
self.enumerate_tabs()
except RuntimeError as e:
self.log("R: unable to enumerate tabs: %s" % e)
except WindowsError as e:
pass
#self.log("W: unable to enumerate tabs: %s" %
# traceback.format_exception(*sys.exc_info()))
return 1
def log(self, msg):
ctx = xchat.find_context(channel=">>python<<")
if ctx:
# using emit_print results in an infinite loop with activity_cb
# with anything we hook_print
# even when filtering by channel != >>python<<
ctx.emit_print("Notice", "treenumbers", msg)
numerator = TreeNumerator()
# open context seems to overwrite any label changes, delay longer
hooks = []
def unload_cb(arg):
for hook in hooks:
xchat.unhook(hook)
numerator.log("successfully unloaded")
hooks.append(xchat.hook_unload(unload_cb))
def init(ignore_data=None):
hooks.append(xchat.hook_timer(500, numerator.update_timer_cb))
hooks.append(xchat.hook_print("Focus Tab",
numerator.reset_activity_cb))
for evt in ('Channel Action Hilight'
,'Channel Msg Hilight'
,'Channel Message'
,'Private Message to Dialog'
,'Private Action to Dialog'):
hooks.append(xchat.hook_print(evt, numerator.activity_cb))
try:
numerator.enumerate_tabs()
except WindowsError as e:
numerator.log("error on initial enumeration")
numerator.log("successfully loaded")
return 0 # do not repeat timer
# too soon and the plugin fails to load at startup, too many events firing...
# happens when connecting to my BNC, maybe not a problem with normal servers
xchat.hook_timer(5000, init)
|
85c13840def4def2095f6fe97507a75cbc60ed2b
|
d94a508dcaadf415201a1e001896f5ad82d618e1
|
/auto_forensicate/stamp/manager.py
|
382ba81514e788db929ae77077a2dccdea86fd99
|
[
"Apache-2.0"
] |
permissive
|
google/GiftStick
|
3d02700f46514c59d2cfc78db86f11806d5932b3
|
06059b1fc92db2bfd4d90ca9643433a7d8b633fe
|
refs/heads/main
| 2023-09-04T02:56:06.082469
| 2023-09-01T11:20:37
| 2023-09-01T11:20:37
| 154,346,214
| 128
| 26
|
Apache-2.0
| 2023-09-01T11:20:39
| 2018-10-23T14:49:54
|
Python
|
UTF-8
|
Python
| false
| false
| 1,900
|
py
|
manager.py
|
# -*- coding: utf-8 -*-
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Base Stamp classes."""
from collections import namedtuple
from auto_forensicate import hostinfo
BaseStamp = namedtuple('Stamp', ['identifier', 'start_time'])
class BaseStampManager(object):
"""Base class to generate the stamp file."""
def __init__(self, graphical=True):
"""Initializes a BaseStampManager object.
Args:
graphical (bool): whether we will request information from a graphical
environment.
"""
self._graphical = graphical
def BasePathElements(self, stamp):
"""Generates upload paths based on information in stamp.
Args:
stamp (BaseStamp): device information
Returns:
list(str): list of elements from the stamp
"""
remote_path_elems = [
stamp.start_time,
stamp.identifier
]
return remote_path_elems
def GetStamp(self, graphical=True):
"""Generates the "stamp" metadata to upload.
This contains information such as when the script is run, and the host's ID.
Args:
graphical(bool): Set to False if requesting the Stamp in an non-graphical
environment.
Returns:
BaseStamp: the content of the stamp.
"""
stamp = BaseStamp(
identifier=hostinfo.GetIdentifier(),
start_time=hostinfo.GetTime(),
)
return stamp
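# Minimal usage sketch (kept as a comment; it assumes a host where hostinfo can
# resolve an identifier and a timestamp, and nothing below is part of the original
# API surface beyond the two methods shown):
# manager = BaseStampManager(graphical=False)
# stamp = manager.GetStamp()
# remote_path = '/'.join(manager.BasePathElements(stamp)) # "<start_time>/<identifier>"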
|
c8e0b42a3aad89413a1766612b82a1338094601a
|
854b94d7be92582bd191a7cb63143a95e5b5c337
|
/hyfetch/distros/artix_small.py
|
53356f0ff4d911db893e87f07e777136cd477a39
|
[
"MIT"
] |
permissive
|
hykilpikonna/hyfetch
|
673c0c999d0f3f542349824495ad6004f450ebac
|
98863df16d70b030696f4b94080d114396320f35
|
refs/heads/master
| 2023-08-17T10:41:10.289997
| 2023-08-17T03:37:23
| 2023-08-17T03:37:23
| 479,913,941
| 447
| 78
|
MIT
| 2023-09-14T14:39:18
| 2022-04-10T04:38:15
|
Shell
|
UTF-8
|
Python
| false
| false
| 441
|
py
|
artix_small.py
|
# This file is automatically generated. Please do not modify.
from . import AsciiArt
artix_small = AsciiArt(match=r'''"artix_small"*''', color='6 6 7 1', ascii=r"""
${c1} '
'A'
'ooo'
'ookxo'
`ookxxo'
'. `ooko'
'ooo`. `oo'
'ooxxxoo`. `'
'ookxxxkooo.` .
'ookxxkoo'` .'oo'
'ooxoo'` .:ooxxo'
'io'` `'oo'
'` `'
""")
|
eeea3260b10018634d741330a7b13bb4d7bee252
|
2ab8c172bc4e9d3b3c75659585e83ade1e2eb832
|
/tests/tools/test_vi_model.py
|
15592072302b78d36b80a18f55999206927aeec3
|
[
"BSD-3-Clause"
] |
permissive
|
theislab/scvelo
|
0a5d717f8f025d7b6cf96ded2a7d5868f0484f43
|
d89ca6aecbe93256fbcdd8a521fdee2b9f2a673a
|
refs/heads/master
| 2023-07-25T16:28:58.484128
| 2023-07-25T15:21:12
| 2023-07-25T15:21:12
| 145,459,109
| 372
| 143
|
BSD-3-Clause
| 2023-07-25T15:21:14
| 2018-08-20T19:03:07
|
Python
|
UTF-8
|
Python
| false
| false
| 1,080
|
py
|
test_vi_model.py
|
from scvi.data import synthetic_iid
import scvelo as scv
from scvelo.tools import VELOVI
def test_preprocess_data():
adata = synthetic_iid()
adata.layers["spliced"] = adata.X.copy()
adata.layers["unspliced"] = adata.X.copy()
scv.pp.normalize_per_cell(adata)
scv.pp.log1p(adata)
scv.pp.moments(adata, n_pcs=30, n_neighbors=30)
# TODO: Use real data for this test
# preprocess_data(adata)
def test_velovi():
n_latent = 5
adata = synthetic_iid()
adata.layers["spliced"] = adata.X.copy()
adata.layers["unspliced"] = adata.X.copy()
VELOVI.setup_anndata(adata, unspliced_layer="unspliced", spliced_layer="spliced")
model = VELOVI(adata, n_latent=n_latent)
model.train(1, check_val_every_n_epoch=1, train_size=0.5)
model.get_latent_representation()
model.get_velocity()
model.get_latent_time()
model.get_state_assignment()
model.get_expression_fit()
model.get_directional_uncertainty()
model.get_permutation_scores(labels_key="labels")
model.history
# tests __repr__
print(model)
|
19f7e2752c3e5d3a23c6b8628904a192690be7ee
|
0e4860fecfdd34a3255003cc8c8df086c14083dd
|
/python/practise/Python之Django篇第六、七季:Web开发实战+用户登录模块/Django第七季完整项目源码-01/StudentMgr/apps/web/userweb/middleware/auth.py
|
f6954d5f202e00d055a7feef695a9b1e1a5c9ce8
|
[] |
no_license
|
anzhihe/learning
|
503ab9a58f280227011da5eaa4b14b46c678e6f3
|
66f7f801e1395207778484e1543ea26309d4b354
|
refs/heads/master
| 2023-08-08T11:42:11.983677
| 2023-07-29T09:19:47
| 2023-07-29T09:19:47
| 188,768,643
| 1,443
| 617
| null | 2023-08-24T02:10:34
| 2019-05-27T04:04:10
|
Python
|
UTF-8
|
Python
| false
| false
| 695
|
py
|
auth.py
|
from django.shortcuts import redirect, reverse
from django.utils.deprecation import MiddlewareMixin
# Import the Django settings module
from django.conf import settings
class Auth_Md(MiddlewareMixin):
def process_request(self, request):
        # Get the URL path of the current request
current_url = request.path_info
        # Skip authentication if the URL is in the whitelist
for item in settings.WHITE_URL_LIST:
if item == current_url:
return None
        # Get the user object from the session
obj_user = request.session.get('user')
        # Check whether the user exists; otherwise redirect to the login page
if obj_user:
request.user = obj_user
else:
return redirect(reverse('login'))
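# Minimal configuration sketch (kept as a comment; the values and the dotted module
# path are illustrative assumptions): the middleware expects a WHITE_URL_LIST setting
# listing the URLs that skip the login check, and a URL pattern named 'login' as the
# redirect target.
# # settings.py
# WHITE_URL_LIST = ['/login/', '/register/']
# MIDDLEWARE = [
#     # ... Django defaults ...
#     'apps.web.userweb.middleware.auth.Auth_Md',
# ]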
|
79623ee7b58b4452d9c6460bc23d766707687823
|
c6b9b9f2fbc6c62e7a86b02718954661af3c564f
|
/mmflow/models/losses/ssim.py
|
fdc8f3750e3fc70bec9d02d083b8e42515921638
|
[
"Apache-2.0"
] |
permissive
|
open-mmlab/mmflow
|
a90ff072805ac79cbc0b277baded1e74d25cccf0
|
9fb1d2f1bb3de641ddcba0dd355064b6ed9419f4
|
refs/heads/master
| 2023-05-22T05:19:48.986601
| 2023-01-10T16:05:18
| 2023-01-10T16:05:18
| 428,493,460
| 808
| 110
|
Apache-2.0
| 2023-09-05T13:19:38
| 2021-11-16T02:42:41
|
Python
|
UTF-8
|
Python
| false
| false
| 3,315
|
py
|
ssim.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from typing import Optional
import torch
import torch.nn.functional as F
from torch import Tensor
def weighted_ssim(x: Tensor,
y: Tensor,
weight: Optional[Tensor] = None,
c1=0.01**2,
c2=0.03**2,
weight_epsilon=0.01):
"""Computes a weighted structured image similarity.
This function is modified from
https://github.com/google-research/google-research/blob/master/uflow/uflow_utils.py
Copyright 2022 The Google Research Authors.
Args:
x (Tensor): A Tensor representing a batch of images, of shape
[B, C, H, W].
y (Tensor): A Tensor representing a batch of images, of shape
[B, C, H, W].
weight (Tensor, optional): A Tensor of shape [H, W], representing
the weight of each pixel in both images when we come to calculate
moments (means and correlations). Defaults to None.
c1 (float): A floating point number, regularizes division by zero of
the means. Defaults to 0.01**2.
        c2 (float): A floating point number, regularizes division by zero of
            the second moments. Defaults to 0.03 ** 2.
weight_epsilon (float): A floating point number, used to regularize
division by the weight. Defaults to 0.01.
Returns:
        A Tensor of shape [B, C, H-2, W-2], holding the scalar similarity
        loss per pixel per channel. Note that if ``weight`` was very small in
        some area of the images, this tensor will still assign a loss to these
        pixels, but the result shouldn't be taken too seriously there.
"""
if c1 == float('inf') and c2 == float('inf'):
raise ValueError(
'Both c1 and c2 are infinite, SSIM loss is zero. This is '
'likely unintended.')
_, _, H, W = x.shape
if weight is None:
weight = torch.ones((H, W)).to(x)
else:
assert weight.shape == (H, W), \
f'image shape is {(H, W)}, but weight shape is {weight.shape}'
weight = weight[None, None, ...]
average_pooled_weight = F.avg_pool2d(weight, (3, 3), stride=(1, 1))
weight_plus_epsilon = weight + weight_epsilon
inverse_average_pooled_weight = 1.0 / (
average_pooled_weight + weight_epsilon)
def weighted_avg_pool3x3(z):
weighted_avg = F.avg_pool2d(
z * weight_plus_epsilon, (3, 3), stride=(1, 1))
return weighted_avg * inverse_average_pooled_weight
mu_x = weighted_avg_pool3x3(x)
mu_y = weighted_avg_pool3x3(y)
sigma_x = weighted_avg_pool3x3(x**2) - mu_x**2
sigma_y = weighted_avg_pool3x3(y**2) - mu_y**2
sigma_xy = weighted_avg_pool3x3(x * y) - mu_x * mu_y
if c1 == float('inf'):
ssim_n = (2 * sigma_xy + c2)
ssim_d = (sigma_x + sigma_y + c2)
elif c2 == float('inf'):
ssim_n = 2 * mu_x * mu_y + c1
ssim_d = mu_x**2 + mu_y**2 + c1
else:
ssim_n = (2 * mu_x * mu_y + c1) * (2 * sigma_xy + c2)
ssim_d = (mu_x**2 + mu_y**2 + c1) * (sigma_x + sigma_y + c2)
result = ssim_n / ssim_d
return torch.clamp((1 - result) / 2, 0, 1)
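# Minimal usage sketch (the shapes below are illustrative assumptions, not part of
# the module): two [B, C, H, W] batches and an optional [H, W] weight map yield a
# per-pixel dissimilarity map of shape [B, C, H-2, W-2].
if __name__ == '__main__':
    _pred = torch.rand(2, 3, 16, 16)
    _target = torch.rand(2, 3, 16, 16)
    _weight = torch.ones(16, 16)
    _dissim = weighted_ssim(_pred, _target, weight=_weight)
    print(_dissim.shape)  # torch.Size([2, 3, 14, 14])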
|
4cbb73339f368c05af5d6b93f501261810b0344b
|
bbd69601912a3361d788efd03a47f9d4e3bac09e
|
/unittests/test_layout.py
|
3efdcff6d5e02b8c2db3996dba751b358a108726
|
[] |
no_license
|
wxWidgets/Phoenix
|
56929484460a0399a8f1d9582bc77c20aa14748d
|
a1184286703cf24c4b88e5bc14cf2979c1b1ea00
|
refs/heads/master
| 2023-09-01T07:10:17.437093
| 2023-08-31T05:38:01
| 2023-08-31T05:38:01
| 5,078,061
| 2,268
| 677
| null | 2023-09-09T17:06:59
| 2012-07-17T06:22:25
|
Python
|
UTF-8
|
Python
| false
| false
| 687
|
py
|
test_layout.py
|
import unittest
from unittests import wtc
import wx
import os
#---------------------------------------------------------------------------
class layout_Tests(wtc.WidgetTestCase):
def test_layout(self):
frame = self.frame
panel = wx.Panel(frame)
panel.BackgroundColour = 'blue'
lc = wx.LayoutConstraints()
lc.top.SameAs(frame, wx.Top, 10)
lc.left.SameAs(frame, wx.Left, 10)
lc.bottom.SameAs(frame, wx.Bottom, 10)
lc.right.PercentOf(frame, wx.Right, 50)
panel.SetConstraints(lc)
#---------------------------------------------------------------------------
if __name__ == '__main__':
unittest.main()
|
d15b331b18300c36047ea497bfc968ef88ba9461
|
d26bcefcb646e1bf9843d4872b2f85c12a0872db
|
/molfeat/calc/_map4.py
|
578ee916c4ad53268512f4a26ee9a64c1a82c63f
|
[
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] |
permissive
|
datamol-io/molfeat
|
2247335df72fb6ac715869a5752d7215f61c5af0
|
4390f9fce25fa2da94338227f7c8f33a23e25b2a
|
refs/heads/main
| 2023-08-31T18:27:43.933571
| 2023-08-01T13:42:04
| 2023-08-01T13:42:04
| 613,548,667
| 111
| 14
|
Apache-2.0
| 2023-09-08T12:31:02
| 2023-03-13T19:39:29
|
Python
|
UTF-8
|
Python
| false
| false
| 1,472
|
py
|
_map4.py
|
from typing import Union
import datamol as dm
from loguru import logger
from molfeat.utils import requires
if requires.check("map4"):
from map4 import MAP4Calculator
else:
MAP4Calculator = requires.mock("map4")
def MAP4(
x: Union[dm.Mol, str],
dimensions: int = 2048,
radius: int = 2,
is_counted: bool = False,
is_folded: bool = True,
return_strings: bool = False,
**kwargs,
):
"""Compute MHFP fingerprint
Args:
x: input molecule
dimensions (int, optional): Length of the fingerprint (default: 2048).
radius (int, optional): Radius of the fingerprint (default: 3)
is_counted (bool, optional): Whether to use counted fingerprints (default: True)
is_folded (bool, optional): Whether to fold the fingerprint (default: True)
return_strings (bool, optional): Whether to return strings values (default: False)
Returns:
fp: fingerprint
"""
if not requires.check("map4"):
logger.error(
"`map4` is not available, please install it: https://github.com/reymond-group/map4"
)
raise ImportError("Cannot import `map4`")
if isinstance(x, str):
x = dm.to_mol(x)
map4_encoder = MAP4Calculator(
dimensions=dimensions,
radius=radius,
is_counted=is_counted,
is_folded=is_folded,
return_strings=return_strings,
)
encoded_fp = map4_encoder.calculate(x)
return encoded_fp
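# Minimal usage sketch (kept as a comment because it needs the optional `map4`
# dependency at runtime; the SMILES string is an arbitrary example):
# fp = MAP4("CCO", dimensions=1024, radius=2)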
|
85fe908202f8cf50e14353232539503626614881
|
2dc24a356ebe7a362623780603379a5b35a65c2f
|
/terraform/stacks/bot/lambdas/python/slack_automation_bot/slack_bolt/context/respond/internals.py
|
28cbbbec4dc6f386a16e417e22001c16dbefc9bf
|
[
"MIT"
] |
permissive
|
cloud-sniper/cloud-sniper
|
cef08402f9109211c33909bdb3de07b16952e308
|
4b026da33695b25033c7667679f3cf552c4bf3b5
|
refs/heads/master
| 2023-06-24T20:46:02.377409
| 2023-04-14T14:48:45
| 2023-04-14T14:48:45
| 210,739,453
| 184
| 36
|
MIT
| 2023-04-14T14:48:46
| 2019-09-25T02:34:26
|
Python
|
UTF-8
|
Python
| false
| false
| 1,068
|
py
|
internals.py
|
from typing import Optional, Dict, Union, Any, Sequence
from slack_sdk.models.attachments import Attachment
from slack_sdk.models.blocks import Block
from slack_bolt.util.utils import convert_to_dict_list
def _build_message(
text: str = "",
blocks: Optional[Sequence[Union[dict, Block]]] = None,
attachments: Optional[Sequence[Union[dict, Attachment]]] = None,
response_type: Optional[str] = None,
replace_original: Optional[bool] = None,
delete_original: Optional[bool] = None,
) -> Dict[str, Any]:
message = {"text": text}
if blocks is not None and len(blocks) > 0:
message["blocks"] = convert_to_dict_list(blocks)
if attachments is not None and len(attachments) > 0:
message["attachments"] = convert_to_dict_list(attachments)
if response_type is not None:
message["response_type"] = response_type
if replace_original is not None:
message["replace_original"] = replace_original
if delete_original is not None:
message["delete_original"] = delete_original
return message
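# Minimal usage sketch (kept as a comment; the values are illustrative): only the
# arguments that were actually provided end up in the resulting payload, e.g.
# _build_message(text="done", response_type="ephemeral")
# -> {"text": "done", "response_type": "ephemeral"}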
|
b47049626a7ce07bf85d71bad8791eb9795af62d
|
e384f5467d8bcfd70845997bcbd68d950e874a61
|
/example/python/MyLibOGL/ogl/vertex.py
|
536abab5d55767b05fe3e7d4bac2cb74c8405353
|
[] |
no_license
|
Rabbid76/graphics-snippets
|
ee642f1ed9ceafc6d320e467d3a084d2446d22c2
|
fa187afeabb9630bc1d988304fb5787e95a91385
|
refs/heads/master
| 2023-08-04T04:32:06.884318
| 2023-07-21T09:15:43
| 2023-07-21T09:15:43
| 109,126,544
| 177
| 12
| null | 2023-04-11T20:05:52
| 2017-11-01T12:05:56
|
C++
|
UTF-8
|
Python
| false
| false
| 17,403
|
py
|
vertex.py
|
import sys
import math
import ctypes # needed for the attribute offset cast in DrawBuffer.DefineAndEnableAttribute
#import array
# Numpy improt [http://www.numpy.org/]
import numpy
# PyOpenGL import [http://pyopengl.sourceforge.net/]
from OpenGL.GL import *
# TODO numpy -> array [https://docs.python.org/3/library/array.html]
# TODO use double array
# vertex array object
class VAObject:
# dataArrays: e.g. [ (3, [Vx0, Vy0, Vz0, Vx1, Vy1, Vz1, .... ]), (3, [ Nx0, Ny0, Nz0, .... ]), (3, [ Cr0, Cg0, Cb0, .... ]), (2, [ Tu0, Tv0, ..... ]) ]
def __init__( self, dataArrays, indices = [], type = GL_TRIANGLES, patch_vertices = 3 ):
self.__obj = glGenVertexArrays( 1 )
self.__noOfIndices = len( indices )
self.__indexArr = numpy.array( indices, dtype=numpy.uint32 )
self.__type = type
self.__patch_vertices = patch_vertices
self.__vertexSize = []
self.__dataLength = []
self.__noOfBuffers = len( dataArrays )
self.__buffers = glGenBuffers( self.__noOfBuffers )
glBindVertexArray( self.__obj )
for i_buffer in range( 0, self.__noOfBuffers ):
vertexSize, dataArr = dataArrays[i_buffer]
self.__vertexSize.append( vertexSize )
self.__dataLength.append( len( dataArr ) )
glBindBuffer( GL_ARRAY_BUFFER, self.__buffers if self.__noOfBuffers == 1 else self.__buffers[i_buffer] )
glBufferData( GL_ARRAY_BUFFER, numpy.array( dataArr, dtype=numpy.float32 ), GL_STATIC_DRAW )
glEnableVertexAttribArray( i_buffer )
glVertexAttribPointer( i_buffer, self.__vertexSize[i_buffer], GL_FLOAT, GL_FALSE, 0, None )
self.__iBuffer = glGenBuffers( 1 )
glBindBuffer( GL_ARRAY_BUFFER, 0 )
if self.__noOfIndices > 0:
glBindBuffer( GL_ELEMENT_ARRAY_BUFFER, self.__iBuffer )
glBufferData( GL_ELEMENT_ARRAY_BUFFER, self.__indexArr, GL_STATIC_DRAW )
glBindVertexArray( 0 )
glBindBuffer( GL_ELEMENT_ARRAY_BUFFER, 0 )
def DrawArray(self):
glBindVertexArray( self.__obj )
if self.__type == GL_PATCHES:
glPatchParameteri( GL_PATCH_VERTICES, self.__patch_vertices )
glDrawArrays( self.__type, 0, self.__dataLength[0] )
glBindVertexArray( 0 )
def Draw(self):
if self.__noOfIndices == 0:
self.DrawArray()
return
glBindVertexArray( self.__obj )
#for i_buffer in range( 0, self.__noOfBuffers ):
# glBindBuffer( GL_ARRAY_BUFFER, self.__buffers if self.__noOfBuffers == 1 else self.__buffers[i_buffer] )
# glEnableVertexAttribArray( i_buffer )
# glVertexAttribPointer( i_buffer, self.__vertexSize[i_buffer], GL_FLOAT, GL_FALSE, 0, None )
#glDrawElements( self.__type, self.__noOfIndices, GL_UNSIGNED_INT, self.__indexArr )
if self.__type == GL_PATCHES:
glPatchParameteri( GL_PATCH_VERTICES, self.__patch_vertices )
glDrawElements( self.__type, self.__noOfIndices, GL_UNSIGNED_INT, None )
glBindVertexArray( 0 )
# vertex array object specification (stride or tight)
class MeshBuffer:
# ctor
def __init__(self, usage = GL_STATIC_DRAW):
self.__usage = usage
self.__vao = 0
self.__vbo = {}
self.__ibo = { 'ibo': 0, 'size': 0 }
# dtor
def __del__(self):
vbos = []
for i_buffer in self.__vbo:
vbos.append( self.__vbo[i_buffer]['vbo'] )
glDeleteBuffers( len(vbos), vbos )
if self.__ibo['ibo'] != 0:
glDeleteBuffers( 1, self.__ibo['ibo'] )
if self.__vao != 0:
glDeleteVertexArrays( 1, self.__vao )
def __BindVA(self):
if self.__vao == 0:
self.__vao = glGenVertexArrays( 1 )
glBindVertexArray( self.__vao )
# define a list of vertex buffers
def DefineVA(self, buffers):
self.__BindVA()
# for all buffers
for i_buffer in range(len(buffers)):
self.DefineVB(i_buffer, buffers[i_buffer])
glBindVertexArray( 0 )
    # define a single vertex buffer
def DefineVB(self, i_buffer, buffer):
if not (i_buffer in self.__vbo):
self.__vbo[i_buffer] = { 'vbo': 0, 'stride': 0, 'size': 0, 'attribs': [] }
        # for all attributes in buffer
attribs, stride, data = buffer
size = len(data)
if self.__vbo[i_buffer]['vbo'] == 0:
self.__vbo[i_buffer]['vbo'] = glGenBuffers( 1 )
glBindBuffer( GL_ARRAY_BUFFER, self.__vbo[i_buffer]['vbo'] )
arrdata = numpy.array( data, dtype=numpy.float32 )
if self.__vbo[i_buffer]['size'] < size:
glBufferData( GL_ARRAY_BUFFER, arrdata, self.__usage )
self.__vbo[i_buffer]['size'] = size
else:
glBufferSubData( GL_ARRAY_BUFFER, 0, arrdata )
for i_attrib in range(len(attribs)):
attrib_index, attrib_size, attrib_offset = attribs[i_attrib]
self.__vbo[i_buffer]['attribs'].append( (attrib_index, attrib_size, attrib_offset) )
glEnableVertexAttribArray( attrib_index )
if stride==0:
glVertexAttribPointer( attrib_index, attrib_size, GL_FLOAT, GL_FALSE, stride, None )
else:
glVertexAttribPointer( attrib_index, attrib_size, GL_FLOAT, GL_FALSE, stride, attrib_offset )
glBindBuffer( GL_ARRAY_BUFFER, 0 )
def DefineIB(self, indices):
self.__BindVA()
if self.__ibo['ibo'] == 0:
self.__ibo['ibo'] = glGenBuffers( 1 )
glBindBuffer( GL_ELEMENT_ARRAY_BUFFER, self.__ibo['ibo'] )
size = len(indices)
indexdata = numpy.array( indices, dtype=numpy.uint32 )
if self.__ibo['size'] < size:
glBufferData( GL_ELEMENT_ARRAY_BUFFER, indexdata, self.__usage )
self.__ibo['size'] = size
else:
glBufferSubData( GL_ELEMENT_ARRAY_BUFFER, 0, indexdata )
glBindVertexArray( 0 )
        glBindBuffer( GL_ELEMENT_ARRAY_BUFFER, 0 ) # has to be unbound after the vertex array object was unbound!
def DrawIB(self, type):
glBindVertexArray( self.__vao )
glDrawElements( type, self.__ibo['size'], GL_UNSIGNED_INT, None )
glBindVertexArray( 0 )
CLIENT_VERTEX = -1 # GL_VERTEX_ARRAY
CLIENT_NORMAL = -2 # GL_NORMAL_ARRAY
CLIENT_TEXTURE = -3 # GL_TEXTURE_COORD_ARRAY
CLIENT_COLOR = -4 # GL_COLOR_ARRAY
CLIENT_COLOR_2 = -5 # GL_SECONDARY_COLOR_ARRAY
CLIENT_INDEX = -6 # GL_INDEX_ARRAY
CLIENT_EDGE_FLAG = -7 # GL_EDGE_FLAG_ARRAY
CLIENT_FOG_COORD = -8 # GL_FOG_COORD_ARRAY
TYPE_float32 = 0 # GL_FLOAT
TYPE_float64 = 1 # GL_DOUBLE
TYPE_unit8 = 2 # GL_UNSIGNED_BYTE
TYPE_uint16 = 3 # GL_UNSIGNED_SHORT
TYPE_uint32 = 4 # GL_UNSIGNED_INT
# class which creates and manages vertex array objects with the following description
#
# <index buffer>      : index of the index buffer; 0 is default; a negative value means no index buffer is required
# <no of buffers>     : the number of the required vertex buffer objects - followed by the list of vbo specifications
# {
#     <array buffer index> : index of the array buffer
#     <stride>             : stride from one vertex attribute set to the next, in float (4 byte) units
#     <no of attributes>   : number of the generic vertex attributes in the buffer - followed by the list of attribute specifications
#     {
#         <attribute index>  : vertex attribute index or client state
#         <attribute size>   : number of elements of the vertex attribute
#         <attribute type>   : type of an attribute
#         <attribute offset> : offset of the vertex attributes from the beginning of the attribute set
# }
# }
#
#
# e.g. Strided record sets:
# Vx0, Vy0, Vz0, Nx0, Ny0, Nz0, Tu0, Tv0, Vx1, Vy1, Vz1, Nx1, Ny1, Nz1, Tu1, Tv1, ....
#
# [0, 1, 0, 8, 3, -1, 3, 0, 0, -2, 3, 0, 3, -3, 2, 0, 6]
#
#
# e.g. Tightly packed vertex attributes:
# Vx0, Vy0, Vz0, Vx1, Vy1, Vz1, ....
# Nx0, Ny0, Nz0, Nx1, Ny1, Nz1, ....
# Tu0, Tv0, Tu1, Tv1 ....
#
# [0, 3, 0, 0, 1, -1, 3, 0, 0, 0, 1, 0, -2, 3, 0, 0, 0, 1, 0, -3, 2, 0, 0]
#
class DrawBuffer:
# ctor
def __init__(self, usage = GL_STREAM_DRAW):
self.__usage = usage # usage type of buffer objects GL_STATIC_DRAW, GL_DYNAMIC_DRAW, GL_STREAM_DRAW
self.__currVAO = 0 # current selected vertex array object <GPU name>
        self.__currNoElems = 0 # number of elements in the currently selected vertex array object
        self.__vaos = {} # map description -> (vertex array object <GPU name>, description)
self.__ibos = {} # map index -> ( element array buffer <GPU name>, size <count> of element array buffer )
self.__vbos = {} # list of array buffers ( array buffer <GPU name>, size <count> of array buffer )
# dtor
def __del__(self):
# delete array buffers
vbos = []
for vbo in self.__vbos: vbos.append( vbo[0] )
if len(vbos) > 0: glDeleteBuffers( len(vbos), vbos )
# delete element array buffers
ibos = []
for i_ibo in self.__ibos: ibos.append( self.__ibos[i_ibo][0] )
if len(ibos) > 0: glDeleteBuffers( len(ibos), ibos )
# delete vertex array objects
vaos = []
for i_vao in self.__vaos: vaos.append( self.__vaos[i_vao][0] )
if len(vaos) > 0: glDeleteVertexArrays( len(vaos), vaos )
# unbind any vertex array object
def UnbindVAO(self):
glBindVertexArray( 0 )
# bind the currently selected vertex array object
def BindVAO(self):
if self.__currVAO == 0:
print( "No vertex array object is selected" )
sys.exit()
glBindVertexArray( self.__currVAO )
# define and enable an array of generic vertex attribute
    # or define and enable a client array
def DefineAndEnableAttribute(self, attr_id, attr_size, attr_type, attr_offs, stride):
        # map the attribute type to the corresponding OpenGL type constant
        opengl_type = GL_FLOAT
        if attr_type == TYPE_float32:
            opengl_type = GL_FLOAT
        elif attr_type == TYPE_float64:
            opengl_type = GL_DOUBLE
        elif attr_type == TYPE_unit8:
            opengl_type = GL_UNSIGNED_BYTE
        elif attr_type == TYPE_uint16:
            opengl_type = GL_UNSIGNED_SHORT
        elif attr_type == TYPE_uint32:
            opengl_type = GL_UNSIGNED_INT
# define and enable an array of generic vertex attribute
if attr_id >= 0:
            offset = attr_offs * 4 # attribute offset in bytes (float units * 4 bytes)
glVertexAttribPointer( attr_id, attr_size, GL_FLOAT, GL_FALSE, stride*4, None if stride == 0 else ctypes.cast(offset, ctypes.c_void_p) )
glEnableVertexAttribArray( attr_id )
# define an array of generic vertex attribute data
elif attr_id == CLIENT_VERTEX:
glVertexPointer( attr_size, GL_FLOAT, stride, None if stride == 0 else attr_offs )
glEnableClientState( GL_VERTEX_ARRAY )
# define an array of normals
elif attr_id == CLIENT_NORMAL:
glNormalPointer( GL_FLOAT, stride, None if stride == 0 else attr_offs )
glEnableClientState( GL_NORMAL_ARRAY )
# define an array of texture coordinates
        elif attr_id == CLIENT_TEXTURE:
glTexCoordPointer( attr_size, GL_FLOAT, stride, None if stride == 0 else attr_offs )
glEnableClientState( GL_TEXTURE_COORD_ARRAY )
# define an array of colors
elif attr_id == CLIENT_COLOR:
glColorPointer( attr_size, GL_FLOAT, stride, None if stride == 0 else attr_offs )
glEnableClientState( GL_COLOR_ARRAY )
# define an array of secondary colors
elif attr_id == CLIENT_COLOR_2:
glSecondaryColorPointer( attr_size, GL_FLOAT, stride, None if stride == 0 else attr_offs )
glEnableClientState( GL_SECONDARY_COLOR_ARRAY )
# define an array of color indexes
elif attr_id == CLIENT_INDEX:
glIndexPointer( attr_size, stride, None if stride == 0 else attr_offs )
glEnableClientState( GL_INDEX_ARRAY )
# define an array of edge flags
elif attr_id == CLIENT_EDGE_FLAG:
glEdgeFlagPointer ( stride, None if stride == 0 else attr_offs )
glEnableClientState( GL_EDGE_FLAG_ARRAY )
# define an array of fog coordinates
elif attr_id == CLIENT_FOG_COORD:
glFogCoordPointer ( attr_size, stride, None if stride == 0 else attr_offs )
glEnableClientState( GL_FOG_COORD_ARRAY )
else:
print( "Illegal vertex array index or client state" )
sys.exit()
# set data to buffer
def UpdateBuffer(self, type, i_bo, bos, data ):
bo, size = bos[i_bo]
new_data = data
new_size = len(new_data)
glBindBuffer( type, bo )
if new_size > size: # the buffer has to be enlarged - slow
glBufferData( type, data, self.__usage )
bos[i_bo] = bo, new_size
        else: # the buffer is large enough - quick
glBufferSubData( type, 0, data )
return bos[i_bo][1]
    # If a vertex array object with the given description exists, it becomes the current vertex array object.
    # If no vertex array object that follows the description exists, a new vertex array object is created and made the current object.
    # Note, the vertex array object is not left bound; it is unbound afterwards.
def ProvideVAO(self, description):
# Create description key array
key = numpy.array( description, dtype=numpy.int8 )
hashcode = 5381
for c in key:
hashcode = hashcode * 33 + hashcode + c
# Check if a proper vertex array object already exists
if hashcode in self.__vaos:
self.__currVAO = self.__vaos[hashcode][0]
return hashcode
        # Check if the required buffers exist and create them if they have not been created yet.
i_ibo = key[0]
no_of_vbo = key[1]
if i_ibo >= 0 and (i_ibo not in self.__ibos): self.__ibos[i_ibo] = (glGenBuffers( 1 ), 0)
# Create vertex array object
self.__vaos[hashcode] = (glGenVertexArrays( 1 ), key)
self.__currVAO = self.__vaos[hashcode][0]
self.BindVAO()
        # Create a new vertex array object according to the description
i_key = 2
for i_b in range(no_of_vbo):
i_vbo, stride, no_of_attr = key[i_key], key[i_key+1], key[i_key+2]
i_key = i_key + 3
if i_vbo >= 0 and (i_vbo not in self.__vbos): self.__vbos[i_vbo] = (glGenBuffers( 1 ), 0)
glBindBuffer( GL_ARRAY_BUFFER, self.__vbos[i_vbo][0] )
for i_attr in range(no_of_attr):
attr_id, attr_size, attr_type, attr_offs = key[i_key], key[i_key+1], key[i_key+2], key[i_key+3]
i_key = i_key + 4
self.DefineAndEnableAttribute( attr_id, attr_size, attr_type, attr_offs, stride )
glBindBuffer( GL_ARRAY_BUFFER, 0 )
# Associate the element array buffer (index buffer) to the vertex array object
if i_ibo >= 0:
glBindBuffer( GL_ELEMENT_ARRAY_BUFFER, self.__ibos[i_ibo][0] )
# Unbind the vertex array object
self.UnbindVAO()
        # Unbind the element array buffer
# This has to be done after the vertex array object is unbound, otherwise the association to the vertex array object would be lost.
if i_ibo >= 0:
glBindBuffer( GL_ELEMENT_ARRAY_BUFFER, 0 )
return hashcode
# define a vertex array object
def DefineVAO(self, description, buffers, indices):
# Find or create the vertex array object
hashcode = self.ProvideVAO( description )
        # update vertex attribute data
        if isinstance(buffers, (list, tuple)): # check if list of buffers
for i_vbo in range(len(buffers)):
arrdata = numpy.array( buffers[i_vbo], dtype=numpy.float32 )
self.UpdateBuffer( GL_ARRAY_BUFFER, i_vbo, self.__vbos, arrdata )
        else: # single buffer
arrdata = numpy.array( buffers, dtype=numpy.float32 )
self.UpdateBuffer( GL_ARRAY_BUFFER, 0, self.__vbos, arrdata )
glBindBuffer( GL_ARRAY_BUFFER, 0 )
# update indices data
i_ibo = description[0]
if i_ibo >= 0:
indexdata = numpy.array( indices, dtype=numpy.uint32 )
self.__currNoElems = self.UpdateBuffer( GL_ELEMENT_ARRAY_BUFFER, i_ibo, self.__ibos, indexdata )
else:
self.__currNoElems = 0
glBindBuffer( GL_ELEMENT_ARRAY_BUFFER, 0 )
# bind vertex attribute object
self.BindVAO()
return hashcode
# draw all elements of the current vertex array object
def DrawAllElements(self, type):
self.BindVAO()
glDrawElements( type, self.__currNoElems , GL_UNSIGNED_INT, None )
self.UnbindVAO()
    # draw a range of vertices of the current vertex array object
def DrawArray(self, type, start, count):
self.BindVAO()
glDrawArrays( type, start, count )
self.UnbindVAO()
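# Minimal usage sketch (kept as a comment; it assumes a current OpenGL context and
# illustrative vertex/index data, none of which is defined in this module): one
# interleaved buffer with a 3-float position (attribute 0) and a 2-float texture
# coordinate (attribute 1), drawn through index buffer 0.
# draw_buffer = DrawBuffer()
# description = [0, 1, 0, 5, 2, 0, 3, TYPE_float32, 0, 1, 2, TYPE_float32, 3]
# draw_buffer.DefineVAO(description, vertex_data, index_data)
# draw_buffer.DrawAllElements(GL_TRIANGLES)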
|
92225d7424a1fb2059db6bd947730d882e7e0563
|
5835ec8ce289f21f0d5be130ff5a725d6fbad9ee
|
/examples/atari/ppo/atari_ppo_rnd.py
|
9dd76ed08a482371a0b156628749721cdde5c33b
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
facebookresearch/rlmeta
|
4f3b8649380666a0661a58e33f5cc9857c98acf9
|
06b2dc2b04c78241ba30addc60d71fba8b9aec76
|
refs/heads/main
| 2023-05-23T11:00:24.819345
| 2023-02-11T06:20:05
| 2023-02-11T06:20:05
| 438,839,283
| 296
| 24
|
MIT
| 2023-02-11T06:20:07
| 2021-12-16T02:47:46
|
Python
|
UTF-8
|
Python
| false
| false
| 5,904
|
py
|
atari_ppo_rnd.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import copy
import json
import logging
import time
import hydra
import torch
import torch.multiprocessing as mp
import rlmeta.envs.atari_wrapper as atari_wrapper
import rlmeta.utils.hydra_utils as hydra_utils
import rlmeta.utils.random_utils as random_utils
import rlmeta.utils.remote_utils as remote_utils
from examples.atari.ppo.atari_ppo_rnd_model import AtariPPORNDModel
from rlmeta.agents.agent import AgentFactory
from rlmeta.agents.ppo import PPORNDAgent
from rlmeta.core.controller import Phase, Controller
from rlmeta.core.loop import LoopList, ParallelLoop
from rlmeta.core.model import ModelVersion, RemotableModelPool
from rlmeta.core.model import make_remote_model, wrap_downstream_model
from rlmeta.core.replay_buffer import ReplayBuffer, make_remote_replay_buffer
from rlmeta.core.server import Server, ServerList
from rlmeta.samplers import UniformSampler
from rlmeta.storage import TensorCircularBuffer
from rlmeta.utils.optimizer_utils import make_optimizer
@hydra.main(config_path="./conf", config_name="conf_ppo")
def main(cfg):
if cfg.seed is not None:
random_utils.manual_seed(cfg.seed)
logging.info(hydra_utils.config_to_json(cfg))
env = atari_wrapper.make_atari_env(**cfg.env)
model = AtariPPORNDModel(env.action_space.n,
network=cfg.network).to(cfg.train_device)
model_pool = RemotableModelPool(copy.deepcopy(model).to(cfg.infer_device),
seed=cfg.seed)
optimizer = make_optimizer(model.parameters(), **cfg.optimizer)
ctrl = Controller()
replay_buffer = ReplayBuffer(TensorCircularBuffer(cfg.replay_buffer_size),
UniformSampler())
m_server = Server(cfg.m_server_name, cfg.m_server_addr)
r_server = Server(cfg.r_server_name, cfg.r_server_addr)
c_server = Server(cfg.c_server_name, cfg.c_server_addr)
m_server.add_service(model_pool)
r_server.add_service(replay_buffer)
c_server.add_service(ctrl)
servers = ServerList([m_server, r_server, c_server])
learner_model = wrap_downstream_model(model, m_server)
t_actor_model = make_remote_model(model,
m_server,
version=ModelVersion.LATEST)
# During blocking evaluation STABLE is the same as LATEST, so the evaluation actor also uses the LATEST version
e_actor_model = make_remote_model(model,
m_server,
version=ModelVersion.LATEST)
a_ctrl = remote_utils.make_remote(ctrl, c_server)
t_ctrl = remote_utils.make_remote(ctrl, c_server)
e_ctrl = remote_utils.make_remote(ctrl, c_server)
learner_replay_buffer = make_remote_replay_buffer(replay_buffer,
r_server,
prefetch=cfg.prefetch)
t_actor_replay_buffer = make_remote_replay_buffer(replay_buffer, r_server)
env_fac = atari_wrapper.AtariWrapperFactory(**cfg.env)
t_agent_fac = AgentFactory(PPORNDAgent,
t_actor_model,
replay_buffer=t_actor_replay_buffer)
e_agent_fac = AgentFactory(
PPORNDAgent,
e_actor_model,
deterministic_policy=cfg.deterministic_evaluation)
t_loop = ParallelLoop(env_fac,
t_agent_fac,
t_ctrl,
running_phase=Phase.TRAIN,
should_update=True,
num_rollouts=cfg.num_training_rollouts,
num_workers=cfg.num_training_workers,
seed=cfg.seed)
e_loop = ParallelLoop(env_fac,
e_agent_fac,
e_ctrl,
running_phase=Phase.EVAL,
should_update=False,
num_rollouts=cfg.num_evaluation_rollouts,
num_workers=cfg.num_evaluation_workers,
seed=(None if cfg.seed is None else cfg.seed +
cfg.num_training_rollouts))
loops = LoopList([t_loop, e_loop])
learner = PPORNDAgent(learner_model,
replay_buffer=learner_replay_buffer,
controller=a_ctrl,
optimizer=optimizer,
batch_size=cfg.batch_size,
learning_starts=cfg.get("learning_starts", None),
model_push_period=cfg.model_push_period)
servers.start()
loops.start()
learner.connect()
start_time = time.perf_counter()
for epoch in range(cfg.num_epochs):
stats = learner.train(cfg.steps_per_epoch)
cur_time = time.perf_counter() - start_time
info = f"T Epoch {epoch}"
if cfg.table_view:
logging.info("\n\n" + stats.table(info, time=cur_time) + "\n")
else:
logging.info(
stats.json(info, phase="Train", epoch=epoch, time=cur_time))
time.sleep(1)
stats = learner.eval(cfg.num_evaluation_episodes,
keep_training_loops=True)
cur_time = time.perf_counter() - start_time
info = f"E Epoch {epoch}"
if cfg.table_view:
logging.info("\n\n" + stats.table(info, time=cur_time) + "\n")
else:
logging.info(
stats.json(info, phase="Eval", epoch=epoch, time=cur_time))
torch.save(model.state_dict(), f"ppo_rnd_agent-{epoch}.pth")
time.sleep(1)
loops.terminate()
servers.terminate()
if __name__ == "__main__":
mp.set_start_method("spawn")
main()
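# --- Illustrative sketch, not part of the original rlmeta example. ---
# The RND bonus itself lives in AtariPPORNDModel / PPORNDAgent, which this
# script only wires together. The class below is a minimal, hedged sketch of
# the general RND idea (a frozen random "target" network plus a trained
# "predictor" network; the prediction error acts as an intrinsic reward).
# All names here are hypothetical and unrelated to the rlmeta implementation.
class _TinyRNDSketch(torch.nn.Module):
    def __init__(self, obs_dim: int, feat_dim: int = 64) -> None:
        super().__init__()
        self.target = torch.nn.Linear(obs_dim, feat_dim)     # frozen, random
        self.predictor = torch.nn.Linear(obs_dim, feat_dim)  # trained
        for p in self.target.parameters():
            p.requires_grad_(False)

    def intrinsic_reward(self, obs: torch.Tensor) -> torch.Tensor:
        # Per-sample squared prediction error serves as the novelty bonus.
        return (self.predictor(obs) - self.target(obs)).pow(2).mean(dim=-1)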
|
945bbf90c92a33fa8cd1bdc4e7a3f97c6660e5f9
|
eb9f655206c43c12b497c667ba56a0d358b6bc3a
|
/python/testData/psi/MissingListSeparators.py
|
7e3c715664c985d1db405d0d09a47271f4827e1d
|
[
"Apache-2.0"
] |
permissive
|
JetBrains/intellij-community
|
2ed226e200ecc17c037dcddd4a006de56cd43941
|
05dbd4575d01a213f3f4d69aa4968473f2536142
|
refs/heads/master
| 2023-09-03T17:06:37.560889
| 2023-09-03T11:51:00
| 2023-09-03T12:12:27
| 2,489,216
| 16,288
| 6,635
|
Apache-2.0
| 2023-09-12T07:41:58
| 2011-09-30T13:33:05
| null |
UTF-8
|
Python
| false
| false
| 11
|
py
|
MissingListSeparators.py
|
a = [1 2 3]
|
0dd9019848e0225ad87e6a9799099a653f055666
|
b347bc4b850dee4a8a9a171b563a3f31230ce1c7
|
/sktime/proba/tests/test_proba_basic.py
|
9cee3cd3814dd5f76b328a81dd67a3e501c6ced2
|
[
"BSD-3-Clause"
] |
permissive
|
sktime/sktime
|
5963962df338c5931a2f9f1794d1203c50ddc27e
|
70b2bfaaa597eb31bc3a1032366dcc0e1f4c8a9f
|
refs/heads/main
| 2023-08-22T18:20:08.022950
| 2023-08-22T15:24:39
| 2023-08-22T15:24:39
| 156,401,841
| 1,117
| 268
|
BSD-3-Clause
| 2023-09-14T20:44:21
| 2018-11-06T15:08:24
|
Python
|
UTF-8
|
Python
| false
| false
| 733
|
py
|
test_proba_basic.py
|
# copyright: sktime developers, BSD-3-Clause License (see LICENSE file)
"""Non-suite tests for probability distribution objects."""
__author__ = ["fkiraly"]
import pytest
from sktime.utils.validation._dependencies import _check_soft_dependencies
@pytest.mark.skipif(
not _check_soft_dependencies("tensorflow_probability", severity="none"),
reason="skip test if required soft dependency is not available",
)
def test_proba_example():
"""Test one subsetting case for BaseDistribution."""
from sktime.proba.tfp import TFNormal
n = TFNormal(mu=[[0, 1], [2, 3], [4, 5]], sigma=1)
assert n.shape == (3, 2)
one_row = n.loc[[1]]
assert isinstance(one_row, TFNormal)
assert one_row.shape == (1, 2)
|
ccb015201976f7c34de3508f8b85d99919337467
|
fdb9bdc6c4ab2f14ba71e544493706d5e275899f
|
/fhir/resources/examplescenario.py
|
336822d5b017517e0f468870bb8fc507e7c8f564
|
[
"BSD-3-Clause"
] |
permissive
|
nazrulworld/fhir.resources
|
6ae8aea8180c611b0c5050759c6dcdf63e4cb061
|
1fd6ea476b27b3fcb8c4ef8f23bc51cf161e69e3
|
refs/heads/main
| 2023-08-30T18:27:27.277249
| 2023-07-03T19:57:06
| 2023-07-03T19:57:06
| 165,297,877
| 256
| 83
|
NOASSERTION
| 2023-08-24T15:34:05
| 2019-01-11T19:26:41
|
Python
|
UTF-8
|
Python
| false
| false
| 67,834
|
py
|
examplescenario.py
|
# -*- coding: utf-8 -*-
"""
Profile: http://hl7.org/fhir/StructureDefinition/ExampleScenario
Release: R5
Version: 5.0.0
Build ID: 2aecd53
Last updated: 2023-03-26T15:21:02.749+11:00
"""
import typing
from pydantic import Field, root_validator
from pydantic.error_wrappers import ErrorWrapper, ValidationError
from pydantic.errors import MissingError, NoneIsNotAllowedError
from . import backboneelement, domainresource, fhirtypes
class ExampleScenario(domainresource.DomainResource):
"""Disclaimer: Any field name ends with ``__ext`` doesn't part of
Resource StructureDefinition, instead used to enable Extensibility feature
for FHIR Primitive Data Types.
Example of workflow instance.
"""
resource_type = Field("ExampleScenario", const=True)
actor: typing.List[fhirtypes.ExampleScenarioActorType] = Field(
None,
alias="actor",
title="Individual involved in exchange",
description=(
"A system or person who shares or receives an instance within the "
"scenario."
),
# if property is element of this resource.
element_property=True,
)
contact: typing.List[fhirtypes.ContactDetailType] = Field(
None,
alias="contact",
title="Contact details for the publisher",
description=(
"Contact details to assist a user in finding and communicating with the"
" publisher."
),
# if property is element of this resource.
element_property=True,
)
copyright: fhirtypes.Markdown = Field(
None,
alias="copyright",
title="Use and/or publishing restrictions",
description=(
"A copyright statement relating to the example scenario and/or its "
"contents. Copyright statements are generally legal restrictions on the"
" use and publishing of the example scenario."
),
# if property is element of this resource.
element_property=True,
)
copyright__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_copyright", title="Extension field for ``copyright``."
)
copyrightLabel: fhirtypes.String = Field(
None,
alias="copyrightLabel",
title="Copyright holder and year(s)",
description=(
"A short string (<50 characters), suitable for inclusion in a page "
"footer that identifies the copyright holder, effective period, and "
"optionally whether rights are resctricted. (e.g. 'All rights "
"reserved', 'Some rights reserved')."
),
# if property is element of this resource.
element_property=True,
)
copyrightLabel__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_copyrightLabel", title="Extension field for ``copyrightLabel``."
)
date: fhirtypes.DateTime = Field(
None,
alias="date",
title="Date last changed",
description=(
"The date (and optionally time) when the example scenario was last "
"significantly changed. The date must change when the business version "
"changes and it must change if the status code changes. In addition, it"
" should change when the substantive content of the example scenario "
"changes. (e.g. the 'content logical definition')."
),
# if property is element of this resource.
element_property=True,
)
date__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_date", title="Extension field for ``date``."
)
description: fhirtypes.Markdown = Field(
None,
alias="description",
title="Natural language description of the ExampleScenario",
description=(
"A free text natural language description of the ExampleScenario from a"
" consumer's perspective."
),
# if property is element of this resource.
element_property=True,
)
description__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_description", title="Extension field for ``description``."
)
experimental: bool = Field(
None,
alias="experimental",
title="For testing purposes, not real usage",
description=(
"A Boolean value to indicate that this example scenario is authored for"
" testing purposes (or education/evaluation/marketing) and is not "
"intended to be used for genuine usage."
),
# if property is element of this resource.
element_property=True,
)
experimental__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_experimental", title="Extension field for ``experimental``."
)
identifier: typing.List[fhirtypes.IdentifierType] = Field(
None,
alias="identifier",
title="Additional identifier for the example scenario",
description=(
"A formal identifier that is used to identify this example scenario "
"when it is represented in other formats, or referenced in a "
"specification, model, design or an instance."
),
# if property is element of this resource.
element_property=True,
)
instance: typing.List[fhirtypes.ExampleScenarioInstanceType] = Field(
None,
alias="instance",
title="Data used in the scenario",
description="A single data collection that is shared as part of the scenario.",
# if property is element of this resource.
element_property=True,
)
jurisdiction: typing.List[fhirtypes.CodeableConceptType] = Field(
None,
alias="jurisdiction",
title="Intended jurisdiction for example scenario (if applicable)",
description=(
"A legal or geographic region in which the example scenario is intended"
" to be used."
),
# if property is element of this resource.
element_property=True,
)
name: fhirtypes.String = Field(
None,
alias="name",
title="To be removed?",
description="Temporarily retained for tooling purposes.",
# if property is element of this resource.
element_property=True,
)
name__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_name", title="Extension field for ``name``."
)
process: typing.List[fhirtypes.ExampleScenarioProcessType] = Field(
None,
alias="process",
title="Major process within scenario",
description=(
"A group of operations that represents a significant step within a "
"scenario."
),
# if property is element of this resource.
element_property=True,
)
publisher: fhirtypes.String = Field(
None,
alias="publisher",
title="Name of the publisher/steward (organization or individual)",
description=(
"The name of the organization or individual responsible for the release"
" and ongoing maintenance of the example scenario."
),
# if property is element of this resource.
element_property=True,
)
publisher__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_publisher", title="Extension field for ``publisher``."
)
purpose: fhirtypes.Markdown = Field(
None,
alias="purpose",
title="The purpose of the example, e.g. to illustrate a scenario",
description=(
"What the example scenario resource is created for. This should not be "
"used to show the business purpose of the scenario itself, but the "
"purpose of documenting a scenario."
),
# if property is element of this resource.
element_property=True,
)
purpose__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_purpose", title="Extension field for ``purpose``."
)
status: fhirtypes.Code = Field(
None,
alias="status",
title="draft | active | retired | unknown",
description=(
"The status of this example scenario. Enables tracking the life-cycle "
"of the content."
),
# if property is element of this resource.
element_property=True,
element_required=True,
# note: Enum values can be used in validation,
# but use in your own responsibilities, read official FHIR documentation.
enum_values=["draft", "active", "retired", "unknown"],
)
status__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_status", title="Extension field for ``status``."
)
title: fhirtypes.String = Field(
None,
alias="title",
title="Name for this example scenario (human friendly)",
description="A short, descriptive, user-friendly title for the ExampleScenario.",
# if property is element of this resource.
element_property=True,
)
title__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_title", title="Extension field for ``title``."
)
url: fhirtypes.Uri = Field(
None,
alias="url",
title=(
"Canonical identifier for this example scenario, represented as a URI "
"(globally unique)"
),
description=(
"An absolute URI that is used to identify this example scenario when it"
" is referenced in a specification, model, design or an instance; also "
"called its canonical identifier. This SHOULD be globally unique and "
"SHOULD be a literal address at which an authoritative instance of this"
" example scenario is (or will be) published. This URL can be the "
"target of a canonical reference. It SHALL remain the same when the "
"example scenario is stored on different servers."
),
# if property is element of this resource.
element_property=True,
)
url__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_url", title="Extension field for ``url``."
)
useContext: typing.List[fhirtypes.UsageContextType] = Field(
None,
alias="useContext",
title="The context that the content is intended to support",
description=(
"The content was developed with a focus and intent of supporting the "
"contexts that are listed. These contexts may be general categories "
"(gender, age, ...) or may be references to specific programs "
"(insurance plans, studies, ...) and may be used to assist with "
"indexing and searching for appropriate example scenario instances."
),
# if property is element of this resource.
element_property=True,
)
version: fhirtypes.String = Field(
None,
alias="version",
title="Business version of the example scenario",
description=(
"The identifier that is used to identify this version of the example "
"scenario when it is referenced in a specification, model, design or "
"instance. This is an arbitrary value managed by the example scenario "
"author and is not expected to be globally unique. For example, it "
"might be a timestamp (e.g. yyyymmdd) if a managed version is not "
"available. There is also no expectation that versions can be placed in"
" a lexicographical sequence."
),
# if property is element of this resource.
element_property=True,
)
version__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_version", title="Extension field for ``version``."
)
versionAlgorithmCoding: fhirtypes.CodingType = Field(
None,
alias="versionAlgorithmCoding",
title="How to compare versions",
description=(
"Indicates the mechanism used to compare versions to determine which is"
" more current."
),
# if property is element of this resource.
element_property=True,
# Choice of Data Types. i.e versionAlgorithm[x]
one_of_many="versionAlgorithm",
one_of_many_required=False,
)
versionAlgorithmString: fhirtypes.String = Field(
None,
alias="versionAlgorithmString",
title="How to compare versions",
description=(
"Indicates the mechanism used to compare versions to determine which is"
" more current."
),
# if property is element of this resource.
element_property=True,
# Choice of Data Types. i.e versionAlgorithm[x]
one_of_many="versionAlgorithm",
one_of_many_required=False,
)
versionAlgorithmString__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None,
alias="_versionAlgorithmString",
title="Extension field for ``versionAlgorithmString``.",
)
@classmethod
def elements_sequence(cls):
"""returning all elements names from
``ExampleScenario`` according specification,
with preserving original sequence order.
"""
return [
"id",
"meta",
"implicitRules",
"language",
"text",
"contained",
"extension",
"modifierExtension",
"url",
"identifier",
"version",
"versionAlgorithmString",
"versionAlgorithmCoding",
"name",
"title",
"status",
"experimental",
"date",
"publisher",
"contact",
"description",
"useContext",
"jurisdiction",
"purpose",
"copyright",
"copyrightLabel",
"actor",
"instance",
"process",
]
@root_validator(pre=True, allow_reuse=True)
def validate_required_primitive_elements_1716(
cls, values: typing.Dict[str, typing.Any]
) -> typing.Dict[str, typing.Any]:
"""https://www.hl7.org/fhir/extensibility.html#Special-Case
In some cases, implementers might find that they do not have appropriate data for
an element with minimum cardinality = 1. In this case, the element must be present,
but unless the resource or a profile on it has made the actual value of the primitive
data type mandatory, it is possible to provide an extension that explains why
the primitive value is not present.
"""
required_fields = [("status", "status__ext")]
_missing = object()
def _fallback():
return ""
errors: typing.List["ErrorWrapper"] = []
for name, ext in required_fields:
field = cls.__fields__[name]
ext_field = cls.__fields__[ext]
value = values.get(field.alias, _missing)
if value not in (_missing, None):
continue
ext_value = values.get(ext_field.alias, _missing)
missing_ext = True
if ext_value not in (_missing, None):
if isinstance(ext_value, dict):
missing_ext = len(ext_value.get("extension", [])) == 0
elif (
getattr(ext_value.__class__, "get_resource_type", _fallback)()
== "FHIRPrimitiveExtension"
):
if ext_value.extension and len(ext_value.extension) > 0:
missing_ext = False
else:
validate_pass = True
for validator in ext_field.type_.__get_validators__():
try:
ext_value = validator(v=ext_value)
except ValidationError as exc:
errors.append(ErrorWrapper(exc, loc=ext_field.alias))
validate_pass = False
if not validate_pass:
continue
if ext_value.extension and len(ext_value.extension) > 0:
missing_ext = False
if missing_ext:
if value is _missing:
errors.append(ErrorWrapper(MissingError(), loc=field.alias))
else:
errors.append(
ErrorWrapper(NoneIsNotAllowedError(), loc=field.alias)
)
if len(errors) > 0:
raise ValidationError(errors, cls) # type: ignore
return values
@root_validator(pre=True, allow_reuse=True)
def validate_one_of_many_1716(
cls, values: typing.Dict[str, typing.Any]
) -> typing.Dict[str, typing.Any]:
"""https://www.hl7.org/fhir/formats.html#choice
A few elements have a choice of more than one data type for their content.
All such elements have a name that takes the form nnn[x].
The "nnn" part of the name is constant, and the "[x]" is replaced with
the title-cased name of the type that is actually used.
The table view shows each of these names explicitly.
Elements that have a choice of data type cannot repeat - they must have a
maximum cardinality of 1. When constructing an instance of an element with a
choice of types, the authoring system must create a single element with a
data type chosen from among the list of permitted data types.
"""
one_of_many_fields = {
"versionAlgorithm": ["versionAlgorithmCoding", "versionAlgorithmString"]
}
for prefix, fields in one_of_many_fields.items():
assert cls.__fields__[fields[0]].field_info.extra["one_of_many"] == prefix
required = (
cls.__fields__[fields[0]].field_info.extra["one_of_many_required"]
is True
)
found = False
for field in fields:
if field in values and values[field] is not None:
if found is True:
raise ValueError(
"Any of one field value is expected from "
f"this list {fields}, but got multiple!"
)
else:
found = True
if required is True and found is False:
raise ValueError(f"Expect any of field value from this list {fields}.")
return values
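# Illustrative note (not part of the original module), referring to the two root
# validators above; field values are hypothetical:
#
#   * Required primitive with an extension-only value ("Special-Case"): a payload
#     such as {"status": None, "_status": {"extension": [{"url": "...", "valueCode": "unknown"}]}}
#     passes validation, because the missing primitive is explained by an extension.
#   * Choice of data types (versionAlgorithm[x]): supplying both
#     "versionAlgorithmString" and "versionAlgorithmCoding" raises a ValueError,
#     because at most one variant of a choice element may be populated.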
class ExampleScenarioActor(backboneelement.BackboneElement):
"""Disclaimer: Any field name ends with ``__ext`` doesn't part of
Resource StructureDefinition, instead used to enable Extensibility feature
for FHIR Primitive Data Types.
Individual involved in exchange.
A system or person who shares or receives an instance within the scenario.
"""
resource_type = Field("ExampleScenarioActor", const=True)
description: fhirtypes.Markdown = Field(
None,
alias="description",
title="Details about actor",
description="An explanation of who/what the actor is and its role in the scenario.",
# if property is element of this resource.
element_property=True,
)
description__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_description", title="Extension field for ``description``."
)
key: fhirtypes.String = Field(
None,
alias="key",
title="ID or acronym of the actor",
description=(
"A unique string within the scenario that is used to reference the "
"actor."
),
# if property is element of this resource.
element_property=True,
element_required=True,
)
key__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_key", title="Extension field for ``key``."
)
title: fhirtypes.String = Field(
None,
alias="title",
title="Label for actor when rendering",
description=(
"The human-readable name for the actor used when rendering the " "scenario."
),
# if property is element of this resource.
element_property=True,
element_required=True,
)
title__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_title", title="Extension field for ``title``."
)
type: fhirtypes.Code = Field(
None,
alias="type",
title="person | system",
description="The category of actor - person or system.",
# if property is element of this resource.
element_property=True,
element_required=True,
# note: Enum values can be used in validation,
# but use in your own responsibilities, read official FHIR documentation.
enum_values=["person", "system"],
)
type__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_type", title="Extension field for ``type``."
)
@classmethod
def elements_sequence(cls):
"""returning all elements names from
``ExampleScenarioActor`` according specification,
with preserving original sequence order.
"""
return [
"id",
"extension",
"modifierExtension",
"key",
"type",
"title",
"description",
]
@root_validator(pre=True, allow_reuse=True)
def validate_required_primitive_elements_2224(
cls, values: typing.Dict[str, typing.Any]
) -> typing.Dict[str, typing.Any]:
"""https://www.hl7.org/fhir/extensibility.html#Special-Case
In some cases, implementers might find that they do not have appropriate data for
an element with minimum cardinality = 1. In this case, the element must be present,
but unless the resource or a profile on it has made the actual value of the primitive
data type mandatory, it is possible to provide an extension that explains why
the primitive value is not present.
"""
required_fields = [
("key", "key__ext"),
("title", "title__ext"),
("type", "type__ext"),
]
_missing = object()
def _fallback():
return ""
errors: typing.List["ErrorWrapper"] = []
for name, ext in required_fields:
field = cls.__fields__[name]
ext_field = cls.__fields__[ext]
value = values.get(field.alias, _missing)
if value not in (_missing, None):
continue
ext_value = values.get(ext_field.alias, _missing)
missing_ext = True
if ext_value not in (_missing, None):
if isinstance(ext_value, dict):
missing_ext = len(ext_value.get("extension", [])) == 0
elif (
getattr(ext_value.__class__, "get_resource_type", _fallback)()
== "FHIRPrimitiveExtension"
):
if ext_value.extension and len(ext_value.extension) > 0:
missing_ext = False
else:
validate_pass = True
for validator in ext_field.type_.__get_validators__():
try:
ext_value = validator(v=ext_value)
except ValidationError as exc:
errors.append(ErrorWrapper(exc, loc=ext_field.alias))
validate_pass = False
if not validate_pass:
continue
if ext_value.extension and len(ext_value.extension) > 0:
missing_ext = False
if missing_ext:
if value is _missing:
errors.append(ErrorWrapper(MissingError(), loc=field.alias))
else:
errors.append(
ErrorWrapper(NoneIsNotAllowedError(), loc=field.alias)
)
if len(errors) > 0:
raise ValidationError(errors, cls) # type: ignore
return values
class ExampleScenarioInstance(backboneelement.BackboneElement):
"""Disclaimer: Any field name ends with ``__ext`` doesn't part of
Resource StructureDefinition, instead used to enable Extensibility feature
for FHIR Primitive Data Types.
Data used in the scenario.
A single data collection that is shared as part of the scenario.
"""
resource_type = Field("ExampleScenarioInstance", const=True)
containedInstance: typing.List[
fhirtypes.ExampleScenarioInstanceContainedInstanceType
] = Field(
None,
alias="containedInstance",
title="Resources contained in the instance",
description=(
"References to other instances that can be found within this instance "
"(e.g. the observations contained in a bundle)."
),
# if property is element of this resource.
element_property=True,
)
content: fhirtypes.ReferenceType = Field(
None,
alias="content",
title="Example instance data",
description=(
"Points to an instance (typically an example) that shows the data that "
"would corespond to this instance."
),
# if property is element of this resource.
element_property=True,
)
description: fhirtypes.Markdown = Field(
None,
alias="description",
title="Human-friendly description of the instance",
description="An explanation of what the instance contains and what it's for.",
# if property is element of this resource.
element_property=True,
)
description__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_description", title="Extension field for ``description``."
)
key: fhirtypes.String = Field(
None,
alias="key",
title="ID or acronym of the instance",
description=(
"A unique string within the scenario that is used to reference the "
"instance."
),
# if property is element of this resource.
element_property=True,
element_required=True,
)
key__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_key", title="Extension field for ``key``."
)
structureProfileCanonical: fhirtypes.Canonical = Field(
None,
alias="structureProfileCanonical",
title="Rules instance adheres to",
description=(
"Refers to a profile, template or other ruleset the instance adheres " "to."
),
# if property is element of this resource.
element_property=True,
# Choice of Data Types. i.e structureProfile[x]
one_of_many="structureProfile",
one_of_many_required=False,
)
structureProfileCanonical__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None,
alias="_structureProfileCanonical",
title="Extension field for ``structureProfileCanonical``.",
)
structureProfileUri: fhirtypes.Uri = Field(
None,
alias="structureProfileUri",
title="Rules instance adheres to",
description=(
"Refers to a profile, template or other ruleset the instance adheres " "to."
),
# if property is element of this resource.
element_property=True,
# Choice of Data Types. i.e structureProfile[x]
one_of_many="structureProfile",
one_of_many_required=False,
)
structureProfileUri__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None,
alias="_structureProfileUri",
title="Extension field for ``structureProfileUri``.",
)
structureType: fhirtypes.CodingType = Field(
...,
alias="structureType",
title="Data structure for example",
description=(
"A code indicating the kind of data structure (FHIR resource or some "
"other standard) this is an instance of."
),
# if property is element of this resource.
element_property=True,
)
structureVersion: fhirtypes.String = Field(
None,
alias="structureVersion",
title="E.g. 4.0.1",
description=(
"Conveys the version of the data structure instantiated. I.e. what "
"release of FHIR, X12, OpenEHR, etc. is instance compliant with."
),
# if property is element of this resource.
element_property=True,
)
structureVersion__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None,
alias="_structureVersion",
title="Extension field for ``structureVersion``.",
)
title: fhirtypes.String = Field(
None,
alias="title",
title="Label for instance",
description=(
"A short descriptive label the instance to be used in tables or "
"diagrams."
),
# if property is element of this resource.
element_property=True,
element_required=True,
)
title__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_title", title="Extension field for ``title``."
)
version: typing.List[fhirtypes.ExampleScenarioInstanceVersionType] = Field(
None,
alias="version",
title="Snapshot of instance that changes",
description="Represents the instance as it was at a specific time-point.",
# if property is element of this resource.
element_property=True,
)
@classmethod
def elements_sequence(cls):
"""returning all elements names from
``ExampleScenarioInstance`` according specification,
with preserving original sequence order.
"""
return [
"id",
"extension",
"modifierExtension",
"key",
"structureType",
"structureVersion",
"structureProfileCanonical",
"structureProfileUri",
"title",
"description",
"content",
"version",
"containedInstance",
]
@root_validator(pre=True, allow_reuse=True)
def validate_required_primitive_elements_2527(
cls, values: typing.Dict[str, typing.Any]
) -> typing.Dict[str, typing.Any]:
"""https://www.hl7.org/fhir/extensibility.html#Special-Case
In some cases, implementers might find that they do not have appropriate data for
an element with minimum cardinality = 1. In this case, the element must be present,
but unless the resource or a profile on it has made the actual value of the primitive
data type mandatory, it is possible to provide an extension that explains why
the primitive value is not present.
"""
required_fields = [("key", "key__ext"), ("title", "title__ext")]
_missing = object()
def _fallback():
return ""
errors: typing.List["ErrorWrapper"] = []
for name, ext in required_fields:
field = cls.__fields__[name]
ext_field = cls.__fields__[ext]
value = values.get(field.alias, _missing)
if value not in (_missing, None):
continue
ext_value = values.get(ext_field.alias, _missing)
missing_ext = True
if ext_value not in (_missing, None):
if isinstance(ext_value, dict):
missing_ext = len(ext_value.get("extension", [])) == 0
elif (
getattr(ext_value.__class__, "get_resource_type", _fallback)()
== "FHIRPrimitiveExtension"
):
if ext_value.extension and len(ext_value.extension) > 0:
missing_ext = False
else:
validate_pass = True
for validator in ext_field.type_.__get_validators__():
try:
ext_value = validator(v=ext_value)
except ValidationError as exc:
errors.append(ErrorWrapper(exc, loc=ext_field.alias))
validate_pass = False
if not validate_pass:
continue
if ext_value.extension and len(ext_value.extension) > 0:
missing_ext = False
if missing_ext:
if value is _missing:
errors.append(ErrorWrapper(MissingError(), loc=field.alias))
else:
errors.append(
ErrorWrapper(NoneIsNotAllowedError(), loc=field.alias)
)
if len(errors) > 0:
raise ValidationError(errors, cls) # type: ignore
return values
@root_validator(pre=True, allow_reuse=True)
def validate_one_of_many_2527(
cls, values: typing.Dict[str, typing.Any]
) -> typing.Dict[str, typing.Any]:
"""https://www.hl7.org/fhir/formats.html#choice
A few elements have a choice of more than one data type for their content.
All such elements have a name that takes the form nnn[x].
The "nnn" part of the name is constant, and the "[x]" is replaced with
the title-cased name of the type that is actually used.
The table view shows each of these names explicitly.
Elements that have a choice of data type cannot repeat - they must have a
maximum cardinality of 1. When constructing an instance of an element with a
choice of types, the authoring system must create a single element with a
data type chosen from among the list of permitted data types.
"""
one_of_many_fields = {
"structureProfile": ["structureProfileCanonical", "structureProfileUri"]
}
for prefix, fields in one_of_many_fields.items():
assert cls.__fields__[fields[0]].field_info.extra["one_of_many"] == prefix
required = (
cls.__fields__[fields[0]].field_info.extra["one_of_many_required"]
is True
)
found = False
for field in fields:
if field in values and values[field] is not None:
if found is True:
raise ValueError(
"Any of one field value is expected from "
f"this list {fields}, but got multiple!"
)
else:
found = True
if required is True and found is False:
raise ValueError(f"Expect any of field value from this list {fields}.")
return values
class ExampleScenarioInstanceContainedInstance(backboneelement.BackboneElement):
"""Disclaimer: Any field name ends with ``__ext`` doesn't part of
Resource StructureDefinition, instead used to enable Extensibility feature
for FHIR Primitive Data Types.
Resources contained in the instance.
References to other instances that can be found within this instance (e.g.
the observations contained in a bundle).
"""
resource_type = Field("ExampleScenarioInstanceContainedInstance", const=True)
instanceReference: fhirtypes.String = Field(
None,
alias="instanceReference",
title="Key of contained instance",
description="A reference to the key of an instance found within this one.",
# if property is element of this resource.
element_property=True,
element_required=True,
)
instanceReference__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None,
alias="_instanceReference",
title="Extension field for ``instanceReference``.",
)
versionReference: fhirtypes.String = Field(
None,
alias="versionReference",
title="Key of contained instance version",
description=(
"A reference to the key of a specific version of an instance in this "
"instance."
),
# if property is element of this resource.
element_property=True,
)
versionReference__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None,
alias="_versionReference",
title="Extension field for ``versionReference``.",
)
@classmethod
def elements_sequence(cls):
"""returning all elements names from
``ExampleScenarioInstanceContainedInstance`` according specification,
with preserving original sequence order.
"""
return [
"id",
"extension",
"modifierExtension",
"instanceReference",
"versionReference",
]
@root_validator(pre=True, allow_reuse=True)
def validate_required_primitive_elements_4265(
cls, values: typing.Dict[str, typing.Any]
) -> typing.Dict[str, typing.Any]:
"""https://www.hl7.org/fhir/extensibility.html#Special-Case
In some cases, implementers might find that they do not have appropriate data for
an element with minimum cardinality = 1. In this case, the element must be present,
but unless the resource or a profile on it has made the actual value of the primitive
data type mandatory, it is possible to provide an extension that explains why
the primitive value is not present.
"""
required_fields = [("instanceReference", "instanceReference__ext")]
_missing = object()
def _fallback():
return ""
errors: typing.List["ErrorWrapper"] = []
for name, ext in required_fields:
field = cls.__fields__[name]
ext_field = cls.__fields__[ext]
value = values.get(field.alias, _missing)
if value not in (_missing, None):
continue
ext_value = values.get(ext_field.alias, _missing)
missing_ext = True
if ext_value not in (_missing, None):
if isinstance(ext_value, dict):
missing_ext = len(ext_value.get("extension", [])) == 0
elif (
getattr(ext_value.__class__, "get_resource_type", _fallback)()
== "FHIRPrimitiveExtension"
):
if ext_value.extension and len(ext_value.extension) > 0:
missing_ext = False
else:
validate_pass = True
for validator in ext_field.type_.__get_validators__():
try:
ext_value = validator(v=ext_value)
except ValidationError as exc:
errors.append(ErrorWrapper(exc, loc=ext_field.alias))
validate_pass = False
if not validate_pass:
continue
if ext_value.extension and len(ext_value.extension) > 0:
missing_ext = False
if missing_ext:
if value is _missing:
errors.append(ErrorWrapper(MissingError(), loc=field.alias))
else:
errors.append(
ErrorWrapper(NoneIsNotAllowedError(), loc=field.alias)
)
if len(errors) > 0:
raise ValidationError(errors, cls) # type: ignore
return values
class ExampleScenarioInstanceVersion(backboneelement.BackboneElement):
"""Disclaimer: Any field name ends with ``__ext`` doesn't part of
Resource StructureDefinition, instead used to enable Extensibility feature
for FHIR Primitive Data Types.
Snapshot of instance that changes.
Represents the instance as it was at a specific time-point.
"""
resource_type = Field("ExampleScenarioInstanceVersion", const=True)
content: fhirtypes.ReferenceType = Field(
None,
alias="content",
title="Example instance version data",
description=(
"Points to an instance (typically an example) that shows the data that "
"would flow at this point in the scenario."
),
# if property is element of this resource.
element_property=True,
)
description: fhirtypes.Markdown = Field(
None,
alias="description",
title="Details about version",
description=(
"An explanation of what this specific version of the instance contains "
"and represents."
),
# if property is element of this resource.
element_property=True,
)
description__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_description", title="Extension field for ``description``."
)
key: fhirtypes.String = Field(
None,
alias="key",
title="ID or acronym of the version",
description=(
"A unique string within the instance that is used to reference the "
"version of the instance."
),
# if property is element of this resource.
element_property=True,
element_required=True,
)
key__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_key", title="Extension field for ``key``."
)
title: fhirtypes.String = Field(
None,
alias="title",
title="Label for instance version",
description=(
"A short descriptive label the version to be used in tables or " "diagrams."
),
# if property is element of this resource.
element_property=True,
element_required=True,
)
title__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_title", title="Extension field for ``title``."
)
@classmethod
def elements_sequence(cls):
"""returning all elements names from
``ExampleScenarioInstanceVersion`` according specification,
with preserving original sequence order.
"""
return [
"id",
"extension",
"modifierExtension",
"key",
"title",
"description",
"content",
]
@root_validator(pre=True, allow_reuse=True)
def validate_required_primitive_elements_3278(
cls, values: typing.Dict[str, typing.Any]
) -> typing.Dict[str, typing.Any]:
"""https://www.hl7.org/fhir/extensibility.html#Special-Case
In some cases, implementers might find that they do not have appropriate data for
an element with minimum cardinality = 1. In this case, the element must be present,
but unless the resource or a profile on it has made the actual value of the primitive
data type mandatory, it is possible to provide an extension that explains why
the primitive value is not present.
"""
required_fields = [("key", "key__ext"), ("title", "title__ext")]
_missing = object()
def _fallback():
return ""
errors: typing.List["ErrorWrapper"] = []
for name, ext in required_fields:
field = cls.__fields__[name]
ext_field = cls.__fields__[ext]
value = values.get(field.alias, _missing)
if value not in (_missing, None):
continue
ext_value = values.get(ext_field.alias, _missing)
missing_ext = True
if ext_value not in (_missing, None):
if isinstance(ext_value, dict):
missing_ext = len(ext_value.get("extension", [])) == 0
elif (
getattr(ext_value.__class__, "get_resource_type", _fallback)()
== "FHIRPrimitiveExtension"
):
if ext_value.extension and len(ext_value.extension) > 0:
missing_ext = False
else:
validate_pass = True
for validator in ext_field.type_.__get_validators__():
try:
ext_value = validator(v=ext_value)
except ValidationError as exc:
errors.append(ErrorWrapper(exc, loc=ext_field.alias))
validate_pass = False
if not validate_pass:
continue
if ext_value.extension and len(ext_value.extension) > 0:
missing_ext = False
if missing_ext:
if value is _missing:
errors.append(ErrorWrapper(MissingError(), loc=field.alias))
else:
errors.append(
ErrorWrapper(NoneIsNotAllowedError(), loc=field.alias)
)
if len(errors) > 0:
raise ValidationError(errors, cls) # type: ignore
return values
class ExampleScenarioProcess(backboneelement.BackboneElement):
"""Disclaimer: Any field name ends with ``__ext`` doesn't part of
Resource StructureDefinition, instead used to enable Extensibility feature
for FHIR Primitive Data Types.
Major process within scenario.
A group of operations that represents a significant step within a scenario.
"""
resource_type = Field("ExampleScenarioProcess", const=True)
description: fhirtypes.Markdown = Field(
None,
alias="description",
title="Human-friendly description of the process",
description="An explanation of what the process represents and what it does.",
# if property is element of this resource.
element_property=True,
)
description__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_description", title="Extension field for ``description``."
)
postConditions: fhirtypes.Markdown = Field(
None,
alias="postConditions",
title="Status after successful completion",
description=(
"Description of the final state of the actors, environment and data "
"after the process has been successfully completed."
),
# if property is element of this resource.
element_property=True,
)
postConditions__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_postConditions", title="Extension field for ``postConditions``."
)
preConditions: fhirtypes.Markdown = Field(
None,
alias="preConditions",
title="Status before process starts",
description=(
"Description of the initial state of the actors, environment and data "
"before the process starts."
),
# if property is element of this resource.
element_property=True,
)
preConditions__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_preConditions", title="Extension field for ``preConditions``."
)
step: typing.List[fhirtypes.ExampleScenarioProcessStepType] = Field(
None,
alias="step",
title="Event within of the process",
description="A significant action that occurs as part of the process.",
# if property is element of this resource.
element_property=True,
)
title: fhirtypes.String = Field(
None,
alias="title",
title="Label for procss",
description=(
"A short descriptive label the process to be used in tables or " "diagrams."
),
# if property is element of this resource.
element_property=True,
element_required=True,
)
title__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_title", title="Extension field for ``title``."
)
@classmethod
def elements_sequence(cls):
"""returning all elements names from
``ExampleScenarioProcess`` according specification,
with preserving original sequence order.
"""
return [
"id",
"extension",
"modifierExtension",
"title",
"description",
"preConditions",
"postConditions",
"step",
]
@root_validator(pre=True, allow_reuse=True)
def validate_required_primitive_elements_2455(
cls, values: typing.Dict[str, typing.Any]
) -> typing.Dict[str, typing.Any]:
"""https://www.hl7.org/fhir/extensibility.html#Special-Case
In some cases, implementers might find that they do not have appropriate data for
an element with minimum cardinality = 1. In this case, the element must be present,
but unless the resource or a profile on it has made the actual value of the primitive
data type mandatory, it is possible to provide an extension that explains why
the primitive value is not present.
"""
required_fields = [("title", "title__ext")]
_missing = object()
def _fallback():
return ""
errors: typing.List["ErrorWrapper"] = []
for name, ext in required_fields:
field = cls.__fields__[name]
ext_field = cls.__fields__[ext]
value = values.get(field.alias, _missing)
if value not in (_missing, None):
continue
ext_value = values.get(ext_field.alias, _missing)
missing_ext = True
if ext_value not in (_missing, None):
if isinstance(ext_value, dict):
missing_ext = len(ext_value.get("extension", [])) == 0
elif (
getattr(ext_value.__class__, "get_resource_type", _fallback)()
== "FHIRPrimitiveExtension"
):
if ext_value.extension and len(ext_value.extension) > 0:
missing_ext = False
else:
validate_pass = True
for validator in ext_field.type_.__get_validators__():
try:
ext_value = validator(v=ext_value)
except ValidationError as exc:
errors.append(ErrorWrapper(exc, loc=ext_field.alias))
validate_pass = False
if not validate_pass:
continue
if ext_value.extension and len(ext_value.extension) > 0:
missing_ext = False
if missing_ext:
if value is _missing:
errors.append(ErrorWrapper(MissingError(), loc=field.alias))
else:
errors.append(
ErrorWrapper(NoneIsNotAllowedError(), loc=field.alias)
)
if len(errors) > 0:
raise ValidationError(errors, cls) # type: ignore
return values
class ExampleScenarioProcessStep(backboneelement.BackboneElement):
"""Disclaimer: Any field name ends with ``__ext`` doesn't part of
Resource StructureDefinition, instead used to enable Extensibility feature
for FHIR Primitive Data Types.
Event within the process.
A significant action that occurs as part of the process.
"""
resource_type = Field("ExampleScenarioProcessStep", const=True)
alternative: typing.List[
fhirtypes.ExampleScenarioProcessStepAlternativeType
] = Field(
None,
alias="alternative",
title="Alternate non-typical step action",
description=(
"Indicates an alternative step that can be taken instead of the sub-"
"process, scenario or operation. E.g. to represent non-happy-"
"path/exceptional/atypical circumstances."
),
# if property is element of this resource.
element_property=True,
)
number: fhirtypes.String = Field(
None,
alias="number",
title="Sequential number of the step",
description="The sequential number of the step, e.g. 1.2.5.",
# if property is element of this resource.
element_property=True,
)
number__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_number", title="Extension field for ``number``."
)
operation: fhirtypes.ExampleScenarioProcessStepOperationType = Field(
None,
alias="operation",
title="Step is simple action",
description="The step represents a single operation invoked on receiver by sender.",
# if property is element of this resource.
element_property=True,
)
pause: bool = Field(
None,
alias="pause",
title="Pause in the flow?",
description=(
"If true, indicates that, following this step, there is a pause in the "
"flow and the subsequent step will occur at some later time (triggered "
"by some event)."
),
# if property is element of this resource.
element_property=True,
)
pause__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_pause", title="Extension field for ``pause``."
)
process: fhirtypes.ExampleScenarioProcessType = Field(
None,
alias="process",
title="Step is nested process",
description="Indicates that the step is a complex sub-process with its own steps.",
# if property is element of this resource.
element_property=True,
)
workflow: fhirtypes.Canonical = Field(
None,
alias="workflow",
title="Step is nested workflow",
description="Indicates that the step is defined by a seaparate scenario instance.",
# if property is element of this resource.
element_property=True,
# note: Listed Resource Type(s) should be allowed as Reference.
enum_reference_types=["ExampleScenario"],
)
workflow__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_workflow", title="Extension field for ``workflow``."
)
@classmethod
def elements_sequence(cls):
"""returning all elements names from
``ExampleScenarioProcessStep`` according specification,
with preserving original sequence order.
"""
return [
"id",
"extension",
"modifierExtension",
"number",
"process",
"workflow",
"operation",
"alternative",
"pause",
]
class ExampleScenarioProcessStepAlternative(backboneelement.BackboneElement):
"""Disclaimer: Any field name ends with ``__ext`` doesn't part of
Resource StructureDefinition, instead used to enable Extensibility feature
for FHIR Primitive Data Types.
Alternate non-typical step action.
Indicates an alternative step that can be taken instead of the sub-process,
scenario or operation. E.g. to represent non-happy-
path/exceptional/atypical circumstances.
"""
resource_type = Field("ExampleScenarioProcessStepAlternative", const=True)
description: fhirtypes.Markdown = Field(
None,
alias="description",
title="Human-readable description of option",
description=(
"A human-readable description of the alternative explaining when the "
"alternative should occur rather than the base step."
),
# if property is element of this resource.
element_property=True,
)
description__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_description", title="Extension field for ``description``."
)
step: typing.List[fhirtypes.ExampleScenarioProcessStepType] = Field(
None,
alias="step",
title="Alternative action(s)",
description=(
"Indicates the operation, sub-process or scenario that happens if the "
"alternative option is selected."
),
# if property is element of this resource.
element_property=True,
)
title: fhirtypes.String = Field(
None,
alias="title",
title="Label for alternative",
description=(
"The label to display for the alternative that gives a sense of the "
"circumstance in which the alternative should be invoked."
),
# if property is element of this resource.
element_property=True,
element_required=True,
)
title__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_title", title="Extension field for ``title``."
)
@classmethod
def elements_sequence(cls):
"""returning all elements names from
``ExampleScenarioProcessStepAlternative`` according specification,
with preserving original sequence order.
"""
return ["id", "extension", "modifierExtension", "title", "description", "step"]
@root_validator(pre=True, allow_reuse=True)
def validate_required_primitive_elements_4004(
cls, values: typing.Dict[str, typing.Any]
) -> typing.Dict[str, typing.Any]:
"""https://www.hl7.org/fhir/extensibility.html#Special-Case
In some cases, implementers might find that they do not have appropriate data for
an element with minimum cardinality = 1. In this case, the element must be present,
but unless the resource or a profile on it has made the actual value of the primitive
data type mandatory, it is possible to provide an extension that explains why
the primitive value is not present.
"""
required_fields = [("title", "title__ext")]
_missing = object()
def _fallback():
return ""
errors: typing.List["ErrorWrapper"] = []
for name, ext in required_fields:
field = cls.__fields__[name]
ext_field = cls.__fields__[ext]
value = values.get(field.alias, _missing)
if value not in (_missing, None):
continue
ext_value = values.get(ext_field.alias, _missing)
missing_ext = True
if ext_value not in (_missing, None):
if isinstance(ext_value, dict):
missing_ext = len(ext_value.get("extension", [])) == 0
elif (
getattr(ext_value.__class__, "get_resource_type", _fallback)()
== "FHIRPrimitiveExtension"
):
if ext_value.extension and len(ext_value.extension) > 0:
missing_ext = False
else:
validate_pass = True
for validator in ext_field.type_.__get_validators__():
try:
ext_value = validator(v=ext_value)
except ValidationError as exc:
errors.append(ErrorWrapper(exc, loc=ext_field.alias))
validate_pass = False
if not validate_pass:
continue
if ext_value.extension and len(ext_value.extension) > 0:
missing_ext = False
if missing_ext:
if value is _missing:
errors.append(ErrorWrapper(MissingError(), loc=field.alias))
else:
errors.append(
ErrorWrapper(NoneIsNotAllowedError(), loc=field.alias)
)
if len(errors) > 0:
raise ValidationError(errors, cls) # type: ignore
return values
class ExampleScenarioProcessStepOperation(backboneelement.BackboneElement):
"""Disclaimer: Any field name ends with ``__ext`` doesn't part of
Resource StructureDefinition, instead used to enable Extensibility feature
for FHIR Primitive Data Types.
Step is simple action.
The step represents a single operation invoked on receiver by sender.
"""
resource_type = Field("ExampleScenarioProcessStepOperation", const=True)
description: fhirtypes.Markdown = Field(
None,
alias="description",
title="Human-friendly description of the operation",
description="An explanation of what the operation represents and what it does.",
# if property is element of this resource.
element_property=True,
)
description__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_description", title="Extension field for ``description``."
)
initiator: fhirtypes.String = Field(
None,
alias="initiator",
title="Who starts the operation",
description="The system that invokes the action/transmits the data.",
# if property is element of this resource.
element_property=True,
)
initiator__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_initiator", title="Extension field for ``initiator``."
)
initiatorActive: bool = Field(
None,
alias="initiatorActive",
title="Initiator stays active?",
description="If false, the initiator is deactivated right after the operation.",
# if property is element of this resource.
element_property=True,
)
initiatorActive__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_initiatorActive", title="Extension field for ``initiatorActive``."
)
receiver: fhirtypes.String = Field(
None,
alias="receiver",
title="Who receives the operation",
description="The system on which the action is invoked/receives the data.",
# if property is element of this resource.
element_property=True,
)
receiver__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_receiver", title="Extension field for ``receiver``."
)
receiverActive: bool = Field(
None,
alias="receiverActive",
title="Receiver stays active?",
description="If false, the receiver is deactivated right after the operation.",
# if property is element of this resource.
element_property=True,
)
receiverActive__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_receiverActive", title="Extension field for ``receiverActive``."
)
request: fhirtypes.ExampleScenarioInstanceContainedInstanceType = Field(
None,
alias="request",
title="Instance transmitted on invocation",
description=(
"A reference to the instance that is transmitted from requester to "
"receiver as part of the invocation of the operation."
),
# if property is element of this resource.
element_property=True,
)
response: fhirtypes.ExampleScenarioInstanceContainedInstanceType = Field(
None,
alias="response",
title="Instance transmitted on invocation response",
description=(
"A reference to the instance that is transmitted from receiver to "
"requester as part of the operation's synchronous response (if any)."
),
# if property is element of this resource.
element_property=True,
)
title: fhirtypes.String = Field(
None,
alias="title",
title="Label for step",
description="A short descriptive label the step to be used in tables or diagrams.",
# if property is element of this resource.
element_property=True,
element_required=True,
)
title__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_title", title="Extension field for ``title``."
)
type: fhirtypes.CodingType = Field(
None,
alias="type",
title="Kind of action",
description="The standardized type of action (FHIR or otherwise).",
# if property is element of this resource.
element_property=True,
)
@classmethod
def elements_sequence(cls):
"""returning all elements names from
``ExampleScenarioProcessStepOperation`` according specification,
with preserving original sequence order.
"""
return [
"id",
"extension",
"modifierExtension",
"type",
"title",
"initiator",
"receiver",
"description",
"initiatorActive",
"receiverActive",
"request",
"response",
]
@root_validator(pre=True, allow_reuse=True)
def validate_required_primitive_elements_3807(
cls, values: typing.Dict[str, typing.Any]
) -> typing.Dict[str, typing.Any]:
"""https://www.hl7.org/fhir/extensibility.html#Special-Case
In some cases, implementers might find that they do not have appropriate data for
an element with minimum cardinality = 1. In this case, the element must be present,
but unless the resource or a profile on it has made the actual value of the primitive
data type mandatory, it is possible to provide an extension that explains why
the primitive value is not present.
"""
required_fields = [("title", "title__ext")]
_missing = object()
def _fallback():
return ""
errors: typing.List["ErrorWrapper"] = []
for name, ext in required_fields:
field = cls.__fields__[name]
ext_field = cls.__fields__[ext]
value = values.get(field.alias, _missing)
if value not in (_missing, None):
continue
ext_value = values.get(ext_field.alias, _missing)
missing_ext = True
if ext_value not in (_missing, None):
if isinstance(ext_value, dict):
missing_ext = len(ext_value.get("extension", [])) == 0
elif (
getattr(ext_value.__class__, "get_resource_type", _fallback)()
== "FHIRPrimitiveExtension"
):
if ext_value.extension and len(ext_value.extension) > 0:
missing_ext = False
else:
validate_pass = True
for validator in ext_field.type_.__get_validators__():
try:
ext_value = validator(v=ext_value)
except ValidationError as exc:
errors.append(ErrorWrapper(exc, loc=ext_field.alias))
validate_pass = False
if not validate_pass:
continue
if ext_value.extension and len(ext_value.extension) > 0:
missing_ext = False
if missing_ext:
if value is _missing:
errors.append(ErrorWrapper(MissingError(), loc=field.alias))
else:
errors.append(
ErrorWrapper(NoneIsNotAllowedError(), loc=field.alias)
)
if len(errors) > 0:
raise ValidationError(errors, cls) # type: ignore
return values
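# Illustrative sketch (not part of the generated model; the extension URL is a
# hypothetical placeholder): the validator above accepts either a concrete
# ``title`` or a ``_title`` primitive extension explaining its absence, using
# the pydantic v1 ``parse_obj`` entry point this class builds on.
#
#   ExampleScenarioProcessStepOperation.parse_obj({
#       "_title": {"extension": [
#           {"url": "http://example.org/data-absent-reason", "valueCode": "unknown"}
#       ]}
#   })                                                 # passes validation
#   ExampleScenarioProcessStepOperation.parse_obj({})  # raises: "title" missing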
|
a8b868e35d6dc154b2deaf3092f6e5bf34163742
|
7d689ca6fa3fcea87bc636719fcc5594730057d7
|
/mollie/api/objects/balance_report.py
|
5b2adf939234bcf352eb493416b1b02b26216bed
|
[
"BSD-3-Clause",
"BSD-2-Clause"
] |
permissive
|
mollie/mollie-api-python
|
fbbb3c05a7867b122c26debc05a5a91c16169748
|
e0d553aee4b52c3c7aabba2ff17ccc67e760635a
|
refs/heads/master
| 2023-08-27T19:35:19.487382
| 2023-07-28T13:20:43
| 2023-07-28T13:20:43
| 19,978,672
| 112
| 79
|
BSD-2-Clause
| 2023-08-31T15:35:55
| 2014-05-20T11:42:28
|
Python
|
UTF-8
|
Python
| false
| false
| 1,083
|
py
|
balance_report.py
|
from typing import TYPE_CHECKING, Any
from .base import ObjectBase
if TYPE_CHECKING:
from ..client import Client
from ..resources import BalanceReports
class BalanceReport(ObjectBase):
@classmethod
def get_resource_class(cls, client: "Client", **kwargs: Any) -> "BalanceReports":
from ..resources import BalanceReports
balance = kwargs["balance"]
return BalanceReports(client, balance)
@property
def resource(self):
return self._get_property("resource")
@property
def balance_id(self):
return self._get_property("balanceId")
@property
def time_zone(self):
return self._get_property("timeZone")
@property
def from_(self):
        # 'from' is a reserved word in Python, thus 'from_' is used.
return self._get_property("from")
@property
def until(self):
return self._get_property("until")
@property
def grouping(self):
return self._get_property("grouping")
@property
def totals(self):
return self._get_property("totals")
|
66fe9d65dd5ddd750c5a7e153a5d7076516c4575
|
f3806d9fb54773908cd9704121a543b114470aca
|
/angr/analyses/analysis.py
|
fed02a1ab5081c8bd9cd064ae4eede2f20f686fe
|
[
"BSD-2-Clause"
] |
permissive
|
angr/angr
|
8ae95fceca51b0a001de56477d984dd01193ac1d
|
37e8ca1c3308ec601ad1d7c6bc8081ff38a7cffd
|
refs/heads/master
| 2023-08-17T03:15:21.007865
| 2023-08-15T18:44:57
| 2023-08-15T18:44:57
| 40,328,394
| 7,184
| 1,306
|
BSD-2-Clause
| 2023-09-14T20:14:23
| 2015-08-06T21:46:55
|
Python
|
UTF-8
|
Python
| false
| false
| 12,277
|
py
|
analysis.py
|
# ruff: noqa: F401
import functools
import sys
import contextlib
from collections import defaultdict
from inspect import Signature
from typing import TYPE_CHECKING, TypeVar, Type, Generic, Callable, Optional
import logging
import time
import typing
from rich import progress
from ..misc.plugins import PluginVendor, VendorPreset
from ..misc.ux import deprecated
if TYPE_CHECKING:
from ..knowledge_base import KnowledgeBase
from ..project import Project
from typing_extensions import ParamSpec
from .identifier import Identifier
from .callee_cleanup_finder import CalleeCleanupFinder
from .vsa_ddg import VSA_DDG
from .cdg import CDG
from .bindiff import BinDiff
from .cfg import CFGEmulated
from .cfg import CFBlanket
from .cfg import CFG
from .cfg import CFGFast
from .static_hooker import StaticHooker
from .ddg import DDG
from .congruency_check import CongruencyCheck
from .reassembler import Reassembler
from .backward_slice import BackwardSlice
from .binary_optimizer import BinaryOptimizer
from .vfg import VFG
from .loopfinder import LoopFinder
from .disassembly import Disassembly
from .veritesting import Veritesting
from .code_tagging import CodeTagging
from .boyscout import BoyScout
from .variable_recovery import VariableRecoveryFast
from .variable_recovery import VariableRecovery
from .reaching_definitions import ReachingDefinitionsAnalysis
from .complete_calling_conventions import CompleteCallingConventionsAnalysis
from .decompiler.clinic import Clinic
from .propagator import PropagatorAnalysis
from .calling_convention import CallingConventionAnalysis
from .decompiler.decompiler import Decompiler
from .xrefs import XRefsAnalysis
AnalysisParams = ParamSpec("AnalysisParams")
l = logging.getLogger(name=__name__)
class AnalysisLogEntry:
def __init__(self, message, exc_info=False):
if exc_info:
(e_type, value, traceback) = sys.exc_info()
self.exc_type = e_type
self.exc_value = value
self.exc_traceback = traceback
else:
self.exc_type = None
self.exc_value = None
self.exc_traceback = None
self.message = message
def __getstate__(self):
return (
str(self.__dict__.get("exc_type")),
str(self.__dict__.get("exc_value")),
str(self.__dict__.get("exc_traceback")),
self.message,
)
def __setstate__(self, s):
self.exc_type, self.exc_value, self.exc_traceback, self.message = s
def __repr__(self):
if self.exc_type is None:
msg_str = repr(self.message)
if len(msg_str) > 70:
msg_str = msg_str[:66] + "..."
if msg_str[0] in ('"', "'"):
msg_str += msg_str[0]
return "<AnalysisLogEntry %s>" % msg_str
else:
msg_str = repr(self.message)
if len(msg_str) > 40:
msg_str = msg_str[:36] + "..."
if msg_str[0] in ('"', "'"):
msg_str += msg_str[0]
return f"<AnalysisLogEntry {msg_str} with {self.exc_type.__name__}: {self.exc_value}>"
A = TypeVar("A", bound="Analysis")
class AnalysesHub(PluginVendor[A]):
"""
    This class contains functions for all the registered and runnable analyses.
"""
def __init__(self, project):
super().__init__()
self.project = project
@deprecated()
def reload_analyses(self): # pylint: disable=no-self-use
return
def _init_plugin(self, plugin_cls: Type[A]) -> "AnalysisFactory[A]":
return AnalysisFactory(self.project, plugin_cls)
def __getstate__(self):
s = super().__getstate__()
return (s, self.project)
def __setstate__(self, sd):
s, self.project = sd
super().__setstate__(s)
def __getitem__(self, plugin_cls: Type[A]) -> "AnalysisFactory[A]":
return AnalysisFactory(self.project, plugin_cls)
class KnownAnalysesPlugin(typing.Protocol):
Identifier: "Type[Identifier]"
CalleeCleanupFinder: "Type[CalleeCleanupFinder]"
VSA_DDG: "Type[VSA_DDG]"
CDG: "Type[CDG]"
BinDiff: "Type[BinDiff]"
CFGEmulated: "Type[CFGEmulated]"
CFB: "Type[CFBlanket]"
CFBlanket: "Type[CFBlanket]"
CFG: "Type[CFG]"
CFGFast: "Type[CFGFast]"
StaticHooker: "Type[StaticHooker]"
DDG: "Type[DDG]"
CongruencyCheck: "Type[CongruencyCheck]"
Reassembler: "Type[Reassembler]"
BackwardSlice: "Type[BackwardSlice]"
BinaryOptimizer: "Type[BinaryOptimizer]"
VFG: "Type[VFG]"
LoopFinder: "Type[LoopFinder]"
Disassembly: "Type[Disassembly]"
Veritesting: "Type[Veritesting]"
CodeTagging: "Type[CodeTagging]"
BoyScout: "Type[BoyScout]"
VariableRecoveryFast: "Type[VariableRecoveryFast]"
VariableRecovery: "Type[VariableRecovery]"
ReachingDefinitions: "Type[ReachingDefinitionsAnalysis]"
CompleteCallingConventions: "Type[CompleteCallingConventionsAnalysis]"
Clinic: "Type[Clinic]"
Propagator: "Type[PropagatorAnalysis]"
CallingConvention: "Type[CallingConventionAnalysis]"
Decompiler: "Type[Decompiler]"
XRefs: "Type[XRefsAnalysis]"
class AnalysesHubWithDefault(AnalysesHub, KnownAnalysesPlugin):
"""
    This class has type-hinting for all built-in analyses plugins
"""
class AnalysisFactory(Generic[A]):
def __init__(self, project: "Project", analysis_cls: Type[A]):
self._project = project
self._analysis_cls = analysis_cls
self.__doc__ = ""
self.__doc__ += analysis_cls.__doc__ or ""
self.__doc__ += analysis_cls.__init__.__doc__ or ""
self.__call__.__func__.__signature__ = Signature.from_callable(analysis_cls.__init__)
def prep(
self,
fail_fast=False,
kb: Optional["KnowledgeBase"] = None,
progress_callback: Optional[Callable] = None,
show_progressbar: bool = False,
) -> Type[A]:
@functools.wraps(self._analysis_cls.__init__)
def wrapper(*args, **kwargs):
oself = object.__new__(self._analysis_cls)
oself.named_errors = defaultdict(list)
oself.errors = []
oself.log = []
oself._fail_fast = fail_fast
oself._name = self._analysis_cls.__name__
oself.project = self._project
oself.kb = kb or self._project.kb
oself._progress_callback = progress_callback
oself._show_progressbar = show_progressbar
oself.__init__(*args, **kwargs)
return oself
return wrapper # type: ignore
def __call__(self, *args, **kwargs) -> A:
fail_fast = kwargs.pop("fail_fast", False)
kb = kwargs.pop("kb", self._project.kb)
progress_callback = kwargs.pop("progress_callback", None)
show_progressbar = kwargs.pop("show_progressbar", False)
w = self.prep(
fail_fast=fail_fast, kb=kb, progress_callback=progress_callback, show_progressbar=show_progressbar
)
r = w(*args, **kwargs)
# clean up so that it's always pickleable
r._progressbar = None
return r
class Analysis:
"""
This class represents an analysis on the program.
:ivar project: The project for this analysis.
:type project: angr.Project
:ivar KnowledgeBase kb: The knowledgebase object.
:ivar _progress_callback: A callback function for receiving the progress of this analysis. It only takes
one argument, which is a float number from 0.0 to 100.0 indicating the current
progress.
:ivar bool _show_progressbar: If a progressbar should be shown during the analysis. It's independent from
_progress_callback.
:ivar progress.Progress _progressbar: The progress bar object.
"""
project: "Project"
kb: "KnowledgeBase"
_fail_fast: bool
_name: str
errors = []
named_errors = defaultdict(list)
_progress_callback = None
_show_progressbar = False
_progressbar = None
_task = None
_PROGRESS_WIDGETS = [
progress.TaskProgressColumn(),
progress.BarColumn(),
progress.TextColumn("Elapsed Time:"),
progress.TimeElapsedColumn(),
progress.TextColumn("Time:"),
progress.TimeRemainingColumn(),
progress.TextColumn("{task.description}"),
]
@contextlib.contextmanager
def _resilience(self, name=None, exception=Exception):
try:
yield
except exception: # pylint:disable=broad-except
if self._fail_fast:
raise
else:
error = AnalysisLogEntry("exception occurred", exc_info=True)
l.error("Caught and logged %s with resilience: %s", error.exc_type.__name__, error.exc_value)
if name is None:
self.errors.append(error)
else:
self.named_errors[name].append(error)
def _initialize_progressbar(self):
"""
Initialize the progressbar.
:return: None
"""
self._progressbar = progress.Progress(*self._PROGRESS_WIDGETS)
self._task = self._progressbar.add_task(total=100, description="")
self._progressbar.start()
def _update_progress(self, percentage, text=None, **kwargs):
"""
Update the progress with a percentage, including updating the progressbar as well as calling the progress
callback.
:param float percentage: Percentage of the progressbar. from 0.0 to 100.0.
:param kwargs: Other parameters that will be passed to the progress_callback handler.
:return: None
"""
if self._show_progressbar:
if self._progressbar is None:
self._initialize_progressbar()
self._progressbar.update(self._task, completed=percentage)
if text is not None and self._progressbar:
self._progressbar.update(self._task, description=text)
if self._progress_callback is not None:
self._progress_callback(percentage, text=text, **kwargs) # pylint:disable=not-callable
def _finish_progress(self):
"""
Mark the progressbar as finished.
:return: None
"""
if self._show_progressbar:
if self._progressbar is None:
self._initialize_progressbar()
if self._progressbar is not None:
self._progressbar.update(self._task, completed=100)
self._progressbar.stop()
self._progressbar = None
if self._progress_callback is not None:
self._progress_callback(100.0) # pylint:disable=not-callable
@staticmethod
def _release_gil(ctr, freq, sleep_time=0.001):
"""
        Periodically calls time.sleep() and releases the GIL so other threads (like GUI threads) have a much better
        chance to be scheduled, and other critical components (like the GUI) can be kept responsive.
This is, of course, a hack before we move all computational intensive tasks to pure C++ implementations.
:param int ctr: A number provided by the caller.
:param int freq: How frequently time.sleep() should be called. time.sleep() is called when ctr % freq == 0.
:param sleep_time: Number (or fraction) of seconds to sleep.
:return: None
"""
if ctr != 0 and ctr % freq == 0:
time.sleep(sleep_time)
def __getstate__(self):
d = dict(self.__dict__)
if "_progressbar" in d:
del d["_progressbar"]
if "_progress_callback" in d:
del d["_progress_callback"]
if "_statusbar" in d:
del d["_statusbar"]
return d
def __setstate__(self, state):
self.__dict__.update(state)
def __repr__(self):
return f"<{self._name} Analysis Result at {id(self):#x}>"
default_analyses = VendorPreset()
AnalysesHub.register_preset("default", default_analyses)
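# Illustrative sketch (hypothetical analysis, not shipped with angr): a subclass
# of Analysis receives ``self.project`` and ``self.kb`` via AnalysisFactory, and
# registering it on the hub exposes it as ``proj.analyses.<Name>``.
#
#   class BlockCounter(Analysis):
#       def __init__(self):
#           cfg = self.project.analyses.CFGFast()
#           self.count = len(cfg.model.nodes())
#
#   AnalysesHub.register_default("BlockCounter", BlockCounter)
#   # proj.analyses.BlockCounter().count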
|
ee7929a5ce095fabcb49a0a5145e5ecc82538419
|
802b23f75579f7c4855c675ff2e93fcb93f4d6e2
|
/src/exception_process.py
|
aa06f2339effd149bd17c36696916275ddc8a633
|
[] |
no_license
|
s3team/uroboros
|
d7daa046e02e26e5b6b2d2f52b9ecf0e2589e2de
|
c074cca980eb94cc16735f1e568b08bb308001fc
|
refs/heads/master
| 2021-12-24T18:48:54.807768
| 2021-12-14T19:26:26
| 2021-12-14T19:26:26
| 44,036,165
| 201
| 60
| null | 2021-09-19T07:06:38
| 2015-10-11T02:21:47
|
OCaml
|
UTF-8
|
Python
| false
| false
| 3,124
|
py
|
exception_process.py
|
import re, sys
lines = []
fn = sys.argv[1]
with open(fn) as f:
lines = f.readlines()
lines = lines[2:-2]
for i in range(len(lines)):
l = lines[i]
if ";" in l:
lines[i] = l.split(';')[0]
text_b = 0
text_e = 0
with open('text_sec.info') as f:
l = f.readlines()[0]
text_b = int(l.split()[1], 16)
text_e = int(l.split()[3], 16) + text_b
tbl_b = int(lines[0].split()[0],16)
tbl_e = int(lines[-1].split()[0],16)
text_labels = []
local_labels = []
def update_label1 (d):
if d >= text_b and d < text_e:
text_labels.append("S_0x"+hex(d)[2:].upper()+"\n")
return True
elif d >= tbl_b and d < tbl_e:
local_labels.append(d)
return True
else:
return False
def update_label2 (s):
addr = s.split('_')[1]
update_label1(int(addr,16))
return "S_0x"+addr.upper()
# addr, type, cont
parsed_ls = []
def pat_match1(s):
pat1 = r'[0-9A-F]{7}h'
ms = re.search(pat1, s)
if ms:
msd = int(ms.group(0)[:-1], 16)
if update_label1(msd):
s1 = "S_0x"+hex(msd)[2:].upper()
s = s.replace(ms.group(0),s1)
return s
else:
pat1 = r'[0-9A-F]+h' # pure number
for mp in re.findall(pat1, s):
s = s.replace(mp,"0x"+mp[:-1])
return s
def pat_match2(s):
pat2 = r'[a-z]+_[0-9A-F]+'
ms = re.search(pat2, s)
if ms:
if "S_" not in ms.group(0):
t = ms.group(0)
elif "S_" not in ms.group(1):
t = ms.group(1)
else:
print "failed : "+s
s1 = update_label2(t)
s = s.replace(t,s1)
return s
def pat_match3(s):
def help(t,n):
s = ""
for i in range(0,int(t,16)):
s += n + ", "
return s[:-2]
pat1 = r'\d+ dup\(\d\)'
for mp in re.findall(pat1, s):
pat2 = r'(\d+) dup\((\d)\)'
mp1 = re.search(pat2, mp)
t = mp1.group(1)
n = mp1.group(2)
s = s.replace(mp,help(t,n))
return s
def parse(s):
if "@@CXXABI" in s:
return s.split('@')[0]
else:
s1 = pat_match1(s)
s2 = pat_match2(s1)
s3 = pat_match3(s2)
return s3
def typ_trans(t):
if 'dd' in t:
return ".long"
elif 'db' in t:
return ".byte"
elif 'string' in t:
return t
else:
print "unsupported type trans : " + t
for l in lines:
has_off = False
is_str = False
if "offset" in l:
has_off = True
l = l.replace('offset','')
if "'" in l:
        print(l)
l = l.replace("'", '"')
is_str = True
items = l.strip().split()
# only have address
if len(items) == 1:
continue
addr = int(items[0],16)
if "dd" == items[1] or "db" == items[1]:
label = ""
if has_off == False:
typ = items[1]
else:
typ = "dd"
cont = parse(' '.join(items[2:]))
else:
label = ""
if has_off == False:
typ = items[2]
else:
typ = "dd"
cont = parse(' '.join(items[3:]))
if is_str == True:
cont = cont.replace(',0','')
typ = '.string'
parsed_ls.append([addr,label,typ,cont])
for i in range(0,len(parsed_ls)):
l = parsed_ls[i]
if l[0] in local_labels:
l[1] = "S_0x"+hex(l[0])[2:].upper()+": "
l[2] = typ_trans(l[2])
parsed_ls[i] = l
# print parsed_ls
with open(fn+'.data', 'w') as f:
f.write('.section .'+fn+',"aw",@progbits\n.align 4\n')
f.writelines(map(lambda l : l[1]+" "+l[2]+" "+l[3]+"\n", parsed_ls))
with open(fn+'.info', 'w') as f:
f.writelines(set(text_labels))
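# Illustrative note (addresses are made up): a dump line such as
#     80495A0 dd offset loc_80483B0
# is emitted above as
#      .long S_0x80483B0
# in <section>.data; S_0x80483B0 is collected into <section>.info when the
# target lies inside .text, and the line itself gains an "S_0x80495A0: " label
# only if its own address is referenced from elsewhere in the table.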
|
71d04c68cb89d31f71d85117a6ef2dc2d185f302
|
5a52ccea88f90dd4f1acc2819997fce0dd5ffb7d
|
/alipay/aop/api/domain/CommonPrizeModelVo.py
|
6493927717528d63bc860aafc891cfa8c7c57688
|
[
"Apache-2.0"
] |
permissive
|
alipay/alipay-sdk-python-all
|
8bd20882852ffeb70a6e929038bf88ff1d1eff1c
|
1fad300587c9e7e099747305ba9077d4cd7afde9
|
refs/heads/master
| 2023-08-27T21:35:01.778771
| 2023-08-23T07:12:26
| 2023-08-23T07:12:26
| 133,338,689
| 247
| 70
|
Apache-2.0
| 2023-04-25T04:54:02
| 2018-05-14T09:40:54
|
Python
|
UTF-8
|
Python
| false
| false
| 2,191
|
py
|
CommonPrizeModelVo.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class CommonPrizeModelVo(object):
def __init__(self):
self._prize_desc_image_url = None
self._prize_down_desc_text = None
self._prize_name = None
@property
def prize_desc_image_url(self):
return self._prize_desc_image_url
@prize_desc_image_url.setter
def prize_desc_image_url(self, value):
self._prize_desc_image_url = value
@property
def prize_down_desc_text(self):
return self._prize_down_desc_text
@prize_down_desc_text.setter
def prize_down_desc_text(self, value):
self._prize_down_desc_text = value
@property
def prize_name(self):
return self._prize_name
@prize_name.setter
def prize_name(self, value):
self._prize_name = value
def to_alipay_dict(self):
params = dict()
if self.prize_desc_image_url:
if hasattr(self.prize_desc_image_url, 'to_alipay_dict'):
params['prize_desc_image_url'] = self.prize_desc_image_url.to_alipay_dict()
else:
params['prize_desc_image_url'] = self.prize_desc_image_url
if self.prize_down_desc_text:
if hasattr(self.prize_down_desc_text, 'to_alipay_dict'):
params['prize_down_desc_text'] = self.prize_down_desc_text.to_alipay_dict()
else:
params['prize_down_desc_text'] = self.prize_down_desc_text
if self.prize_name:
if hasattr(self.prize_name, 'to_alipay_dict'):
params['prize_name'] = self.prize_name.to_alipay_dict()
else:
params['prize_name'] = self.prize_name
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = CommonPrizeModelVo()
if 'prize_desc_image_url' in d:
o.prize_desc_image_url = d['prize_desc_image_url']
if 'prize_down_desc_text' in d:
o.prize_down_desc_text = d['prize_down_desc_text']
if 'prize_name' in d:
o.prize_name = d['prize_name']
return o
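# Illustrative sketch: the two converters above round-trip plain dicts, e.g.
#
#   vo = CommonPrizeModelVo.from_alipay_dict({'prize_name': 'coupon'})
#   vo.to_alipay_dict()   # -> {'prize_name': 'coupon'}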
|
df541b6d78d7c526f503d0010ec1f65757eed11b
|
a4515918f56dd7ab527e4999aa7fce818b6dd6f6
|
/Algorithms/Cryptography/rc4.py
|
74fc3885f7346c233d9c63cb50e5b82b85852429
|
[
"MIT"
] |
permissive
|
rathoresrikant/HacktoberFestContribute
|
0e2d4692a305f079e5aebcd331e8df04b90f90da
|
e2a69e284b3b1bd0c7c16ea41217cc6c2ec57592
|
refs/heads/master
| 2023-06-13T09:22:22.554887
| 2021-10-27T07:51:41
| 2021-10-27T07:51:41
| 151,832,935
| 102
| 901
|
MIT
| 2023-06-23T06:53:32
| 2018-10-06T11:23:31
|
C++
|
UTF-8
|
Python
| false
| false
| 945
|
py
|
rc4.py
|
def run_rc4(text,key):
resultado = []
for char in text:
resultado.append(rc4(char,key))
return bytearray(resultado)
def rc4(value,key):
SJ = KSA(key)
generatedByte = GenFluxo(SJ[0])
return value ^ next(generatedByte)
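# Note on the construction above: rc4() rebuilds the key schedule and draws only
# the first keystream byte on every call, so each input byte is XORed with the
# same value; the transform stays symmetric, so the round trip in __main__
# still decrypts correctly.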
def KSA(key):
S = []
T = []
for i in range(256):
S.append(i)
T.append(i % key)
j = 0
for i in range(256):
j = (j + S[i] + T[i]) % 256
swap(S,i,j)
return (S,T)
def GenFluxo(S):
i = 0
j = 0
while(True):
i = (i+1) % 256
j = (j + S[i]) % 256
swap(S,i,j)
K = S[(S[i] + S[j]) % 256]
yield K
def swap(lista,index1,index2):
tmp = lista[index1]
lista[index1] = lista[index2]
    lista[index2] = tmp
if __name__ == "__main__":
input_val = bytearray("encrypted text","utf-8")
print(run_rc4(input_val,4))
print(run_rc4(run_rc4(input_val,4),4).decode("utf-8"))
|
8b090d711871a5901506bb58ae87c44c2dea34eb
|
7bea5adf7d6284fbad0131d665e957d58adfe7c7
|
/allauth/socialaccount/providers/shopify/tests.py
|
415f7cdc1cf4f6433d69703d6c5b14b039fd4966
|
[
"MIT"
] |
permissive
|
pennersr/django-allauth
|
50c9e71c3666785368e92ed9e19ea0f6a5438cd2
|
6b8911a5ebbabda0d446f2743bd4d00d250ed500
|
refs/heads/main
| 2023-09-03T16:48:10.988418
| 2023-09-02T08:00:53
| 2023-09-02T08:00:53
| 976,994
| 7,719
| 3,481
|
MIT
| 2023-09-14T15:06:57
| 2010-10-10T20:10:52
|
Python
|
UTF-8
|
Python
| false
| false
| 5,506
|
py
|
tests.py
|
import json
from urllib.parse import parse_qs, urlparse
from django.test.utils import override_settings
from django.urls import reverse
from django.utils.http import urlencode
from allauth.socialaccount.models import SocialAccount
from allauth.socialaccount.tests import OAuth2TestsMixin
from allauth.tests import MockedResponse, TestCase, mocked_response
from .provider import ShopifyProvider
class ShopifyTests(OAuth2TestsMixin, TestCase):
provider_id = ShopifyProvider.id
def _complete_shopify_login(self, q, resp, resp_mock, with_refresh_token):
complete_url = reverse(self.provider.id + "_callback")
self.assertGreater(q["redirect_uri"][0].find(complete_url), 0)
response_json = self.get_login_response_json(
with_refresh_token=with_refresh_token
)
with mocked_response(
MockedResponse(200, response_json, {"content-type": "application/json"}),
resp_mock,
):
resp = self.client.get(
complete_url,
{
"code": "test",
"state": q["state"][0],
"shop": "test",
},
)
return resp
def login(self, resp_mock, process="login", with_refresh_token=True):
url = (
reverse(self.provider.id + "_login")
+ "?"
+ urlencode({"process": process, "shop": "test"})
)
resp = self.client.post(url)
self.assertEqual(resp.status_code, 302)
p = urlparse(resp["location"])
q = parse_qs(p.query)
resp = self._complete_shopify_login(q, resp, resp_mock, with_refresh_token)
return resp
def get_mocked_response(self):
return MockedResponse(
200,
"""
{
"shop": {
"id": "1234566",
"name": "Test Shop",
"email": "email@example.com"
}
}
""",
)
@override_settings(SOCIALACCOUNT_PROVIDERS={"shopify": {"IS_EMBEDDED": True}})
class ShopifyEmbeddedTests(ShopifyTests):
"""
Shopify embedded apps (that run within an iFrame) require a JS (not server)
redirect for starting the oauth2 process.
See Also:
https://help.shopify.com/api/sdks/embedded-app-sdk/getting-started#oauth
"""
def login(self, resp_mock, process="login", with_refresh_token=True):
resp = self.client.post(
reverse(self.provider.id + "_login")
+ "?"
+ urlencode({"process": process, "shop": "test"}),
)
self.assertEqual(resp.status_code, 200) # No re-direct, JS must do it
actual_content = resp.content.decode("utf8")
self.assertTrue(
"script" in actual_content,
"Content missing script tag. [Actual: {}]".format(actual_content),
)
self.assertTrue(
resp.xframe_options_exempt,
"Redirect JS must be allowed to run in Shopify iframe",
)
self.assertTrue(
"<!DOCTYPE html><html><head>" in actual_content
and "</head><body></body></html>" in actual_content,
"Expected standard HTML skeleton. [Actual: {}]".format(actual_content),
)
p = urlparse(
actual_content.split(";</script>")[0].split('location.href = "')[1]
)
q = parse_qs(p.query)
resp = self._complete_shopify_login(q, resp, resp_mock, with_refresh_token)
return resp
@override_settings(
SOCIALACCOUNT_PROVIDERS={
"shopify": {"AUTH_PARAMS": {"grant_options[]": "per-user"}}
}
)
class ShopifyPerUserAccessTests(ShopifyTests):
"""
Shopify has two access modes, offline (the default) and online/per-user.
Enabling 'online' access should cause all-auth to tie the logged in
Shopify user to the all-auth account (rather than the shop as a whole).
See Also:
https://help.shopify.com/api/getting-started/authentication/
oauth#api-access-modes
"""
def get_login_response_json(self, with_refresh_token=True):
response_data = {
"access_token": "testac",
"scope": "write_orders,read_customers",
"expires_in": 86399,
"associated_user_scope": "write_orders",
"associated_user": {
"id": 902541635,
"first_name": "Jon",
"last_name": "Smith",
"email": "jon@example.com",
"account_owner": True,
},
}
if with_refresh_token:
response_data["refresh_token"] = "testrf"
return json.dumps(response_data)
@override_settings(
SOCIALACCOUNT_AUTO_SIGNUP=True,
SOCIALACCOUNT_EMAIL_REQUIRED=True,
ACCOUNT_EMAIL_REQUIRED=True,
)
def test_associated_user(self):
resp_mocks = self.get_mocked_response()
resp = self.login(resp_mocks)
self.assertRedirects(resp, "/accounts/profile/", fetch_redirect_response=False)
social_account = SocialAccount.objects.filter(
provider=self.provider.id,
uid=902541635,
).first()
self.assertIsNotNone(social_account)
self.assertTrue("associated_user" in social_account.extra_data)
self.assertEqual(social_account.user.email, "jon@example.com")
self.assertEqual(social_account.user.first_name, "Jon")
self.assertEqual(social_account.user.last_name, "Smith")
|
cd35d2c9b475958c6ce964d66c708f4168236e68
|
28cf7b16dd29a5802d09b44b0186f6ae2c5ff0ed
|
/kuryr_kubernetes/tests/fake.py
|
57e00ea31a8fab19038cc32641f4d0ac78a187f2
|
[
"Apache-2.0"
] |
permissive
|
openstack/kuryr-kubernetes
|
c292826abfb8aa0d3f8ef3b1007362162db16956
|
4993c7a4b2d7e4b053832bf39602f2573fad6266
|
refs/heads/master
| 2023-08-18T19:21:02.487908
| 2023-08-03T13:58:11
| 2023-08-03T13:58:11
| 58,626,548
| 169
| 78
|
Apache-2.0
| 2022-04-13T02:27:52
| 2016-05-12T09:14:29
|
Python
|
UTF-8
|
Python
| false
| false
| 6,527
|
py
|
fake.py
|
# Copyright (c) 2017 Red Hat.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid
from openstack.network.v2 import port as os_port
from openstack.network.v2 import security_group_rule as os_sgr
from os_vif import objects as osv_objects
from os_vif.objects import vif as osv_vif
from oslo_serialization import jsonutils
from kuryr_kubernetes import constants
def _fake_vif(cls=osv_vif.VIFOpenVSwitch):
vif = cls(
id=uuid.uuid4(),
vif_name='h_interface',
bridge_name='bridge',
address='3e:94:b7:31:a0:83',
port_profile=osv_objects.vif.VIFPortProfileOpenVSwitch(
interface_id='89eccd45-43e9-43d8-b4cc-4c13db13f782',
profile_id=str(uuid.uuid4()),
),
)
vif.network = osv_objects.network.Network(id=uuid.uuid4(), mtu=1)
subnet = osv_objects.subnet.Subnet(
uuid=uuid.uuid4(),
dns=['192.168.0.1'],
cidr='192.168.0.0/24',
gateway='192.168.0.1',
routes=osv_objects.route.RouteList(objects=[]),
)
subnet.ips = osv_objects.fixed_ip.FixedIPList(objects=[])
subnet.ips.objects.append(
osv_objects.fixed_ip.FixedIP(address='192.168.0.2'))
vif.network.subnets.objects.append(subnet)
vif.active = True
return vif
def _fake_vif_dict(obj=None):
if obj:
return obj.obj_to_primitive()
else:
return _fake_vif().obj_to_primitive()
def _fake_vif_string(dictionary=None):
if dictionary:
return jsonutils.dumps(dictionary)
else:
return jsonutils.dumps(_fake_vif_dict())
def _fake_vifs(cls=osv_vif.VIFOpenVSwitch, prefix='eth'):
return {'eth0': _fake_vif(cls), prefix+'1': _fake_vif(cls)}
def _fake_vifs_dict(obj=None):
if obj:
return {
ifname: vif.obj_to_primitive() for
ifname, vif in obj.items()
}
else:
return {
ifname: vif.obj_to_primitive() for
ifname, vif in _fake_vifs().items()
}
def _fake_vifs_string(dictionary=None):
if dictionary:
return jsonutils.dumps(dictionary)
else:
return jsonutils.dumps(_fake_vifs_dict())
def get_port_obj(port_id='07cfe856-11cc-43d9-9200-ff4dc02d3620',
device_owner='compute:kuryr', ip_address=None,
vif_details=None, **kwargs):
fixed_ips = [{'subnet_id': 'e1942bb1-5f51-4646-9885-365b66215592',
'ip_address': '10.10.0.5'},
{'subnet_id': '4894baaf-df06-4a54-9885-9cd99d1cc245',
'ip_address': 'fd35:7db5:e3fc:0:f816:3eff:fe80:d421'}]
if ip_address:
fixed_ips[0]['ip_address'] = ip_address
security_group_ids = ['cfb3dfc4-7a43-4ba1-b92d-b8b2650d7f88']
if not vif_details:
vif_details = {'port_filter': True, 'ovs_hybrid_plug': False}
port_data = {'allowed_address_pairs': [],
'binding_host_id': 'kuryr-devstack',
'binding_profile': {},
'binding_vif_details': vif_details,
'binding_vif_type': 'ovs',
'binding_vnic_type': 'normal',
'created_at': '2017-06-09T13:23:24Z',
'data_plane_status': None,
'description': '',
'device_id': '',
'device_owner': device_owner,
'dns_assignment': None,
'dns_domain': None,
'dns_name': None,
'extra_dhcp_opts': [],
'fixed_ips': fixed_ips,
'id': port_id,
'ip_address': None,
'is_admin_state_up': True,
'is_port_security_enabled': True,
'location': None,
'mac_address': 'fa:16:3e:80:d4:21',
'name': constants.KURYR_PORT_NAME,
'network_id': 'ba44f957-c467-412b-b985-ae720514bc46',
'option_name': None,
'option_value': None,
'project_id': 'b6e8fb2bde594673923afc19cf168f3a',
'qos_policy_id': None,
'revision_number': 9,
'security_group_ids': security_group_ids,
'status': u'DOWN',
'subnet_id': None,
'tags': [],
'trunk_details': None,
'updated_at': u'2019-12-04T15:06:09Z'}
port_data.update(kwargs)
return os_port.Port(**port_data)
def get_sgr_obj(sgr_id='7621d1e0-a2d2-4496-94eb-ffd375d20877',
sg_id='cfb3dfc4-7a43-4ba1-b92d-b8b2650d7f88',
protocol='tcp', direction='ingress'):
sgr_data = {'description': '',
'direction': direction,
'ether_type': 'IPv4',
'id': sgr_id,
'port_range_max': 8080,
'port_range_min': 8080,
'project_id': '5ea46368c7fe436bb8732738c149fbce',
'protocol': protocol,
'remote_group_id': None,
'remote_ip_prefix': None,
'security_group_id': sg_id,
'tenant_id': '5ea46368c7fe436bb8732738c149fbce'}
return os_sgr.SecurityGroupRule(**sgr_data)
def get_k8s_pod(name='pod-5bb648d658-55n76', namespace='namespace',
uid='683da866-6bb1-4da2-bf6a-a5f4137c38e7'):
return {'apiVersion': 'v1',
'kind': 'Pod',
'metadata': {'creationTimestamp': '2020-12-22T09:04:29Z',
'finalizers': ['kuryr.openstack.org/pod-finalizer'],
'generateName': 'pod-5bb648d658-',
'labels': {'app': 'pod',
'pod-template-hash': '5bb648d658'},
'operation': 'Update',
'name': name,
'namespace': namespace,
'resourceVersion': '19416',
'uid': uid},
'spec': {},
'status': {}}
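# Illustrative sketch (hypothetical test, not part of this module): the helpers
# above are meant to be consumed from unit tests, e.g.
#
#   vif = _fake_vif()
#   port = get_port_obj(ip_address='10.10.0.42')
#   pod = get_k8s_pod(name='demo-pod')
#   assert port.fixed_ips[0]['ip_address'] == '10.10.0.42'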
|
0809b0450ab9cff6989bd189dc15abcc7aebcb12
|
80293519e9f36c9f5772f7e3615de912a2a48a9f
|
/Hackathon_Solutions/ZS-Patient Drug-Switch Prediction/Tirthankar Das/Model.py
|
e5d6e69c54668ed0137318fe7280032f77fe00b1
|
[] |
no_license
|
analyticsindiamagazine/MachineHack
|
95362a677140051e10c1555f0a46b98a47383972
|
6d15adec5db64d4a7629f9e1aa27e11ca730c5ae
|
refs/heads/master
| 2023-02-18T20:20:37.787343
| 2022-10-12T09:44:20
| 2022-10-12T09:44:20
| 180,506,240
| 396
| 259
| null | 2023-02-11T01:15:53
| 2019-04-10T05:11:19
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 17,655
|
py
|
Model.py
|
#!/usr/bin/env python
# coding: utf-8
# In[1]:
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
import lightgbm as lgb
from sklearn.model_selection import StratifiedKFold
import scipy
from sklearn.metrics import roc_curve, auc
# # Feature Generation for Train:
# In[ ]:
train = pd.read_csv("train_data.csv")
patient=pd.DataFrame(train.patient_id.unique())
patient.columns=['patient_id']
# Recency:
pat_event=train.groupby(['patient_id', 'event_name'])['event_time'].min().reset_index()
pat_event2=pat_event.pivot(index='patient_id', columns='event_name', values='event_time').reset_index()
pat_event2= pat_event2.rename_axis(None, axis=1)
del pat_event
pat_spcl=train.groupby(['patient_id', 'specialty'])['event_time'].min().reset_index()
pat_spcl2=pat_spcl.pivot(index='patient_id', columns='specialty', values='event_time').reset_index()
pat_spcl2= pat_spcl2.rename_axis(None, axis=1)
del pat_spcl
pat_pln=train.groupby(['patient_id', 'plan_type'])['event_time'].min().reset_index()
pat_pln2=pat_pln.pivot(index='patient_id', columns='plan_type', values='event_time').reset_index()
pat_pln2= pat_pln2.rename_axis(None, axis=1)
del pat_pln
pat_event2=pat_event2.add_prefix('recency__event_name__')
pat_event2 = pat_event2.rename(columns = {'recency__event_name__patient_id': "patient_id"})
pat_spcl2=pat_spcl2.add_prefix('recency__specialty__')
pat_spcl2 = pat_spcl2.rename(columns = {'recency__specialty__patient_id': "patient_id"})
pat_pln2=pat_pln2.add_prefix('recency__plan_type__')
pat_pln2 = pat_pln2.rename(columns = {'recency__plan_type__patient_id': "patient_id"})
patient = pd.merge(patient, pat_event2, on='patient_id')
patient = pd.merge(patient, pat_spcl2, on='patient_id')
patient = pd.merge(patient, pat_pln2, on='patient_id')
del pat_event2
del pat_spcl2
del pat_pln2
# Frequency:
for i in range(30,1110,30):
dt=train[train['event_time']<=i].reset_index()
del dt['index']
dt1=dt.groupby(['patient_id', 'event_name'])['event_time'].count().reset_index()
dt2=dt1.pivot(index='patient_id', columns='event_name', values='event_time').reset_index()
dt2=dt2.rename_axis(None, axis=1)
dt2.fillna(0,inplace=True)
chk2=dt2.add_prefix('frequency__event_name__')
chk2=chk2.add_suffix('__'+str(i))
chk2=chk2.rename(columns = {'frequency__event_name__patient_id__'+str(i): 'patient_id'})
patient=pd.merge(patient,chk2, on='patient_id', how='left')
for i in range(30,1110,30):
dt=train[train['event_time']<=i].reset_index()
del dt['index']
dt1=dt.groupby(['patient_id', 'specialty'])['event_time'].count().reset_index()
dt2=dt1.pivot(index='patient_id', columns='specialty', values='event_time').reset_index()
dt2=dt2.rename_axis(None, axis=1)
dt2.fillna(0,inplace=True)
chk2=dt2.add_prefix('frequency__specialty__')
chk2=chk2.add_suffix('__'+str(i))
chk2=chk2.rename(columns = {'frequency__specialty__patient_id__'+str(i): 'patient_id'})
patient=pd.merge(patient,chk2, on='patient_id', how='left')
for i in range(30,1110,30):
dt=train[train['event_time']<=i].reset_index()
del dt['index']
dt1=dt.groupby(['patient_id', 'plan_type'])['event_time'].count().reset_index()
dt2=dt1.pivot(index='patient_id', columns='plan_type', values='event_time').reset_index()
dt2=dt2.rename_axis(None, axis=1)
dt2.fillna(0,inplace=True)
chk2=dt2.add_prefix('frequency__plan_type__')
chk2=chk2.add_suffix('__'+str(i))
chk2=chk2.rename(columns = {'frequency__plan_type__patient_id__'+str(i): 'patient_id'})
patient=pd.merge(patient,chk2, on='patient_id', how='left')
# NormChange:
for i in range(30,570,30):
data_post = train[train['event_time']<=i].reset_index(drop=True)
data_pre = train[train['event_time']>i].reset_index(drop=True)
data_post1=data_post.groupby(['patient_id', 'event_name'])['event_time'].count().reset_index()
data_post1['feature_value_post'] = data_post1['event_time']/i
data_pre1=data_pre.groupby(['patient_id', 'event_name'])['event_time'].count().reset_index()
data_pre1['feature_value_pre'] = data_pre1['event_time']/(1080 - i)
normChange = pd.merge(data_post1, data_pre1, on=['patient_id', 'event_name'], how='outer')
normChange.fillna(0, inplace=True)
normChange['feature_value'] = np.where(normChange['feature_value_post']>normChange['feature_value_pre'], 1, 0)
normChange=normChange[['patient_id','event_name','feature_value']]
dt2=normChange.pivot(index='patient_id', columns='event_name', values='feature_value').reset_index()
dt2=dt2.rename_axis(None, axis=1)
dt2.fillna(0,inplace=True)
chk2=dt2.add_prefix('normChange__event_name__')
chk2=chk2.add_suffix('__'+str(i))
chk2=chk2.rename(columns = {'normChange__event_name__patient_id__'+str(i): 'patient_id'})
patient=pd.merge(patient,chk2, on='patient_id', how='left')
for i in range(30,570,30):
data_post = train[train['event_time']<=i].reset_index(drop=True)
data_pre = train[train['event_time']>i].reset_index(drop=True)
data_post1=data_post.groupby(['patient_id', 'specialty'])['event_time'].count().reset_index()
data_post1['feature_value_post'] = data_post1['event_time']/i
data_pre1=data_pre.groupby(['patient_id', 'specialty'])['event_time'].count().reset_index()
data_pre1['feature_value_pre'] = data_pre1['event_time']/(1080 - i)
normChange = pd.merge(data_post1, data_pre1, on=['patient_id', 'specialty'], how='outer')
normChange.fillna(0, inplace=True)
normChange['feature_value'] = np.where(normChange['feature_value_post']>normChange['feature_value_pre'], 1, 0)
normChange=normChange[['patient_id','specialty','feature_value']]
dt2=normChange.pivot(index='patient_id', columns='specialty', values='feature_value').reset_index()
dt2=dt2.rename_axis(None, axis=1)
dt2.fillna(0,inplace=True)
chk2=dt2.add_prefix('normChange__specialty__')
chk2=chk2.add_suffix('__'+str(i))
chk2=chk2.rename(columns = {'normChange__specialty__patient_id__'+str(i): 'patient_id'})
patient=pd.merge(patient,chk2, on='patient_id', how='left')
for i in range(30,570,30):
data_post = train[train['event_time']<=i].reset_index(drop=True)
data_pre = train[train['event_time']>i].reset_index(drop=True)
data_post1=data_post.groupby(['patient_id', 'plan_type'])['event_time'].count().reset_index()
data_post1['feature_value_post'] = data_post1['event_time']/i
data_pre1=data_pre.groupby(['patient_id', 'plan_type'])['event_time'].count().reset_index()
data_pre1['feature_value_pre'] = data_pre1['event_time']/(1080 - i)
normChange = pd.merge(data_post1, data_pre1, on=['patient_id', 'plan_type'], how='outer')
normChange.fillna(0, inplace=True)
normChange['feature_value'] = np.where(normChange['feature_value_post']>normChange['feature_value_pre'], 1, 0)
normChange=normChange[['patient_id','plan_type','feature_value']]
dt2=normChange.pivot(index='patient_id', columns='plan_type', values='feature_value').reset_index()
dt2=dt2.rename_axis(None, axis=1)
dt2.fillna(0,inplace=True)
chk2=dt2.add_prefix('normChange__plan_type__')
chk2=chk2.add_suffix('__'+str(i))
chk2=chk2.rename(columns = {'normChange__plan_type__patient_id__'+str(i): 'patient_id'})
patient=pd.merge(patient,chk2, on='patient_id', how='left')
patient.to_csv('train_features.csv', index=False)
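# Illustrative note (toy numbers): for a patient with events at t = 10 and 400,
# recency__event_name__<X> is the smallest event_time (10); frequency__...__90
# counts events with event_time <= 90 (here 1); and normChange__...__90 is 1
# exactly when the post-window rate count(<=90)/90 exceeds the pre-window rate
# count(>90)/(1080 - 90), i.e. 1/90 > 1/990 here.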
# # Feature Generation for Test:
# In[ ]:
train = pd.read_csv("test_data.csv")
patient=pd.DataFrame(train.patient_id.unique())
patient.columns=['patient_id']
# Recency:
pat_event=train.groupby(['patient_id', 'event_name'])['event_time'].min().reset_index()
pat_event2=pat_event.pivot(index='patient_id', columns='event_name', values='event_time').reset_index()
pat_event2= pat_event2.rename_axis(None, axis=1)
del pat_event
pat_spcl=train.groupby(['patient_id', 'specialty'])['event_time'].min().reset_index()
pat_spcl2=pat_spcl.pivot(index='patient_id', columns='specialty', values='event_time').reset_index()
pat_spcl2= pat_spcl2.rename_axis(None, axis=1)
del pat_spcl
pat_pln=train.groupby(['patient_id', 'plan_type'])['event_time'].min().reset_index()
pat_pln2=pat_pln.pivot(index='patient_id', columns='plan_type', values='event_time').reset_index()
pat_pln2= pat_pln2.rename_axis(None, axis=1)
del pat_pln
pat_event2=pat_event2.add_prefix('recency__event_name__')
pat_event2 = pat_event2.rename(columns = {'recency__event_name__patient_id': "patient_id"})
pat_spcl2=pat_spcl2.add_prefix('recency__specialty__')
pat_spcl2 = pat_spcl2.rename(columns = {'recency__specialty__patient_id': "patient_id"})
pat_pln2=pat_pln2.add_prefix('recency__plan_type__')
pat_pln2 = pat_pln2.rename(columns = {'recency__plan_type__patient_id': "patient_id"})
patient = pd.merge(patient, pat_event2, on='patient_id')
patient = pd.merge(patient, pat_spcl2, on='patient_id')
patient = pd.merge(patient, pat_pln2, on='patient_id')
del pat_event2
del pat_spcl2
del pat_pln2
# Frequency:
for i in range(30,1110,30):
dt=train[train['event_time']<=i].reset_index()
del dt['index']
dt1=dt.groupby(['patient_id', 'event_name'])['event_time'].count().reset_index()
dt2=dt1.pivot(index='patient_id', columns='event_name', values='event_time').reset_index()
dt2=dt2.rename_axis(None, axis=1)
dt2.fillna(0,inplace=True)
chk2=dt2.add_prefix('frequency__event_name__')
chk2=chk2.add_suffix('__'+str(i))
chk2=chk2.rename(columns = {'frequency__event_name__patient_id__'+str(i): 'patient_id'})
patient=pd.merge(patient,chk2, on='patient_id', how='left')
for i in range(30,1110,30):
dt=train[train['event_time']<=i].reset_index()
del dt['index']
dt1=dt.groupby(['patient_id', 'specialty'])['event_time'].count().reset_index()
dt2=dt1.pivot(index='patient_id', columns='specialty', values='event_time').reset_index()
dt2=dt2.rename_axis(None, axis=1)
dt2.fillna(0,inplace=True)
chk2=dt2.add_prefix('frequency__specialty__')
chk2=chk2.add_suffix('__'+str(i))
chk2=chk2.rename(columns = {'frequency__specialty__patient_id__'+str(i): 'patient_id'})
patient=pd.merge(patient,chk2, on='patient_id', how='left')
for i in range(30,1110,30):
dt=train[train['event_time']<=i].reset_index()
del dt['index']
dt1=dt.groupby(['patient_id', 'plan_type'])['event_time'].count().reset_index()
dt2=dt1.pivot(index='patient_id', columns='plan_type', values='event_time').reset_index()
dt2=dt2.rename_axis(None, axis=1)
dt2.fillna(0,inplace=True)
chk2=dt2.add_prefix('frequency__plan_type__')
chk2=chk2.add_suffix('__'+str(i))
chk2=chk2.rename(columns = {'frequency__plan_type__patient_id__'+str(i): 'patient_id'})
patient=pd.merge(patient,chk2, on='patient_id', how='left')
# NormChange:
for i in range(30,570,30):
data_post = train[train['event_time']<=i].reset_index(drop=True)
data_pre = train[train['event_time']>i].reset_index(drop=True)
data_post1=data_post.groupby(['patient_id', 'event_name'])['event_time'].count().reset_index()
data_post1['feature_value_post'] = data_post1['event_time']/i
data_pre1=data_pre.groupby(['patient_id', 'event_name'])['event_time'].count().reset_index()
data_pre1['feature_value_pre'] = data_pre1['event_time']/(1080 - i)
normChange = pd.merge(data_post1, data_pre1, on=['patient_id', 'event_name'], how='outer')
normChange.fillna(0, inplace=True)
normChange['feature_value'] = np.where(normChange['feature_value_post']>normChange['feature_value_pre'], 1, 0)
normChange=normChange[['patient_id','event_name','feature_value']]
dt2=normChange.pivot(index='patient_id', columns='event_name', values='feature_value').reset_index()
dt2=dt2.rename_axis(None, axis=1)
dt2.fillna(0,inplace=True)
chk2=dt2.add_prefix('normChange__event_name__')
chk2=chk2.add_suffix('__'+str(i))
chk2=chk2.rename(columns = {'normChange__event_name__patient_id__'+str(i): 'patient_id'})
patient=pd.merge(patient,chk2, on='patient_id', how='left')
for i in range(30,570,30):
data_post = train[train['event_time']<=i].reset_index(drop=True)
data_pre = train[train['event_time']>i].reset_index(drop=True)
data_post1=data_post.groupby(['patient_id', 'specialty'])['event_time'].count().reset_index()
data_post1['feature_value_post'] = data_post1['event_time']/i
data_pre1=data_pre.groupby(['patient_id', 'specialty'])['event_time'].count().reset_index()
data_pre1['feature_value_pre'] = data_pre1['event_time']/(1080 - i)
normChange = pd.merge(data_post1, data_pre1, on=['patient_id', 'specialty'], how='outer')
normChange.fillna(0, inplace=True)
normChange['feature_value'] = np.where(normChange['feature_value_post']>normChange['feature_value_pre'], 1, 0)
normChange=normChange[['patient_id','specialty','feature_value']]
dt2=normChange.pivot(index='patient_id', columns='specialty', values='feature_value').reset_index()
dt2=dt2.rename_axis(None, axis=1)
dt2.fillna(0,inplace=True)
chk2=dt2.add_prefix('normChange__specialty__')
chk2=chk2.add_suffix('__'+str(i))
chk2=chk2.rename(columns = {'normChange__specialty__patient_id__'+str(i): 'patient_id'})
patient=pd.merge(patient,chk2, on='patient_id', how='left')
for i in range(30,570,30):
data_post = train[train['event_time']<=i].reset_index(drop=True)
data_pre = train[train['event_time']>i].reset_index(drop=True)
data_post1=data_post.groupby(['patient_id', 'plan_type'])['event_time'].count().reset_index()
data_post1['feature_value_post'] = data_post1['event_time']/i
data_pre1=data_pre.groupby(['patient_id', 'plan_type'])['event_time'].count().reset_index()
data_pre1['feature_value_pre'] = data_pre1['event_time']/(1080 - i)
normChange = pd.merge(data_post1, data_pre1, on=['patient_id', 'plan_type'], how='outer')
normChange.fillna(0, inplace=True)
normChange['feature_value'] = np.where(normChange['feature_value_post']>normChange['feature_value_pre'], 1, 0)
normChange=normChange[['patient_id','plan_type','feature_value']]
dt2=normChange.pivot(index='patient_id', columns='plan_type', values='feature_value').reset_index()
dt2=dt2.rename_axis(None, axis=1)
dt2.fillna(0,inplace=True)
chk2=dt2.add_prefix('normChange__plan_type__')
chk2=chk2.add_suffix('__'+str(i))
chk2=chk2.rename(columns = {'normChange__plan_type__patient_id__'+str(i): 'patient_id'})
patient=pd.merge(patient,chk2, on='patient_id', how='left')
patient.to_csv('test_features.csv', index=False)
# # Feature Selection:
# In[2]:
train_features = pd.read_csv("train_features.csv")
train_labels = pd.read_csv("train_labels.csv")
test_features = pd.read_csv("test_features.csv")
train_features1=pd.merge(train_features,train_labels, on='patient_id', how='left')
train_y=train_features1.outcome_flag
del train_features1['outcome_flag']
del train_features1['patient_id']
del test_features['patient_id']
train_features1.fillna(9999999, inplace=True)
# In[ ]:
X_train, X_validation, y_train, y_validation = train_test_split(train_features1, train_y, train_size=0.7, random_state=1234)
model1=lgb.LGBMClassifier(n_estimators=10000,
n_jobs= -1,
min_child_weight= 1,
feature_fraction= 0.5, #0.5
num_leaves= 80,#80
learning_rate= 0.1,#0.1
colsample_bytree=0.3, #New
random_state=1234)
model1.fit(train_features1, train_y, eval_set=[(X_validation, y_validation)],verbose=200,early_stopping_rounds=500)
t1=model1.feature_importances_
t1=pd.DataFrame(t1)
t2=train_features1.columns
t2=pd.DataFrame(t2)
t1.columns=['Importance']
t2.columns=['Variable']
t3=pd.concat([t2,t1],axis=1)
t4=t3[t3['Importance'] > 0]
my_cols = list(t4.Variable)
train_features2 = train_features1[my_cols]
test_features2 = test_features[my_cols]
test_features2.fillna(9999999, inplace=True)
# # Model Building:
# In[ ]:
folds = StratifiedKFold(n_splits=30, shuffle=True, random_state=12345678)
models = []
scores = []
i=1
dt_v1=pd.DataFrame()
for train_index, test_index in folds.split(train_features2, train_y):
print('###########')
X_train, X_val = train_features2.iloc[train_index], train_features2.iloc[test_index]
y_train, y_val = train_y.iloc[train_index], train_y.iloc[test_index]
lgb_params = {'n_estimators': 10000,
'n_jobs': -1,
'min_child_weight': 1,
'feature_fraction' : 0.7,
'num_leaves' : 40,
'learning_rate':0.01,
'random_state':1234,
'seed':1234
}
model=lgb.LGBMClassifier(**lgb_params)
model.fit(X_train, y_train,eval_set=[(X_val, y_val)],verbose=200,eval_metric='auc',early_stopping_rounds=400)
scores.append([model.predict_proba(test_features2)])
print('\n')
s_test=np.mean(scores, axis=0)
s_test=pd.DataFrame(s_test[0])
s_test=s_test.add_prefix("pred_")
test_features_patient = pd.read_csv("test_features.csv",usecols=["patient_id"])
s_test=pd.concat([test_features_patient,s_test],axis=1)
s_test=s_test[['patient_id','pred_1']]
s_test.columns=['patient_id','outcome_flag']
s_test.to_excel('submission_Best_score.xlsx', index=False)
|
21abb188e0c2cd088b5f434b4958d3e0bbf2bdc8
|
f9d564f1aa83eca45872dab7fbaa26dd48210d08
|
/huaweicloud-sdk-metastudio/huaweicloudsdkmetastudio/v1/model/picture_modeling_by_url_req.py
|
6a200fea29a267adcad55fe7f03aab5cc5f2afe9
|
[
"Apache-2.0"
] |
permissive
|
huaweicloud/huaweicloud-sdk-python-v3
|
cde6d849ce5b1de05ac5ebfd6153f27803837d84
|
f69344c1dadb79067746ddf9bfde4bddc18d5ecf
|
refs/heads/master
| 2023-09-01T19:29:43.013318
| 2023-08-31T08:28:59
| 2023-08-31T08:28:59
| 262,207,814
| 103
| 44
|
NOASSERTION
| 2023-06-22T14:50:48
| 2020-05-08T02:28:43
|
Python
|
UTF-8
|
Python
| false
| false
| 5,409
|
py
|
picture_modeling_by_url_req.py
|
# coding: utf-8
import six
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class PictureModelingByUrlReq:
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'picture_url': 'str',
'style_id': 'str',
'name': 'str',
'notify_url': 'str'
}
attribute_map = {
'picture_url': 'picture_url',
'style_id': 'style_id',
'name': 'name',
'notify_url': 'notify_url'
}
def __init__(self, picture_url=None, style_id=None, name=None, notify_url=None):
"""PictureModelingByUrlReq
The model defined in huaweicloud sdk
        :param picture_url: Image URL
        :type picture_url: str
        :param style_id: Style ID
        :type style_id: str
        :param name: Model name
        :type name: str
        :param notify_url: Callback URL invoked when the picture modeling task finishes.
:type notify_url: str
"""
self._picture_url = None
self._style_id = None
self._name = None
self._notify_url = None
self.discriminator = None
self.picture_url = picture_url
self.style_id = style_id
self.name = name
if notify_url is not None:
self.notify_url = notify_url
@property
def picture_url(self):
"""Gets the picture_url of this PictureModelingByUrlReq.
        Image URL
:return: The picture_url of this PictureModelingByUrlReq.
:rtype: str
"""
return self._picture_url
@picture_url.setter
def picture_url(self, picture_url):
"""Sets the picture_url of this PictureModelingByUrlReq.
        Image URL
:param picture_url: The picture_url of this PictureModelingByUrlReq.
:type picture_url: str
"""
self._picture_url = picture_url
@property
def style_id(self):
"""Gets the style_id of this PictureModelingByUrlReq.
        Style ID
:return: The style_id of this PictureModelingByUrlReq.
:rtype: str
"""
return self._style_id
@style_id.setter
def style_id(self, style_id):
"""Sets the style_id of this PictureModelingByUrlReq.
        Style ID
:param style_id: The style_id of this PictureModelingByUrlReq.
:type style_id: str
"""
self._style_id = style_id
@property
def name(self):
"""Gets the name of this PictureModelingByUrlReq.
        Model name
:return: The name of this PictureModelingByUrlReq.
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this PictureModelingByUrlReq.
        Model name
:param name: The name of this PictureModelingByUrlReq.
:type name: str
"""
self._name = name
@property
def notify_url(self):
"""Gets the notify_url of this PictureModelingByUrlReq.
        Callback URL invoked when the picture modeling task finishes.
:return: The notify_url of this PictureModelingByUrlReq.
:rtype: str
"""
return self._notify_url
@notify_url.setter
def notify_url(self, notify_url):
"""Sets the notify_url of this PictureModelingByUrlReq.
        Callback URL invoked when the picture modeling task finishes.
:param notify_url: The notify_url of this PictureModelingByUrlReq.
:type notify_url: str
"""
self._notify_url = notify_url
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
import simplejson as json
if six.PY2:
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
def __repr__(self):
"""For `print`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, PictureModelingByUrlReq):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
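# Illustrative sketch (field values are made up): instances serialize to plain
# dicts via to_dict(), with unset optional fields kept as None, e.g.
#
#   req = PictureModelingByUrlReq(picture_url='https://example.com/a.jpg',
#                                 style_id='style-001', name='demo')
#   req.to_dict()
#   # -> {'picture_url': 'https://example.com/a.jpg', 'style_id': 'style-001',
#   #     'name': 'demo', 'notify_url': None}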
|
cc2558b659d075a607830762da47790fad3a88a9
|
ee63c4aa5ae8613d09080b2b766cd335f3c37a10
|
/tools/tensorflow_docs/api_generator/compat_test/estimator.py
|
0b54303ca3919b5ddbc87cf69ba84e5682bb9c89
|
[
"Apache-2.0",
"MIT"
] |
permissive
|
tensorflow/docs
|
fa7d1823d36dd07a8ec22011049ac9a3fe8124fe
|
a1abe995b09aca2768c03e509a146354828c6aa3
|
refs/heads/master
| 2023-08-21T01:02:59.624541
| 2023-08-18T20:53:45
| 2023-08-18T20:54:23
| 129,317,474
| 6,515
| 6,676
|
Apache-2.0
| 2023-09-14T15:13:04
| 2018-04-12T22:23:12
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 1,497
|
py
|
estimator.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""This is a test module.
@compatibility(TF2)
test
@end_compatibility
Hello
"""
def a_function(x, y):
"""This is a function.
@compatibility(TF2)
test
@end_compatibility
@compatibility(numpy)
test
@end_compatibility
It does things.
Args:
x: x
y: y
Returns:
None
"""
del x
del y
return None
class AClass:
"""This is a class.
@compatibility(TF2)
test
@end_compatibility
It does things too.
Attributes:
x: x
y: x
"""
def __init__(self, x, y):
self.x = x
self.y = y
def a_method(self, x, y):
"""Methods can have compatibility notes too.
@compatibility(TF2)
test
@end_compatibility
It does things too.
Args:
x: x
y: y
Returns:
None
"""
del x
del y
return None
|
e5c539edf872a8f7091575cd3eaca73969fe103e
|
2ddbcefa36bf68ad2e7f4018c5283b7aef726a1f
|
/etw/GUID.py
|
6edf9543ac00f21117f877bd56ba70a6c0fe3bda
|
[
"Apache-2.0"
] |
permissive
|
fireeye/pywintrace
|
c275ba90f8ec942a4ff2f03fb700c562859e5f25
|
977eeb85d08982c160d9594f5f875f54db7a3415
|
refs/heads/master
| 2023-08-16T14:29:28.361857
| 2023-03-23T18:41:16
| 2023-03-23T18:41:16
| 102,869,990
| 273
| 73
|
Apache-2.0
| 2023-03-23T18:41:18
| 2017-09-08T14:27:01
|
Python
|
UTF-8
|
Python
| false
| false
| 3,208
|
py
|
GUID.py
|
########################################################################
# Copyright 2017 FireEye Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
########################################################################
import ctypes as ct
def cmp(a, b):
return (a > b) - (a < b)
BYTE = ct.c_byte
WORD = ct.c_ushort
DWORD = ct.c_ulong
_ole32 = ct.oledll.ole32
_StringFromCLSID = _ole32.StringFromCLSID
_CoTaskMemFree = ct.windll.ole32.CoTaskMemFree
_ProgIDFromCLSID = _ole32.ProgIDFromCLSID
_CLSIDFromString = _ole32.CLSIDFromString
_CLSIDFromProgID = _ole32.CLSIDFromProgID
_CoCreateGuid = _ole32.CoCreateGuid
# python 2
try:
basestring
except NameError:
# python 3
basestring = str
class GUID(ct.Structure):
_fields_ = [("Data1", DWORD),
("Data2", WORD),
("Data3", WORD),
("Data4", BYTE * 8)]
def __init__(self, name=None):
if name is not None:
_CLSIDFromString(str(name), ct.byref(self))
def __repr__(self):
return 'GUID("%s")' % str(self)
def __str__(self):
p = ct.c_wchar_p()
_StringFromCLSID(ct.byref(self), ct.byref(p))
result = p.value
_CoTaskMemFree(p)
return result
def __cmp__(self, other):
if isinstance(other, GUID):
return cmp(bytes(self), bytes(other))
return -1
def __nonzero__(self):
return self != GUID_null
def __eq__(self, other):
return isinstance(other, GUID) and bytes(self) == bytes(other)
def __hash__(self):
# We make GUID instances hashable, although they are mutable.
return hash(bytes(self))
def copy(self):
return GUID(str(self))
@classmethod
def from_progid(cls, progid):
"""Get guid from progid, ...
"""
if hasattr(progid, "_reg_clsid_"):
progid = progid._reg_clsid_
if isinstance(progid, cls):
return progid
elif isinstance(progid, basestring):
if progid.startswith("{"):
return cls(progid)
inst = cls()
_CLSIDFromProgID(str(progid), ct.byref(inst))
return inst
else:
raise TypeError("Cannot construct guid from %r" % progid)
def as_progid(self):
"Convert a GUID into a progid"
progid = ct.c_wchar_p()
_ProgIDFromCLSID(ct.byref(self), ct.byref(progid))
result = progid.value
_CoTaskMemFree(progid)
return result
@classmethod
def create_new(cls):
"Create a brand new guid"
guid = cls()
_CoCreateGuid(ct.byref(guid))
return guid
GUID_null = GUID()
__all__ = ["GUID"]
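# Illustrative sketch (Windows-only; the CLSID below is the well-known
# "My Computer" shell CLSID, used purely as sample input):
#
#   g = GUID("{20D04FE0-3AEA-1069-A2D8-08002B30309D}")
#   fresh = GUID.create_new()             # from CoCreateGuid
#   assert g == g.copy() and g != fresh
#   print(str(g))                         # braces-and-hyphens form round-trips
#   # GUID.from_progid("InternetExplorer.Application") resolves a ProgID;
#   # as_progid() goes the other way when a ProgID is registered for the class.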
|
d0a60c4333ab9faa260e73567e1b1758ca6f08ff
|
e23256d6e31720bdb568694faee21ff51fd7e505
|
/pixellib/instance/mask_rcnn.py
|
e9e99f8348a9d817a221a2f8f3dc8ef60056be16
|
[
"MIT"
] |
permissive
|
ayoolaolafenwa/PixelLib
|
534d94833ce1b499d43fd0534ad70de1b917615c
|
3d2a5abb02f081eb9446bf7d1eaa4b773ff9bd1d
|
refs/heads/master
| 2023-08-30T14:44:30.049673
| 2022-01-29T22:51:36
| 2022-01-29T22:51:36
| 255,074,156
| 983
| 281
|
MIT
| 2022-08-09T20:15:08
| 2020-04-12T12:07:34
|
Python
|
UTF-8
|
Python
| false
| false
| 125,466
|
py
|
mask_rcnn.py
|
"""
Mask R-CNN
The main Mask R-CNN model implementation.
Copyright (c) 2017 Matterport, Inc.
Licensed under the MIT License (see LICENSE for details)
Written by Waleed Abdulla
"""
import os
import datetime
import re
import math
from collections import OrderedDict
import multiprocessing
import numpy as np
import tensorflow as tf
import tensorflow.keras as keras
import tensorflow.keras.backend as K
import tensorflow.keras.layers as KL
import tensorflow.keras.layers as KE
import tensorflow.keras.utils as KU
from tensorflow.python.eager import context
import tensorflow.keras.models as KM
from tensorflow.keras.callbacks import LearningRateScheduler
from tensorflow.keras.callbacks import ModelCheckpoint
import os
from pixellib.instance import utils
import logging
tf.compat.v1.disable_eager_execution()
############################################################
# Utility Functions
############################################################
def log(text, array=None):
"""Prints a text message. And, optionally, if a Numpy array is provided it
    prints its shape, min, and max values.
"""
if array is not None:
text = text.ljust(25)
text += ("shape: {:20} ".format(str(array.shape)))
if array.size:
text += ("min: {:10.5f} max: {:10.5f}".format(array.min(),array.max()))
else:
text += ("min: {:10} max: {:10}".format("",""))
text += " {}".format(array.dtype)
print(text)
class BatchNorm(KL.BatchNormalization):
"""Extends the Keras BatchNormalization class to allow a central place
to make changes if needed.
Batch normalization has a negative effect on training if batches are small
so this layer is often frozen (via setting in Config class) and functions
as linear layer.
"""
def call(self, inputs, training=None):
"""
Note about training values:
None: Train BN layers. This is the normal mode
False: Freeze BN layers. Good when batch size is small
True: (don't use). Set layer in training mode even when making inferences
"""
return super(self.__class__, self).call(inputs, training=training)
def compute_backbone_shapes(config, image_shape):
"""Computes the width and height of each stage of the backbone network.
Returns:
[N, (height, width)]. Where N is the number of stages
"""
if callable(config.BACKBONE):
return config.COMPUTE_BACKBONE_SHAPE(image_shape)
# Currently supports ResNet only
assert config.BACKBONE in ["resnet50", "resnet101"]
return np.array(
[[int(math.ceil(image_shape[0] / stride)),
int(math.ceil(image_shape[1] / stride))]
for stride in config.BACKBONE_STRIDES])
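# Added, illustrative sketch (not part of the original source): with the common
# BACKBONE_STRIDES of [4, 8, 16, 32, 64] and a 1024x1024 input, the function
# above yields one (height, width) pair per backbone stage. The helper below
# reproduces that arithmetic without needing a Config object.
def _example_backbone_shapes(image_shape=(1024, 1024, 3),
                             strides=(4, 8, 16, 32, 64)):
    # ceil(1024/4)=256, ceil(1024/8)=128, ... -> [[256,256],[128,128],[64,64],[32,32],[16,16]]
    return np.array([[int(math.ceil(image_shape[0] / s)),
                      int(math.ceil(image_shape[1] / s))] for s in strides])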
############################################################
# Resnet Graph
############################################################
# Code adopted from:
# https://github.com/fchollet/deep-learning-models/blob/master/resnet50.py
def identity_block(input_tensor, kernel_size, filters, stage, block,
use_bias=True, train_bn=True):
"""The identity_block is the block that has no conv layer at shortcut
# Arguments
input_tensor: input tensor
kernel_size: default 3, the kernel size of middle conv layer at main path
filters: list of integers, the nb_filters of 3 conv layer at main path
stage: integer, current stage label, used for generating layer names
block: 'a','b'..., current block label, used for generating layer names
use_bias: Boolean. To use or not use a bias in conv layers.
train_bn: Boolean. Train or freeze Batch Norm layers
"""
nb_filter1, nb_filter2, nb_filter3 = filters
conv_name_base = 'res' + str(stage) + block + '_branch'
bn_name_base = 'bn' + str(stage) + block + '_branch'
x = KL.Conv2D(nb_filter1, (1, 1), name=conv_name_base + '2a',
use_bias=use_bias)(input_tensor)
x = BatchNorm(name=bn_name_base + '2a')(x, training=train_bn)
x = KL.Activation('relu')(x)
x = KL.Conv2D(nb_filter2, (kernel_size, kernel_size), padding='same',
name=conv_name_base + '2b', use_bias=use_bias)(x)
x = BatchNorm(name=bn_name_base + '2b')(x, training=train_bn)
x = KL.Activation('relu')(x)
x = KL.Conv2D(nb_filter3, (1, 1), name=conv_name_base + '2c',
use_bias=use_bias)(x)
x = BatchNorm(name=bn_name_base + '2c')(x, training=train_bn)
x = KL.Add()([x, input_tensor])
x = KL.Activation('relu', name='res' + str(stage) + block + '_out')(x)
return x
def conv_block(input_tensor, kernel_size, filters, stage, block,
strides=(2, 2), use_bias=True, train_bn=True):
"""conv_block is the block that has a conv layer at shortcut
# Arguments
input_tensor: input tensor
kernel_size: default 3, the kernel size of middle conv layer at main path
filters: list of integers, the nb_filters of 3 conv layer at main path
stage: integer, current stage label, used for generating layer names
block: 'a','b'..., current block label, used for generating layer names
use_bias: Boolean. To use or not use a bias in conv layers.
train_bn: Boolean. Train or freeze Batch Norm layers
Note that from stage 3, the first conv layer at main path is with subsample=(2,2)
And the shortcut should have subsample=(2,2) as well
"""
nb_filter1, nb_filter2, nb_filter3 = filters
conv_name_base = 'res' + str(stage) + block + '_branch'
bn_name_base = 'bn' + str(stage) + block + '_branch'
x = KL.Conv2D(nb_filter1, (1, 1), strides=strides,
name=conv_name_base + '2a', use_bias=use_bias)(input_tensor)
x = BatchNorm(name=bn_name_base + '2a')(x, training=train_bn)
x = KL.Activation('relu')(x)
x = KL.Conv2D(nb_filter2, (kernel_size, kernel_size), padding='same',
name=conv_name_base + '2b', use_bias=use_bias)(x)
x = BatchNorm(name=bn_name_base + '2b')(x, training=train_bn)
x = KL.Activation('relu')(x)
x = KL.Conv2D(nb_filter3, (1, 1), name=conv_name_base +
'2c', use_bias=use_bias)(x)
x = BatchNorm(name=bn_name_base + '2c')(x, training=train_bn)
shortcut = KL.Conv2D(nb_filter3, (1, 1), strides=strides,
name=conv_name_base + '1', use_bias=use_bias)(input_tensor)
shortcut = BatchNorm(name=bn_name_base + '1')(shortcut, training=train_bn)
x = KL.Add()([x, shortcut])
x = KL.Activation('relu', name='res' + str(stage) + block + '_out')(x)
return x
def resnet_graph(input_image, architecture, stage5=False, train_bn=True):
"""Build a ResNet graph.
architecture: Can be resnet50 or resnet101
stage5: Boolean. If False, stage5 of the network is not created
train_bn: Boolean. Train or freeze Batch Norm layers
"""
assert architecture in ["resnet50", "resnet101"]
# Stage 1
x = KL.ZeroPadding2D((3, 3))(input_image)
x = KL.Conv2D(64, (7, 7), strides=(2, 2), name='conv1', use_bias=True)(x)
x = BatchNorm(name='bn_conv1')(x, training=train_bn)
x = KL.Activation('relu')(x)
C1 = x = KL.MaxPooling2D((3, 3), strides=(2, 2), padding="same")(x)
# Stage 2
x = conv_block(x, 3, [64, 64, 256], stage=2, block='a', strides=(1, 1), train_bn=train_bn)
x = identity_block(x, 3, [64, 64, 256], stage=2, block='b', train_bn=train_bn)
C2 = x = identity_block(x, 3, [64, 64, 256], stage=2, block='c', train_bn=train_bn)
# Stage 3
x = conv_block(x, 3, [128, 128, 512], stage=3, block='a', train_bn=train_bn)
x = identity_block(x, 3, [128, 128, 512], stage=3, block='b', train_bn=train_bn)
x = identity_block(x, 3, [128, 128, 512], stage=3, block='c', train_bn=train_bn)
C3 = x = identity_block(x, 3, [128, 128, 512], stage=3, block='d', train_bn=train_bn)
# Stage 4
x = conv_block(x, 3, [256, 256, 1024], stage=4, block='a', train_bn=train_bn)
block_count = {"resnet50": 5, "resnet101": 22}[architecture]
for i in range(block_count):
x = identity_block(x, 3, [256, 256, 1024], stage=4, block=chr(98 + i), train_bn=train_bn)
C4 = x
# Stage 5
if stage5:
x = conv_block(x, 3, [512, 512, 2048], stage=5, block='a', train_bn=train_bn)
x = identity_block(x, 3, [512, 512, 2048], stage=5, block='b', train_bn=train_bn)
C5 = x = identity_block(x, 3, [512, 512, 2048], stage=5, block='c', train_bn=train_bn)
else:
C5 = None
return [C1, C2, C3, C4, C5]
############################################################
# Proposal Layer
############################################################
def apply_box_deltas_graph(boxes, deltas):
"""Applies the given deltas to the given boxes.
boxes: [N, (y1, x1, y2, x2)] boxes to update
deltas: [N, (dy, dx, log(dh), log(dw))] refinements to apply
"""
# Convert to y, x, h, w
height = boxes[:, 2] - boxes[:, 0]
width = boxes[:, 3] - boxes[:, 1]
center_y = boxes[:, 0] + 0.5 * height
center_x = boxes[:, 1] + 0.5 * width
# Apply deltas
center_y += deltas[:, 0] * height
center_x += deltas[:, 1] * width
height *= tf.exp(deltas[:, 2])
width *= tf.exp(deltas[:, 3])
# Convert back to y1, x1, y2, x2
y1 = center_y - 0.5 * height
x1 = center_x - 0.5 * width
y2 = y1 + height
x2 = x1 + width
result = tf.stack([y1, x1, y2, x2], axis=1, name="apply_box_deltas_out")
return result
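# Added, hedged sketch: the same delta arithmetic as apply_box_deltas_graph,
# written in plain NumPy for a single box so the transform can be checked by
# hand. The unit box and the 10% shift / exp(0.1) growth are invented values
# used purely for illustration.
def _example_apply_box_delta():
    box = np.array([0.0, 0.0, 1.0, 1.0])        # (y1, x1, y2, x2)
    delta = np.array([0.1, 0.0, 0.1, 0.1])      # (dy, dx, log(dh), log(dw))
    h, w = box[2] - box[0], box[3] - box[1]
    cy, cx = box[0] + 0.5 * h, box[1] + 0.5 * w
    cy, cx = cy + delta[0] * h, cx + delta[1] * w
    h, w = h * np.exp(delta[2]), w * np.exp(delta[3])
    return np.array([cy - 0.5 * h, cx - 0.5 * w,   # refined (y1, x1, y2, x2)
                     cy + 0.5 * h, cx + 0.5 * w])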
def clip_boxes_graph(boxes, window):
"""
boxes: [N, (y1, x1, y2, x2)]
window: [4] in the form y1, x1, y2, x2
"""
# Split
wy1, wx1, wy2, wx2 = tf.split(window, 4)
y1, x1, y2, x2 = tf.split(boxes, 4, axis=1)
# Clip
y1 = tf.maximum(tf.minimum(y1, wy2), wy1)
x1 = tf.maximum(tf.minimum(x1, wx2), wx1)
y2 = tf.maximum(tf.minimum(y2, wy2), wy1)
x2 = tf.maximum(tf.minimum(x2, wx2), wx1)
clipped = tf.concat([y1, x1, y2, x2], axis=1, name="clipped_boxes")
clipped.set_shape((clipped.shape[0], 4))
return clipped
class ProposalLayer(KE.Layer):
"""Receives anchor scores and selects a subset to pass as proposals
to the second stage. Filtering is done based on anchor scores and
non-max suppression to remove overlaps. It also applies bounding
box refinement deltas to anchors.
Inputs:
rpn_probs: [batch, num_anchors, (bg prob, fg prob)]
rpn_bbox: [batch, num_anchors, (dy, dx, log(dh), log(dw))]
anchors: [batch, num_anchors, (y1, x1, y2, x2)] anchors in normalized coordinates
Returns:
Proposals in normalized coordinates [batch, rois, (y1, x1, y2, x2)]
"""
def __init__(self, proposal_count, nms_threshold, config=None, **kwargs):
super(ProposalLayer, self).__init__(**kwargs)
self.config = config
self.proposal_count = proposal_count
self.nms_threshold = nms_threshold
def get_config(self):
config = super(ProposalLayer, self).get_config()
config["config"] = self.config.to_dict()
config["proposal_count"] = self.proposal_count
config["nms_threshold"] = self.nms_threshold
return config
def call(self, inputs):
# Box Scores. Use the foreground class confidence. [Batch, num_rois, 1]
scores = inputs[0][:, :, 1]
# Box deltas [batch, num_rois, 4]
deltas = inputs[1]
deltas = deltas * np.reshape(self.config.RPN_BBOX_STD_DEV, [1, 1, 4])
# Anchors
anchors = inputs[2]
# Improve performance by trimming to top anchors by score
# and doing the rest on the smaller subset.
pre_nms_limit = tf.minimum(self.config.PRE_NMS_LIMIT, tf.shape(input=anchors)[1])
ix = tf.nn.top_k(scores, pre_nms_limit, sorted=True,
name="top_anchors").indices
scores = utils.batch_slice([scores, ix], lambda x, y: tf.gather(x, y),
self.config.IMAGES_PER_GPU)
deltas = utils.batch_slice([deltas, ix], lambda x, y: tf.gather(x, y),
self.config.IMAGES_PER_GPU)
pre_nms_anchors = utils.batch_slice([anchors, ix], lambda a, x: tf.gather(a, x),
self.config.IMAGES_PER_GPU,
names=["pre_nms_anchors"])
# Apply deltas to anchors to get refined anchors.
# [batch, N, (y1, x1, y2, x2)]
boxes = utils.batch_slice([pre_nms_anchors, deltas],
lambda x, y: apply_box_deltas_graph(x, y),
self.config.IMAGES_PER_GPU,
names=["refined_anchors"])
# Clip to image boundaries. Since we're in normalized coordinates,
# clip to 0..1 range. [batch, N, (y1, x1, y2, x2)]
window = np.array([0, 0, 1, 1], dtype=np.float32)
boxes = utils.batch_slice(boxes,
lambda x: clip_boxes_graph(x, window),
self.config.IMAGES_PER_GPU,
names=["refined_anchors_clipped"])
# Filter out small boxes
# According to Xinlei Chen's paper, this reduces detection accuracy
# for small objects, so we're skipping it.
# Non-max suppression
def nms(boxes, scores):
indices = tf.image.non_max_suppression(
boxes, scores, self.proposal_count,
self.nms_threshold, name="rpn_non_max_suppression")
proposals = tf.gather(boxes, indices)
# Pad if needed
padding = tf.maximum(self.proposal_count - tf.shape(input=proposals)[0], 0)
proposals = tf.pad(tensor=proposals, paddings=[(0, padding), (0, 0)])
return proposals
proposals = utils.batch_slice([boxes, scores], nms,
self.config.IMAGES_PER_GPU)
if not context.executing_eagerly():
# Infer the static output shape:
out_shape = self.compute_output_shape(None)
proposals.set_shape(out_shape)
return proposals
def compute_output_shape(self, input_shape):
return None, self.proposal_count, 4
############################################################
# ROIAlign Layer
############################################################
def log2_graph(x):
"""Implementation of Log2. TF doesn't have a native implementation."""
return tf.math.log(x) / tf.math.log(2.0)
class PyramidROIAlign(KE.Layer):
"""Implements ROI Pooling on multiple levels of the feature pyramid.
Params:
- pool_shape: [pool_height, pool_width] of the output pooled regions. Usually [7, 7]
Inputs:
- boxes: [batch, num_boxes, (y1, x1, y2, x2)] in normalized
coordinates. Possibly padded with zeros if not enough
boxes to fill the array.
- image_meta: [batch, (meta data)] Image details. See compose_image_meta()
- feature_maps: List of feature maps from different levels of the pyramid.
Each is [batch, height, width, channels]
Output:
Pooled regions in the shape: [batch, num_boxes, pool_height, pool_width, channels].
    The width and height are those specified in pool_shape in the layer
constructor.
"""
def __init__(self, pool_shape, **kwargs):
super(PyramidROIAlign, self).__init__(**kwargs)
self.pool_shape = tuple(pool_shape)
def get_config(self):
config = super(PyramidROIAlign, self).get_config()
config['pool_shape'] = self.pool_shape
return config
def call(self, inputs):
# Crop boxes [batch, num_boxes, (y1, x1, y2, x2)] in normalized coords
boxes = inputs[0]
# Image meta
# Holds details about the image. See compose_image_meta()
image_meta = inputs[1]
# Feature Maps. List of feature maps from different level of the
# feature pyramid. Each is [batch, height, width, channels]
feature_maps = inputs[2:]
# Assign each ROI to a level in the pyramid based on the ROI area.
y1, x1, y2, x2 = tf.split(boxes, 4, axis=2)
h = y2 - y1
w = x2 - x1
# Use shape of first image. Images in a batch must have the same size.
image_shape = parse_image_meta_graph(image_meta)['image_shape'][0]
# Equation 1 in the Feature Pyramid Networks paper. Account for
# the fact that our coordinates are normalized here.
# e.g. a 224x224 ROI (in pixels) maps to P4
image_area = tf.cast(image_shape[0] * image_shape[1], tf.float32)
roi_level = log2_graph(tf.sqrt(h * w) / (224.0 / tf.sqrt(image_area)))
roi_level = tf.minimum(5, tf.maximum(
2, 4 + tf.cast(tf.round(roi_level), tf.int32)))
roi_level = tf.squeeze(roi_level, 2)
# Loop through levels and apply ROI pooling to each. P2 to P5.
pooled = []
box_to_level = []
for i, level in enumerate(range(2, 6)):
ix = tf.compat.v1.where(tf.equal(roi_level, level))
level_boxes = tf.gather_nd(boxes, ix)
# Box indices for crop_and_resize.
box_indices = tf.cast(ix[:, 0], tf.int32)
# Keep track of which box is mapped to which level
box_to_level.append(ix)
            # Stop gradient propagation to ROI proposals
level_boxes = tf.stop_gradient(level_boxes)
box_indices = tf.stop_gradient(box_indices)
# Crop and Resize
# From Mask R-CNN paper: "We sample four regular locations, so
# that we can evaluate either max or average pooling. In fact,
# interpolating only a single value at each bin center (without
# pooling) is nearly as effective."
#
# Here we use the simplified approach of a single value per bin,
# which is how it's done in tf.crop_and_resize()
# Result: [batch * num_boxes, pool_height, pool_width, channels]
pooled.append(tf.image.crop_and_resize(
feature_maps[i], level_boxes, box_indices, self.pool_shape,
method="bilinear"))
# Pack pooled features into one tensor
pooled = tf.concat(pooled, axis=0)
# Pack box_to_level mapping into one array and add another
# column representing the order of pooled boxes
box_to_level = tf.concat(box_to_level, axis=0)
box_range = tf.expand_dims(tf.range(tf.shape(input=box_to_level)[0]), 1)
box_to_level = tf.concat([tf.cast(box_to_level, tf.int32), box_range],
axis=1)
# Rearrange pooled features to match the order of the original boxes
# Sort box_to_level by batch then box index
# TF doesn't have a way to sort by two columns, so merge them and sort.
sorting_tensor = box_to_level[:, 0] * 100000 + box_to_level[:, 1]
ix = tf.nn.top_k(sorting_tensor, k=tf.shape(
input=box_to_level)[0]).indices[::-1]
ix = tf.gather(box_to_level[:, 2], ix)
pooled = tf.gather(pooled, ix)
# Re-add the batch dimension
shape = tf.concat([tf.shape(input=boxes)[:2], tf.shape(input=pooled)[1:]], axis=0)
pooled = tf.reshape(pooled, shape)
return pooled
def compute_output_shape(self, input_shape):
return input_shape[0][:2] + self.pool_shape + (input_shape[2][-1], )
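# Added, illustrative sketch of the ROI-to-pyramid-level rule used above
# (Equation 1 of the FPN paper, adapted to normalized coordinates): a ROI
# covering roughly 224x224 pixels of a 1024x1024 image maps to level 4,
# larger ROIs to level 5, smaller ones down to level 2. The inputs below are
# example values only.
def _example_roi_level(roi_h=224.0 / 1024, roi_w=224.0 / 1024,
                       image_h=1024.0, image_w=1024.0):
    level = np.log2(np.sqrt(roi_h * roi_w) /
                    (224.0 / np.sqrt(image_h * image_w)))
    return int(np.minimum(5, np.maximum(2, 4 + np.round(level))))  # -> 4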
############################################################
# Detection Target Layer
############################################################
def overlaps_graph(boxes1, boxes2):
"""Computes IoU overlaps between two sets of boxes.
boxes1, boxes2: [N, (y1, x1, y2, x2)].
"""
# 1. Tile boxes2 and repeat boxes1. This allows us to compare
# every boxes1 against every boxes2 without loops.
# TF doesn't have an equivalent to np.repeat() so simulate it
# using tf.tile() and tf.reshape.
b1 = tf.reshape(tf.tile(tf.expand_dims(boxes1, 1),
[1, 1, tf.shape(input=boxes2)[0]]), [-1, 4])
b2 = tf.tile(boxes2, [tf.shape(input=boxes1)[0], 1])
# 2. Compute intersections
b1_y1, b1_x1, b1_y2, b1_x2 = tf.split(b1, 4, axis=1)
b2_y1, b2_x1, b2_y2, b2_x2 = tf.split(b2, 4, axis=1)
y1 = tf.maximum(b1_y1, b2_y1)
x1 = tf.maximum(b1_x1, b2_x1)
y2 = tf.minimum(b1_y2, b2_y2)
x2 = tf.minimum(b1_x2, b2_x2)
intersection = tf.maximum(x2 - x1, 0) * tf.maximum(y2 - y1, 0)
# 3. Compute unions
b1_area = (b1_y2 - b1_y1) * (b1_x2 - b1_x1)
b2_area = (b2_y2 - b2_y1) * (b2_x2 - b2_x1)
union = b1_area + b2_area - intersection
# 4. Compute IoU and reshape to [boxes1, boxes2]
iou = intersection / union
overlaps = tf.reshape(iou, [tf.shape(input=boxes1)[0], tf.shape(input=boxes2)[0]])
return overlaps
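# Added sketch: the IoU formula that overlaps_graph evaluates on the tiled
# boxes, spelled out in NumPy for two hand-picked boxes. With these
# illustrative coordinates the intersection is 1.0, the union is 7.0 and the
# IoU is 1/7 (about 0.143).
def _example_iou():
    b1 = np.array([0.0, 0.0, 2.0, 2.0])   # area 4
    b2 = np.array([1.0, 1.0, 3.0, 3.0])   # area 4, overlaps b1 in a 1x1 square
    y1, x1 = np.maximum(b1[:2], b2[:2])
    y2, x2 = np.minimum(b1[2:], b2[2:])
    inter = max(y2 - y1, 0) * max(x2 - x1, 0)   # 1.0
    union = 4.0 + 4.0 - inter                   # 7.0
    return inter / union                        # ~0.143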
def detection_targets_graph(proposals, gt_class_ids, gt_boxes, gt_masks, config):
"""Generates detection targets for one image. Subsamples proposals and
generates target class IDs, bounding box deltas, and masks for each.
Inputs:
proposals: [POST_NMS_ROIS_TRAINING, (y1, x1, y2, x2)] in normalized coordinates. Might
be zero padded if there are not enough proposals.
gt_class_ids: [MAX_GT_INSTANCES] int class IDs
gt_boxes: [MAX_GT_INSTANCES, (y1, x1, y2, x2)] in normalized coordinates.
gt_masks: [height, width, MAX_GT_INSTANCES] of boolean type.
Returns: Target ROIs and corresponding class IDs, bounding box shifts,
and masks.
rois: [TRAIN_ROIS_PER_IMAGE, (y1, x1, y2, x2)] in normalized coordinates
class_ids: [TRAIN_ROIS_PER_IMAGE]. Integer class IDs. Zero padded.
deltas: [TRAIN_ROIS_PER_IMAGE, (dy, dx, log(dh), log(dw))]
masks: [TRAIN_ROIS_PER_IMAGE, height, width]. Masks cropped to bbox
boundaries and resized to neural network output size.
Note: Returned arrays might be zero padded if not enough target ROIs.
"""
# Assertions
asserts = [
tf.Assert(tf.greater(tf.shape(input=proposals)[0], 0), [proposals],
name="roi_assertion"),
]
with tf.control_dependencies(asserts):
proposals = tf.identity(proposals)
# Remove zero padding
proposals, _ = trim_zeros_graph(proposals, name="trim_proposals")
gt_boxes, non_zeros = trim_zeros_graph(gt_boxes, name="trim_gt_boxes")
gt_class_ids = tf.boolean_mask(tensor=gt_class_ids, mask=non_zeros,
name="trim_gt_class_ids")
gt_masks = tf.gather(gt_masks, tf.compat.v1.where(non_zeros)[:, 0], axis=2,
name="trim_gt_masks")
# Handle COCO crowds
# A crowd box in COCO is a bounding box around several instances. Exclude
# them from training. A crowd box is given a negative class ID.
crowd_ix = tf.compat.v1.where(gt_class_ids < 0)[:, 0]
non_crowd_ix = tf.compat.v1.where(gt_class_ids > 0)[:, 0]
crowd_boxes = tf.gather(gt_boxes, crowd_ix)
gt_class_ids = tf.gather(gt_class_ids, non_crowd_ix)
gt_boxes = tf.gather(gt_boxes, non_crowd_ix)
gt_masks = tf.gather(gt_masks, non_crowd_ix, axis=2)
# Compute overlaps matrix [proposals, gt_boxes]
overlaps = overlaps_graph(proposals, gt_boxes)
# Compute overlaps with crowd boxes [proposals, crowd_boxes]
crowd_overlaps = overlaps_graph(proposals, crowd_boxes)
crowd_iou_max = tf.reduce_max(input_tensor=crowd_overlaps, axis=1)
no_crowd_bool = (crowd_iou_max < 0.001)
# Determine positive and negative ROIs
roi_iou_max = tf.reduce_max(input_tensor=overlaps, axis=1)
# 1. Positive ROIs are those with >= 0.5 IoU with a GT box
positive_roi_bool = (roi_iou_max >= 0.5)
positive_indices = tf.compat.v1.where(positive_roi_bool)[:, 0]
# 2. Negative ROIs are those with < 0.5 with every GT box. Skip crowds.
negative_indices = tf.compat.v1.where(tf.logical_and(roi_iou_max < 0.5, no_crowd_bool))[:, 0]
# Subsample ROIs. Aim for 33% positive
# Positive ROIs
positive_count = int(config.TRAIN_ROIS_PER_IMAGE *
config.ROI_POSITIVE_RATIO)
positive_indices = tf.random.shuffle(positive_indices)[:positive_count]
positive_count = tf.shape(input=positive_indices)[0]
# Negative ROIs. Add enough to maintain positive:negative ratio.
r = 1.0 / config.ROI_POSITIVE_RATIO
negative_count = tf.cast(r * tf.cast(positive_count, tf.float32), tf.int32) - positive_count
negative_indices = tf.random.shuffle(negative_indices)[:negative_count]
# Gather selected ROIs
positive_rois = tf.gather(proposals, positive_indices)
negative_rois = tf.gather(proposals, negative_indices)
# Assign positive ROIs to GT boxes.
positive_overlaps = tf.gather(overlaps, positive_indices)
roi_gt_box_assignment = tf.cond(
pred=tf.greater(tf.shape(input=positive_overlaps)[1], 0),
true_fn=lambda: tf.argmax(input=positive_overlaps, axis=1),
false_fn=lambda: tf.cast(tf.constant([]), tf.int64)
)
roi_gt_boxes = tf.gather(gt_boxes, roi_gt_box_assignment)
roi_gt_class_ids = tf.gather(gt_class_ids, roi_gt_box_assignment)
# Compute bbox refinement for positive ROIs
deltas = utils.box_refinement_graph(positive_rois, roi_gt_boxes)
deltas /= config.BBOX_STD_DEV
# Assign positive ROIs to GT masks
# Permute masks to [N, height, width, 1]
transposed_masks = tf.expand_dims(tf.transpose(a=gt_masks, perm=[2, 0, 1]), -1)
# Pick the right mask for each ROI
roi_masks = tf.gather(transposed_masks, roi_gt_box_assignment)
# Compute mask targets
boxes = positive_rois
if config.USE_MINI_MASK:
# Transform ROI coordinates from normalized image space
# to normalized mini-mask space.
y1, x1, y2, x2 = tf.split(positive_rois, 4, axis=1)
gt_y1, gt_x1, gt_y2, gt_x2 = tf.split(roi_gt_boxes, 4, axis=1)
gt_h = gt_y2 - gt_y1
gt_w = gt_x2 - gt_x1
y1 = (y1 - gt_y1) / gt_h
x1 = (x1 - gt_x1) / gt_w
y2 = (y2 - gt_y1) / gt_h
x2 = (x2 - gt_x1) / gt_w
boxes = tf.concat([y1, x1, y2, x2], 1)
box_ids = tf.range(0, tf.shape(input=roi_masks)[0])
masks = tf.image.crop_and_resize(tf.cast(roi_masks, tf.float32), boxes,
box_ids,
config.MASK_SHAPE)
# Remove the extra dimension from masks.
masks = tf.squeeze(masks, axis=3)
# Threshold mask pixels at 0.5 to have GT masks be 0 or 1 to use with
# binary cross entropy loss.
masks = tf.round(masks)
# Append negative ROIs and pad bbox deltas and masks that
# are not used for negative ROIs with zeros.
rois = tf.concat([positive_rois, negative_rois], axis=0)
N = tf.shape(input=negative_rois)[0]
P = tf.maximum(config.TRAIN_ROIS_PER_IMAGE - tf.shape(input=rois)[0], 0)
rois = tf.pad(tensor=rois, paddings=[(0, P), (0, 0)])
roi_gt_boxes = tf.pad(tensor=roi_gt_boxes, paddings=[(0, N + P), (0, 0)])
roi_gt_class_ids = tf.pad(tensor=roi_gt_class_ids, paddings=[(0, N + P)])
deltas = tf.pad(tensor=deltas, paddings=[(0, N + P), (0, 0)])
masks = tf.pad(tensor=masks, paddings=[[0, N + P], (0, 0), (0, 0)])
return rois, roi_gt_class_ids, deltas, masks
class DetectionTargetLayer(KE.Layer):
"""Subsamples proposals and generates target box refinement, class_ids,
and masks for each.
Inputs:
proposals: [batch, N, (y1, x1, y2, x2)] in normalized coordinates. Might
be zero padded if there are not enough proposals.
gt_class_ids: [batch, MAX_GT_INSTANCES] Integer class IDs.
gt_boxes: [batch, MAX_GT_INSTANCES, (y1, x1, y2, x2)] in normalized
coordinates.
gt_masks: [batch, height, width, MAX_GT_INSTANCES] of boolean type
Returns: Target ROIs and corresponding class IDs, bounding box shifts,
and masks.
rois: [batch, TRAIN_ROIS_PER_IMAGE, (y1, x1, y2, x2)] in normalized
coordinates
target_class_ids: [batch, TRAIN_ROIS_PER_IMAGE]. Integer class IDs.
    target_deltas: [batch, TRAIN_ROIS_PER_IMAGE, (dy, dx, log(dh), log(dw))]
target_mask: [batch, TRAIN_ROIS_PER_IMAGE, height, width]
Masks cropped to bbox boundaries and resized to neural
network output size.
Note: Returned arrays might be zero padded if not enough target ROIs.
"""
def __init__(self, config, **kwargs):
super(DetectionTargetLayer, self).__init__(**kwargs)
self.config = config
def get_config(self):
config = super(DetectionTargetLayer, self).get_config()
config["config"] = self.config.to_dict()
return config
def call(self, inputs):
proposals = inputs[0]
gt_class_ids = inputs[1]
gt_boxes = inputs[2]
gt_masks = inputs[3]
# Slice the batch and run a graph for each slice
# TODO: Rename target_bbox to target_deltas for clarity
names = ["rois", "target_class_ids", "target_bbox", "target_mask"]
outputs = utils.batch_slice(
[proposals, gt_class_ids, gt_boxes, gt_masks],
lambda w, x, y, z: detection_targets_graph(
w, x, y, z, self.config),
self.config.IMAGES_PER_GPU, names=names)
return outputs
def compute_output_shape(self, input_shape):
return [
(None, self.config.TRAIN_ROIS_PER_IMAGE, 4), # rois
(None, self.config.TRAIN_ROIS_PER_IMAGE), # class_ids
(None, self.config.TRAIN_ROIS_PER_IMAGE, 4), # deltas
(None, self.config.TRAIN_ROIS_PER_IMAGE, self.config.MASK_SHAPE[0],
self.config.MASK_SHAPE[1]) # masks
]
def compute_mask(self, inputs, mask=None):
return [None, None, None, None]
############################################################
# Detection Layer
############################################################
def refine_detections_graph(rois, probs, deltas, window, config):
"""Refine classified proposals and filter overlaps and return final
detections.
Inputs:
rois: [N, (y1, x1, y2, x2)] in normalized coordinates
probs: [N, num_classes]. Class probabilities.
deltas: [N, num_classes, (dy, dx, log(dh), log(dw))]. Class-specific
bounding box deltas.
window: (y1, x1, y2, x2) in normalized coordinates. The part of the image
        that contains the actual image, excluding the padding.
Returns detections shaped: [num_detections, (y1, x1, y2, x2, class_id, score)] where
coordinates are normalized.
"""
# Class IDs per ROI
class_ids = tf.argmax(input=probs, axis=1, output_type=tf.int32)
# Class probability of the top class of each ROI
indices = tf.stack([tf.range(probs.shape[0]), class_ids], axis=1)
class_scores = tf.gather_nd(probs, indices)
# Class-specific bounding box deltas
deltas_specific = tf.gather_nd(deltas, indices)
# Apply bounding box deltas
# Shape: [boxes, (y1, x1, y2, x2)] in normalized coordinates
refined_rois = apply_box_deltas_graph(
rois, deltas_specific * config.BBOX_STD_DEV)
# Clip boxes to image window
refined_rois = clip_boxes_graph(refined_rois, window)
# TODO: Filter out boxes with zero area
# Filter out background boxes
keep = tf.compat.v1.where(class_ids > 0)[:, 0]
# Filter out low confidence boxes
if config.DETECTION_MIN_CONFIDENCE:
conf_keep = tf.compat.v1.where(class_scores >= config.DETECTION_MIN_CONFIDENCE)[:, 0]
keep = tf.sets.intersection(tf.expand_dims(keep, 0),
tf.expand_dims(conf_keep, 0))
keep = tf.sparse.to_dense(keep)[0]
# Apply per-class NMS
# 1. Prepare variables
pre_nms_class_ids = tf.gather(class_ids, keep)
pre_nms_scores = tf.gather(class_scores, keep)
pre_nms_rois = tf.gather(refined_rois, keep)
unique_pre_nms_class_ids = tf.unique(pre_nms_class_ids)[0]
def nms_keep_map(class_id):
"""Apply Non-Maximum Suppression on ROIs of the given class."""
# Indices of ROIs of the given class
ixs = tf.compat.v1.where(tf.equal(pre_nms_class_ids, class_id))[:, 0]
# Apply NMS
class_keep = tf.image.non_max_suppression(
tf.gather(pre_nms_rois, ixs),
tf.gather(pre_nms_scores, ixs),
max_output_size=config.DETECTION_MAX_INSTANCES,
iou_threshold=config.DETECTION_NMS_THRESHOLD)
# Map indices
class_keep = tf.gather(keep, tf.gather(ixs, class_keep))
# Pad with -1 so returned tensors have the same shape
gap = config.DETECTION_MAX_INSTANCES - tf.shape(input=class_keep)[0]
class_keep = tf.pad(tensor=class_keep, paddings=[(0, gap)],
mode='CONSTANT', constant_values=-1)
# Set shape so map_fn() can infer result shape
class_keep.set_shape([config.DETECTION_MAX_INSTANCES])
return class_keep
# 2. Map over class IDs
nms_keep = tf.map_fn(nms_keep_map, unique_pre_nms_class_ids,
dtype=tf.int64)
# 3. Merge results into one list, and remove -1 padding
nms_keep = tf.reshape(nms_keep, [-1])
nms_keep = tf.gather(nms_keep, tf.compat.v1.where(nms_keep > -1)[:, 0])
# 4. Compute intersection between keep and nms_keep
keep = tf.sets.intersection(tf.expand_dims(keep, 0),
tf.expand_dims(nms_keep, 0))
keep = tf.sparse.to_dense(keep)[0]
# Keep top detections
roi_count = config.DETECTION_MAX_INSTANCES
class_scores_keep = tf.gather(class_scores, keep)
num_keep = tf.minimum(tf.shape(input=class_scores_keep)[0], roi_count)
top_ids = tf.nn.top_k(class_scores_keep, k=num_keep, sorted=True)[1]
keep = tf.gather(keep, top_ids)
# Arrange output as [N, (y1, x1, y2, x2, class_id, score)]
# Coordinates are normalized.
detections = tf.concat([
tf.gather(refined_rois, keep),
tf.dtypes.cast(tf.gather(class_ids, keep), tf.float32)[..., tf.newaxis],
tf.gather(class_scores, keep)[..., tf.newaxis]
], axis=1)
# Pad with zeros if detections < DETECTION_MAX_INSTANCES
gap = config.DETECTION_MAX_INSTANCES - tf.shape(input=detections)[0]
detections = tf.pad(tensor=detections, paddings=[(0, gap), (0, 0)], mode="CONSTANT")
return detections
class DetectionLayer(KE.Layer):
"""Takes classified proposal boxes and their bounding box deltas and
returns the final detection boxes.
Returns:
[batch, num_detections, (y1, x1, y2, x2, class_id, class_score)] where
coordinates are normalized.
"""
def __init__(self, config=None, **kwargs):
super(DetectionLayer, self).__init__(**kwargs)
self.config = config
def get_config(self):
config = super(DetectionLayer, self).get_config()
config["config"] = self.config.to_dict()
return config
def call(self, inputs):
rois = inputs[0]
mrcnn_class = inputs[1]
mrcnn_bbox = inputs[2]
image_meta = inputs[3]
# Get windows of images in normalized coordinates. Windows are the area
# in the image that excludes the padding.
# Use the shape of the first image in the batch to normalize the window
# because we know that all images get resized to the same size.
m = parse_image_meta_graph(image_meta)
image_shape = m['image_shape'][0]
window = norm_boxes_graph(m['window'], image_shape[:2])
# Run detection refinement graph on each item in the batch
detections_batch = utils.batch_slice(
[rois, mrcnn_class, mrcnn_bbox, window],
lambda x, y, w, z: refine_detections_graph(x, y, w, z, self.config),
self.config.IMAGES_PER_GPU)
# Reshape output
# [batch, num_detections, (y1, x1, y2, x2, class_id, class_score)] in
# normalized coordinates
return tf.reshape(
detections_batch,
[self.config.BATCH_SIZE, self.config.DETECTION_MAX_INSTANCES, 6])
def compute_output_shape(self, input_shape):
return (None, self.config.DETECTION_MAX_INSTANCES, 6)
############################################################
# Region Proposal Network (RPN)
############################################################
def rpn_graph(feature_map, anchors_per_location, anchor_stride):
"""Builds the computation graph of Region Proposal Network.
feature_map: backbone features [batch, height, width, depth]
anchors_per_location: number of anchors per pixel in the feature map
anchor_stride: Controls the density of anchors. Typically 1 (anchors for
every pixel in the feature map), or 2 (every other pixel).
Returns:
rpn_class_logits: [batch, H * W * anchors_per_location, 2] Anchor classifier logits (before softmax)
rpn_probs: [batch, H * W * anchors_per_location, 2] Anchor classifier probabilities.
rpn_bbox: [batch, H * W * anchors_per_location, (dy, dx, log(dh), log(dw))] Deltas to be
applied to anchors.
"""
# TODO: check if stride of 2 causes alignment issues if the feature map
# is not even.
# Shared convolutional base of the RPN
shared = KL.Conv2D(512, (3, 3), padding='same', activation='relu',
strides=anchor_stride,
name='rpn_conv_shared')(feature_map)
# Anchor Score. [batch, height, width, anchors per location * 2].
x = KL.Conv2D(2 * anchors_per_location, (1, 1), padding='valid',
activation='linear', name='rpn_class_raw')(shared)
# Reshape to [batch, anchors, 2]
rpn_class_logits = KL.Lambda(
lambda t: tf.reshape(t, [tf.shape(input=t)[0], -1, 2]))(x)
# Softmax on last dimension of BG/FG.
rpn_probs = KL.Activation(
"softmax", name="rpn_class_xxx")(rpn_class_logits)
# Bounding box refinement. [batch, H, W, anchors per location * depth]
# where depth is [x, y, log(w), log(h)]
x = KL.Conv2D(anchors_per_location * 4, (1, 1), padding="valid",
activation='linear', name='rpn_bbox_pred')(shared)
# Reshape to [batch, anchors, 4]
rpn_bbox = KL.Lambda(lambda t: tf.reshape(t, [tf.shape(input=t)[0], -1, 4]))(x)
return [rpn_class_logits, rpn_probs, rpn_bbox]
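# Added, hedged note on shapes: with a single 32x32 feature map, 3 anchors per
# location and an anchor stride of 1, rpn_graph produces 32 * 32 * 3 = 3072
# anchors, so its outputs are [batch, 3072, 2], [batch, 3072, 2] and
# [batch, 3072, 4]. The figures are illustrative and depend on the config.
def _example_rpn_output_sizes(h=32, w=32, anchors_per_location=3):
    num_anchors = h * w * anchors_per_location      # 3072 for these defaults
    return {"rpn_class_logits": (num_anchors, 2),
            "rpn_probs": (num_anchors, 2),
            "rpn_bbox": (num_anchors, 4)}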
def build_rpn_model(anchor_stride, anchors_per_location, depth):
"""Builds a Keras model of the Region Proposal Network.
It wraps the RPN graph so it can be used multiple times with shared
weights.
anchors_per_location: number of anchors per pixel in the feature map
anchor_stride: Controls the density of anchors. Typically 1 (anchors for
every pixel in the feature map), or 2 (every other pixel).
depth: Depth of the backbone feature map.
Returns a Keras Model object. The model outputs, when called, are:
rpn_class_logits: [batch, H * W * anchors_per_location, 2] Anchor classifier logits (before softmax)
rpn_probs: [batch, H * W * anchors_per_location, 2] Anchor classifier probabilities.
rpn_bbox: [batch, H * W * anchors_per_location, (dy, dx, log(dh), log(dw))] Deltas to be
applied to anchors.
"""
input_feature_map = KL.Input(shape=[None, None, depth],
name="input_rpn_feature_map")
outputs = rpn_graph(input_feature_map, anchors_per_location, anchor_stride)
return KM.Model([input_feature_map], outputs, name="rpn_model")
############################################################
# Feature Pyramid Network Heads
############################################################
def fpn_classifier_graph(rois, feature_maps, image_meta,
pool_size, num_classes, train_bn=True,
fc_layers_size=1024):
"""Builds the computation graph of the feature pyramid network classifier
and regressor heads.
rois: [batch, num_rois, (y1, x1, y2, x2)] Proposal boxes in normalized
coordinates.
feature_maps: List of feature maps from different layers of the pyramid,
[P2, P3, P4, P5]. Each has a different resolution.
image_meta: [batch, (meta data)] Image details. See compose_image_meta()
pool_size: The width of the square feature map generated from ROI Pooling.
num_classes: number of classes, which determines the depth of the results
train_bn: Boolean. Train or freeze Batch Norm layers
fc_layers_size: Size of the 2 FC layers
Returns:
logits: [batch, num_rois, NUM_CLASSES] classifier logits (before softmax)
probs: [batch, num_rois, NUM_CLASSES] classifier probabilities
bbox_deltas: [batch, num_rois, NUM_CLASSES, (dy, dx, log(dh), log(dw))] Deltas to apply to
proposal boxes
"""
# ROI Pooling
# Shape: [batch, num_rois, POOL_SIZE, POOL_SIZE, channels]
x = PyramidROIAlign([pool_size, pool_size],
name="roi_align_classifier")([rois, image_meta] + feature_maps)
# Two 1024 FC layers (implemented with Conv2D for consistency)
x = KL.TimeDistributed(KL.Conv2D(fc_layers_size, (pool_size, pool_size), padding="valid"),
name="mrcnn_class_conv1")(x)
x = KL.TimeDistributed(BatchNorm(), name='mrcnn_class_bn1')(x, training=train_bn)
x = KL.Activation('relu')(x)
x = KL.TimeDistributed(KL.Conv2D(fc_layers_size, (1, 1)),
name="mrcnn_class_conv2")(x)
x = KL.TimeDistributed(BatchNorm(), name='mrcnn_class_bn2')(x, training=train_bn)
x = KL.Activation('relu')(x)
shared = KL.Lambda(lambda x: K.squeeze(K.squeeze(x, 3), 2),
name="pool_squeeze")(x)
# Classifier head
mrcnn_class_logits = KL.TimeDistributed(KL.Dense(num_classes),
name='mrcnn_class_logits')(shared)
mrcnn_probs = KL.TimeDistributed(KL.Activation("softmax"),
name="mrcnn_class")(mrcnn_class_logits)
# BBox head
# [batch, num_rois, NUM_CLASSES * (dy, dx, log(dh), log(dw))]
x = KL.TimeDistributed(KL.Dense(num_classes * 4, activation='linear'),
name='mrcnn_bbox_fc')(shared)
# Reshape to [batch, num_rois, NUM_CLASSES, (dy, dx, log(dh), log(dw))]
s = K.int_shape(x)
if s[1] is None:
mrcnn_bbox = KL.Reshape((-1, num_classes, 4), name="mrcnn_bbox")(x)
else:
mrcnn_bbox = KL.Reshape((s[1], num_classes, 4), name="mrcnn_bbox")(x)
return mrcnn_class_logits, mrcnn_probs, mrcnn_bbox
def build_fpn_mask_graph(rois, feature_maps, image_meta,
pool_size, num_classes, train_bn=True):
"""Builds the computation graph of the mask head of Feature Pyramid Network.
rois: [batch, num_rois, (y1, x1, y2, x2)] Proposal boxes in normalized
coordinates.
feature_maps: List of feature maps from different layers of the pyramid,
[P2, P3, P4, P5]. Each has a different resolution.
image_meta: [batch, (meta data)] Image details. See compose_image_meta()
pool_size: The width of the square feature map generated from ROI Pooling.
num_classes: number of classes, which determines the depth of the results
train_bn: Boolean. Train or freeze Batch Norm layers
Returns: Masks [batch, num_rois, MASK_POOL_SIZE, MASK_POOL_SIZE, NUM_CLASSES]
"""
# ROI Pooling
# Shape: [batch, num_rois, MASK_POOL_SIZE, MASK_POOL_SIZE, channels]
x = PyramidROIAlign([pool_size, pool_size],
name="roi_align_mask")([rois, image_meta] + feature_maps)
# Conv layers
x = KL.TimeDistributed(KL.Conv2D(256, (3, 3), padding="same"),
name="mrcnn_mask_conv1")(x)
x = KL.TimeDistributed(BatchNorm(),
name='mrcnn_mask_bn1')(x, training=train_bn)
x = KL.Activation('relu')(x)
x = KL.TimeDistributed(KL.Conv2D(256, (3, 3), padding="same"),
name="mrcnn_mask_conv2")(x)
x = KL.TimeDistributed(BatchNorm(),
name='mrcnn_mask_bn2')(x, training=train_bn)
x = KL.Activation('relu')(x)
x = KL.TimeDistributed(KL.Conv2D(256, (3, 3), padding="same"),
name="mrcnn_mask_conv3")(x)
x = KL.TimeDistributed(BatchNorm(),
name='mrcnn_mask_bn3')(x, training=train_bn)
x = KL.Activation('relu')(x)
x = KL.TimeDistributed(KL.Conv2D(256, (3, 3), padding="same"),
name="mrcnn_mask_conv4")(x)
x = KL.TimeDistributed(BatchNorm(),
name='mrcnn_mask_bn4')(x, training=train_bn)
x = KL.Activation('relu')(x)
x = KL.TimeDistributed(KL.Conv2DTranspose(256, (2, 2), strides=2, activation="relu"),
name="mrcnn_mask_deconv")(x)
x = KL.TimeDistributed(KL.Conv2D(num_classes, (1, 1), strides=1, activation="sigmoid"),
name="mrcnn_mask")(x)
return x
############################################################
# Loss Functions
############################################################
def smooth_l1_loss(y_true, y_pred):
"""Implements Smooth-L1 loss.
y_true and y_pred are typically: [N, 4], but could be any shape.
"""
diff = K.abs(y_true - y_pred)
less_than_one = K.cast(K.less(diff, 1.0), "float32")
loss = (less_than_one * 0.5 * diff**2) + (1 - less_than_one) * (diff - 0.5)
return loss
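# Added numeric sketch of the two Smooth-L1 branches above: the loss is
# quadratic (0.5 * d^2) while |d| < 1 and linear (|d| - 0.5) beyond that, so a
# difference of 0.5 gives 0.125 and a difference of 2.0 gives 1.5. Plain
# Python floats are used purely for illustration.
def _example_smooth_l1(diff):
    d = abs(diff)
    return 0.5 * d ** 2 if d < 1.0 else d - 0.5
# _example_smooth_l1(0.5) -> 0.125, _example_smooth_l1(2.0) -> 1.5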
def rpn_class_loss_graph(rpn_match, rpn_class_logits):
"""RPN anchor classifier loss.
rpn_match: [batch, anchors, 1]. Anchor match type. 1=positive,
-1=negative, 0=neutral anchor.
rpn_class_logits: [batch, anchors, 2]. RPN classifier logits for BG/FG.
"""
# Squeeze last dim to simplify
rpn_match = tf.squeeze(rpn_match, -1)
# Get anchor classes. Convert the -1/+1 match to 0/1 values.
anchor_class = K.cast(K.equal(rpn_match, 1), tf.int32)
# Positive and Negative anchors contribute to the loss,
# but neutral anchors (match value = 0) don't.
indices = tf.compat.v1.where(K.not_equal(rpn_match, 0))
# Pick rows that contribute to the loss and filter out the rest.
rpn_class_logits = tf.gather_nd(rpn_class_logits, indices)
anchor_class = tf.gather_nd(anchor_class, indices)
# Cross entropy loss
loss = K.sparse_categorical_crossentropy(target=anchor_class,
output=rpn_class_logits,
from_logits=True)
loss = K.switch(tf.size(input=loss) > 0, K.mean(loss), tf.constant(0.0))
return loss
def rpn_bbox_loss_graph(config, target_bbox, rpn_match, rpn_bbox):
"""Return the RPN bounding box loss graph.
config: the model config object.
target_bbox: [batch, max positive anchors, (dy, dx, log(dh), log(dw))].
        Uses 0 padding to fill in unused bbox deltas.
rpn_match: [batch, anchors, 1]. Anchor match type. 1=positive,
-1=negative, 0=neutral anchor.
rpn_bbox: [batch, anchors, (dy, dx, log(dh), log(dw))]
"""
# Positive anchors contribute to the loss, but negative and
# neutral anchors (match value of 0 or -1) don't.
rpn_match = K.squeeze(rpn_match, -1)
indices = tf.compat.v1.where(K.equal(rpn_match, 1))
# Pick bbox deltas that contribute to the loss
rpn_bbox = tf.gather_nd(rpn_bbox, indices)
# Trim target bounding box deltas to the same length as rpn_bbox.
batch_counts = K.sum(K.cast(K.equal(rpn_match, 1), tf.int32), axis=1)
target_bbox = batch_pack_graph(target_bbox, batch_counts,
config.IMAGES_PER_GPU)
loss = smooth_l1_loss(target_bbox, rpn_bbox)
loss = K.switch(tf.size(input=loss) > 0, K.mean(loss), tf.constant(0.0))
return loss
def mrcnn_class_loss_graph(target_class_ids, pred_class_logits,
active_class_ids):
"""Loss for the classifier head of Mask RCNN.
target_class_ids: [batch, num_rois]. Integer class IDs. Uses zero
padding to fill in the array.
pred_class_logits: [batch, num_rois, num_classes]
active_class_ids: [batch, num_classes]. Has a value of 1 for
classes that are in the dataset of the image, and 0
for classes that are not in the dataset.
"""
# During model building, Keras calls this function with
# target_class_ids of type float32. Unclear why. Cast it
# to int to get around it.
target_class_ids = tf.cast(target_class_ids, 'int64')
# Find predictions of classes that are not in the dataset.
pred_class_ids = tf.argmax(input=pred_class_logits, axis=2)
# TODO: Update this line to work with batch > 1. Right now it assumes all
# images in a batch have the same active_class_ids
pred_active = tf.gather(active_class_ids[0], pred_class_ids)
# Loss
loss = tf.nn.sparse_softmax_cross_entropy_with_logits(
labels=target_class_ids, logits=pred_class_logits)
# Erase losses of predictions of classes that are not in the active
# classes of the image.
loss = loss * pred_active
    # Compute loss mean. Use only predictions that contribute
# to the loss to get a correct mean.
loss = tf.reduce_sum(input_tensor=loss) / tf.reduce_sum(input_tensor=pred_active)
return loss
def mrcnn_bbox_loss_graph(target_bbox, target_class_ids, pred_bbox):
"""Loss for Mask R-CNN bounding box refinement.
target_bbox: [batch, num_rois, (dy, dx, log(dh), log(dw))]
target_class_ids: [batch, num_rois]. Integer class IDs.
pred_bbox: [batch, num_rois, num_classes, (dy, dx, log(dh), log(dw))]
"""
# Reshape to merge batch and roi dimensions for simplicity.
target_class_ids = K.reshape(target_class_ids, (-1,))
target_bbox = K.reshape(target_bbox, (-1, 4))
pred_bbox = K.reshape(pred_bbox, (-1, K.int_shape(pred_bbox)[2], 4))
# Only positive ROIs contribute to the loss. And only
# the right class_id of each ROI. Get their indices.
positive_roi_ix = tf.compat.v1.where(target_class_ids > 0)[:, 0]
positive_roi_class_ids = tf.cast(
tf.gather(target_class_ids, positive_roi_ix), tf.int64)
indices = tf.stack([positive_roi_ix, positive_roi_class_ids], axis=1)
# Gather the deltas (predicted and true) that contribute to loss
target_bbox = tf.gather(target_bbox, positive_roi_ix)
pred_bbox = tf.gather_nd(pred_bbox, indices)
# Smooth-L1 Loss
loss = K.switch(tf.size(input=target_bbox) > 0,
smooth_l1_loss(y_true=target_bbox, y_pred=pred_bbox),
tf.constant(0.0))
loss = K.mean(loss)
return loss
def mrcnn_mask_loss_graph(target_masks, target_class_ids, pred_masks):
"""Mask binary cross-entropy loss for the masks head.
target_masks: [batch, num_rois, height, width].
A float32 tensor of values 0 or 1. Uses zero padding to fill array.
target_class_ids: [batch, num_rois]. Integer class IDs. Zero padded.
pred_masks: [batch, proposals, height, width, num_classes] float32 tensor
with values from 0 to 1.
"""
# Reshape for simplicity. Merge first two dimensions into one.
target_class_ids = K.reshape(target_class_ids, (-1,))
mask_shape = tf.shape(input=target_masks)
target_masks = K.reshape(target_masks, (-1, mask_shape[2], mask_shape[3]))
pred_shape = tf.shape(input=pred_masks)
pred_masks = K.reshape(pred_masks,
(-1, pred_shape[2], pred_shape[3], pred_shape[4]))
# Permute predicted masks to [N, num_classes, height, width]
pred_masks = tf.transpose(a=pred_masks, perm=[0, 3, 1, 2])
# Only positive ROIs contribute to the loss. And only
# the class specific mask of each ROI.
positive_ix = tf.compat.v1.where(target_class_ids > 0)[:, 0]
positive_class_ids = tf.cast(
tf.gather(target_class_ids, positive_ix), tf.int64)
indices = tf.stack([positive_ix, positive_class_ids], axis=1)
# Gather the masks (predicted and true) that contribute to loss
y_true = tf.gather(target_masks, positive_ix)
y_pred = tf.gather_nd(pred_masks, indices)
# Compute binary cross entropy. If no positive ROIs, then return 0.
# shape: [batch, roi, num_classes]
loss = K.switch(tf.size(input=y_true) > 0,
K.binary_crossentropy(target=y_true, output=y_pred),
tf.constant(0.0))
loss = K.mean(loss)
return loss
############################################################
# Data Generator
############################################################
def load_image_gt(dataset, config, image_id, augmentation=False, training = False, USE_MINI_MASK = False):
"""Load and return ground truth data for an image (image, mask, bounding boxes).
augment: (deprecated. Use augmentation instead). If true, apply random
image augmentation. Currently, only horizontal flipping is offered.
augmentation: Optional. An imgaug (https://github.com/aleju/imgaug) augmentation.
For example, passing imgaug.augmenters.Fliplr(0.5) flips images
right/left 50% of the time.
use_mini_mask: If False, returns full-size masks that are the same height
and width as the original image. These can be big, for example
1024x1024x100 (for 100 instances). Mini masks are smaller, typically,
224x224 and are generated by extracting the bounding box of the
object and resizing it to MINI_MASK_SHAPE.
Returns:
image: [height, width, 3]
shape: the original shape of the image before resizing and cropping.
class_ids: [instance_count] Integer class IDs
bbox: [instance_count, (y1, x1, y2, x2)]
mask: [height, width, instance_count]. The height and width are those
of the image unless use_mini_mask is True, in which case they are
defined in MINI_MASK_SHAPE.
"""
# Load image and mask
image = dataset.load_image(image_id)
mask, class_ids = dataset.load_mask(image_id)
original_shape = image.shape
image, window, scale, padding, crop = utils.resize_image(
image,
min_dim=config.IMAGE_MIN_DIM,
min_scale=config.IMAGE_MIN_SCALE,
max_dim=config.IMAGE_MAX_DIM,
mode=config.IMAGE_RESIZE_MODE)
mask = utils.resize_mask(mask, scale, padding, crop)
# Augmentation
# This requires the imgaug lib (https://github.com/aleju/imgaug)
if augmentation:
import imgaug
# Augmenters that are safe to apply to masks
# Some, such as Affine, have settings that make them unsafe, so always
# test your augmentation on masks
MASK_AUGMENTERS = ["Sequential", "SomeOf", "OneOf", "Sometimes",
"Fliplr", "Flipud", "CropAndPad",
"Affine", "PiecewiseAffine"]
def hook(images, augmenter, parents, default):
"""Determines which augmenters to apply to masks."""
return augmenter.__class__.__name__ in MASK_AUGMENTERS
        # Store shapes before augmentation to compare
image_shape = image.shape
mask_shape = mask.shape
# Make augmenters deterministic to apply similarly to images and masks
det = augmentation.to_deterministic()
image = det.augment_image(image)
# Change mask to np.uint8 because imgaug doesn't support np.bool
mask = det.augment_image(mask.astype(np.uint8),
hooks=imgaug.HooksImages(activator=hook))
# Verify that shapes didn't change
assert image.shape == image_shape, "Augmentation shouldn't change image size"
assert mask.shape == mask_shape, "Augmentation shouldn't change mask size"
# Change mask back to bool
        mask = mask.astype(bool)  # np.bool was removed in newer NumPy releases
# Note that some boxes might be all zeros if the corresponding mask got cropped out.
    # and here we filter them out
_idx = np.sum(mask, axis=(0, 1)) > 0
mask = mask[:, :, _idx]
class_ids = class_ids[_idx]
# Bounding boxes. Note that some boxes might be all zeros
# if the corresponding mask got cropped out.
# bbox: [num_instances, (y1, x1, y2, x2)]
bbox = utils.extract_bboxes(mask)
# Active classes
# Different datasets have different classes, so track the
# classes supported in the dataset of this image.
active_class_ids = np.zeros([dataset.num_classes], dtype=np.int32)
source_class_ids = dataset.source_class_ids[dataset.image_info[image_id]["source"]]
active_class_ids[source_class_ids] = 1
# Resize masks to smaller size to reduce memory usage
    if training:
mask = utils.minimize_mask(bbox, mask, config.MINI_MASK_SHAPE)
else:
USE_MINI_MASK = False
#mask = utils.minimize_mask(bbox, mask, config.MINI_MASK_SHAPE)
# Image meta data
image_meta = compose_image_meta(image_id, original_shape, image.shape,
window, scale, active_class_ids)
return image, image_meta, class_ids, bbox, mask
def build_detection_targets(rpn_rois, gt_class_ids, gt_boxes, gt_masks, config):
"""Generate targets for training Stage 2 classifier and mask heads.
This is not used in normal training. It's useful for debugging or to train
the Mask RCNN heads without using the RPN head.
Inputs:
rpn_rois: [N, (y1, x1, y2, x2)] proposal boxes.
gt_class_ids: [instance count] Integer class IDs
gt_boxes: [instance count, (y1, x1, y2, x2)]
gt_masks: [height, width, instance count] Ground truth masks. Can be full
size or mini-masks.
Returns:
rois: [TRAIN_ROIS_PER_IMAGE, (y1, x1, y2, x2)]
class_ids: [TRAIN_ROIS_PER_IMAGE]. Integer class IDs.
bboxes: [TRAIN_ROIS_PER_IMAGE, NUM_CLASSES, (y, x, log(h), log(w))]. Class-specific
bbox refinements.
masks: [TRAIN_ROIS_PER_IMAGE, height, width, NUM_CLASSES). Class specific masks cropped
to bbox boundaries and resized to neural network output size.
"""
assert rpn_rois.shape[0] > 0
assert gt_class_ids.dtype == np.int32, "Expected int but got {}".format(
gt_class_ids.dtype)
assert gt_boxes.dtype == np.int32, "Expected int but got {}".format(
gt_boxes.dtype)
assert gt_masks.dtype == np.bool_, "Expected bool but got {}".format(
gt_masks.dtype)
# It's common to add GT Boxes to ROIs but we don't do that here because
# according to XinLei Chen's paper, it doesn't help.
# Trim empty padding in gt_boxes and gt_masks parts
instance_ids = np.where(gt_class_ids > 0)[0]
assert instance_ids.shape[0] > 0, "Image must contain instances."
gt_class_ids = gt_class_ids[instance_ids]
gt_boxes = gt_boxes[instance_ids]
gt_masks = gt_masks[:, :, instance_ids]
# Compute areas of ROIs and ground truth boxes.
rpn_roi_area = (rpn_rois[:, 2] - rpn_rois[:, 0]) * \
(rpn_rois[:, 3] - rpn_rois[:, 1])
gt_box_area = (gt_boxes[:, 2] - gt_boxes[:, 0]) * \
(gt_boxes[:, 3] - gt_boxes[:, 1])
# Compute overlaps [rpn_rois, gt_boxes]
overlaps = np.zeros((rpn_rois.shape[0], gt_boxes.shape[0]))
for i in range(overlaps.shape[1]):
gt = gt_boxes[i]
overlaps[:, i] = utils.compute_iou(
gt, rpn_rois, gt_box_area[i], rpn_roi_area)
# Assign ROIs to GT boxes
rpn_roi_iou_argmax = np.argmax(overlaps, axis=1)
rpn_roi_iou_max = overlaps[np.arange(
overlaps.shape[0]), rpn_roi_iou_argmax]
# GT box assigned to each ROI
rpn_roi_gt_boxes = gt_boxes[rpn_roi_iou_argmax]
rpn_roi_gt_class_ids = gt_class_ids[rpn_roi_iou_argmax]
# Positive ROIs are those with >= 0.5 IoU with a GT box.
fg_ids = np.where(rpn_roi_iou_max > 0.5)[0]
# Negative ROIs are those with max IoU 0.1-0.5 (hard example mining)
# TODO: To hard example mine or not to hard example mine, that's the question
# bg_ids = np.where((rpn_roi_iou_max >= 0.1) & (rpn_roi_iou_max < 0.5))[0]
bg_ids = np.where(rpn_roi_iou_max < 0.5)[0]
# Subsample ROIs. Aim for 33% foreground.
# FG
fg_roi_count = int(config.TRAIN_ROIS_PER_IMAGE * config.ROI_POSITIVE_RATIO)
if fg_ids.shape[0] > fg_roi_count:
keep_fg_ids = np.random.choice(fg_ids, fg_roi_count, replace=False)
else:
keep_fg_ids = fg_ids
# BG
remaining = config.TRAIN_ROIS_PER_IMAGE - keep_fg_ids.shape[0]
if bg_ids.shape[0] > remaining:
keep_bg_ids = np.random.choice(bg_ids, remaining, replace=False)
else:
keep_bg_ids = bg_ids
# Combine indices of ROIs to keep
keep = np.concatenate([keep_fg_ids, keep_bg_ids])
# Need more?
remaining = config.TRAIN_ROIS_PER_IMAGE - keep.shape[0]
if remaining > 0:
# Looks like we don't have enough samples to maintain the desired
# balance. Reduce requirements and fill in the rest. This is
# likely different from the Mask RCNN paper.
# There is a small chance we have neither fg nor bg samples.
if keep.shape[0] == 0:
# Pick bg regions with easier IoU threshold
bg_ids = np.where(rpn_roi_iou_max < 0.5)[0]
assert bg_ids.shape[0] >= remaining
keep_bg_ids = np.random.choice(bg_ids, remaining, replace=False)
assert keep_bg_ids.shape[0] == remaining
keep = np.concatenate([keep, keep_bg_ids])
else:
# Fill the rest with repeated bg rois.
keep_extra_ids = np.random.choice(
keep_bg_ids, remaining, replace=True)
keep = np.concatenate([keep, keep_extra_ids])
assert keep.shape[0] == config.TRAIN_ROIS_PER_IMAGE, \
"keep doesn't match ROI batch size {}, {}".format(
keep.shape[0], config.TRAIN_ROIS_PER_IMAGE)
# Reset the gt boxes assigned to BG ROIs.
rpn_roi_gt_boxes[keep_bg_ids, :] = 0
rpn_roi_gt_class_ids[keep_bg_ids] = 0
# For each kept ROI, assign a class_id, and for FG ROIs also add bbox refinement.
rois = rpn_rois[keep]
roi_gt_boxes = rpn_roi_gt_boxes[keep]
roi_gt_class_ids = rpn_roi_gt_class_ids[keep]
roi_gt_assignment = rpn_roi_iou_argmax[keep]
# Class-aware bbox deltas. [y, x, log(h), log(w)]
bboxes = np.zeros((config.TRAIN_ROIS_PER_IMAGE,
config.NUM_CLASSES, 4), dtype=np.float32)
pos_ids = np.where(roi_gt_class_ids > 0)[0]
bboxes[pos_ids, roi_gt_class_ids[pos_ids]] = utils.box_refinement(
rois[pos_ids], roi_gt_boxes[pos_ids, :4])
# Normalize bbox refinements
bboxes /= config.BBOX_STD_DEV
# Generate class-specific target masks
masks = np.zeros((config.TRAIN_ROIS_PER_IMAGE, config.MASK_SHAPE[0], config.MASK_SHAPE[1], config.NUM_CLASSES),
dtype=np.float32)
for i in pos_ids:
class_id = roi_gt_class_ids[i]
assert class_id > 0, "class id must be greater than 0"
gt_id = roi_gt_assignment[i]
class_mask = gt_masks[:, :, gt_id]
if config.USE_MINI_MASK:
# Create a mask placeholder, the size of the image
placeholder = np.zeros(config.IMAGE_SHAPE[:2], dtype=bool)
# GT box
gt_y1, gt_x1, gt_y2, gt_x2 = gt_boxes[gt_id]
gt_w = gt_x2 - gt_x1
gt_h = gt_y2 - gt_y1
# Resize mini mask to size of GT box
placeholder[gt_y1:gt_y2, gt_x1:gt_x2] = \
np.round(utils.resize(class_mask, (gt_h, gt_w))).astype(bool)
            # Place the mini mask in the placeholder
class_mask = placeholder
# Pick part of the mask and resize it
y1, x1, y2, x2 = rois[i].astype(np.int32)
m = class_mask[y1:y2, x1:x2]
mask = utils.resize(m, config.MASK_SHAPE)
masks[i, :, :, class_id] = mask
return rois, roi_gt_class_ids, bboxes, masks
def build_rpn_targets(image_shape, anchors, gt_class_ids, gt_boxes, config):
"""Given the anchors and GT boxes, compute overlaps and identify positive
anchors and deltas to refine them to match their corresponding GT boxes.
anchors: [num_anchors, (y1, x1, y2, x2)]
gt_class_ids: [num_gt_boxes] Integer class IDs.
gt_boxes: [num_gt_boxes, (y1, x1, y2, x2)]
Returns:
rpn_match: [N] (int32) matches between anchors and GT boxes.
1 = positive anchor, -1 = negative anchor, 0 = neutral
rpn_bbox: [N, (dy, dx, log(dh), log(dw))] Anchor bbox deltas.
"""
# RPN Match: 1 = positive anchor, -1 = negative anchor, 0 = neutral
rpn_match = np.zeros([anchors.shape[0]], dtype=np.int32)
# RPN bounding boxes: [max anchors per image, (dy, dx, log(dh), log(dw))]
rpn_bbox = np.zeros((config.RPN_TRAIN_ANCHORS_PER_IMAGE, 4))
# Handle COCO crowds
# A crowd box in COCO is a bounding box around several instances. Exclude
# them from training. A crowd box is given a negative class ID.
crowd_ix = np.where(gt_class_ids < 0)[0]
if crowd_ix.shape[0] > 0:
# Filter out crowds from ground truth class IDs and boxes
non_crowd_ix = np.where(gt_class_ids > 0)[0]
crowd_boxes = gt_boxes[crowd_ix]
gt_class_ids = gt_class_ids[non_crowd_ix]
gt_boxes = gt_boxes[non_crowd_ix]
# Compute overlaps with crowd boxes [anchors, crowds]
crowd_overlaps = utils.compute_overlaps(anchors, crowd_boxes)
crowd_iou_max = np.amax(crowd_overlaps, axis=1)
no_crowd_bool = (crowd_iou_max < 0.001)
else:
        # No anchor intersects a crowd
no_crowd_bool = np.ones([anchors.shape[0]], dtype=bool)
# Compute overlaps [num_anchors, num_gt_boxes]
overlaps = utils.compute_overlaps(anchors, gt_boxes)
# Match anchors to GT Boxes
# If an anchor overlaps a GT box with IoU >= 0.7 then it's positive.
# If an anchor overlaps a GT box with IoU < 0.3 then it's negative.
# Neutral anchors are those that don't match the conditions above,
# and they don't influence the loss function.
# However, don't keep any GT box unmatched (rare, but happens). Instead,
# match it to the closest anchor (even if its max IoU is < 0.3).
#
# 1. Set negative anchors first. They get overwritten below if a GT box is
# matched to them. Skip boxes in crowd areas.
anchor_iou_argmax = np.argmax(overlaps, axis=1)
anchor_iou_max = overlaps[np.arange(overlaps.shape[0]), anchor_iou_argmax]
rpn_match[(anchor_iou_max < 0.3) & (no_crowd_bool)] = -1
# 2. Set an anchor for each GT box (regardless of IoU value).
# If multiple anchors have the same IoU match all of them
gt_iou_argmax = np.argwhere(overlaps == np.max(overlaps, axis=0))[:,0]
rpn_match[gt_iou_argmax] = 1
# 3. Set anchors with high overlap as positive.
rpn_match[anchor_iou_max >= 0.7] = 1
# Subsample to balance positive and negative anchors
# Don't let positives be more than half the anchors
ids = np.where(rpn_match == 1)[0]
extra = len(ids) - (config.RPN_TRAIN_ANCHORS_PER_IMAGE // 2)
if extra > 0:
# Reset the extra ones to neutral
ids = np.random.choice(ids, extra, replace=False)
rpn_match[ids] = 0
# Same for negative proposals
ids = np.where(rpn_match == -1)[0]
extra = len(ids) - (config.RPN_TRAIN_ANCHORS_PER_IMAGE -
np.sum(rpn_match == 1))
if extra > 0:
        # Reset the extra ones to neutral
ids = np.random.choice(ids, extra, replace=False)
rpn_match[ids] = 0
# For positive anchors, compute shift and scale needed to transform them
# to match the corresponding GT boxes.
ids = np.where(rpn_match == 1)[0]
ix = 0 # index into rpn_bbox
# TODO: use box_refinement() rather than duplicating the code here
for i, a in zip(ids, anchors[ids]):
# Closest gt box (it might have IoU < 0.7)
gt = gt_boxes[anchor_iou_argmax[i]]
# Convert coordinates to center plus width/height.
# GT Box
gt_h = gt[2] - gt[0]
gt_w = gt[3] - gt[1]
gt_center_y = gt[0] + 0.5 * gt_h
gt_center_x = gt[1] + 0.5 * gt_w
# Anchor
a_h = a[2] - a[0]
a_w = a[3] - a[1]
a_center_y = a[0] + 0.5 * a_h
a_center_x = a[1] + 0.5 * a_w
# Compute the bbox refinement that the RPN should predict.
rpn_bbox[ix] = [
(gt_center_y - a_center_y) / a_h,
(gt_center_x - a_center_x) / a_w,
np.log(gt_h / a_h),
np.log(gt_w / a_w),
]
# Normalize
rpn_bbox[ix] /= config.RPN_BBOX_STD_DEV
ix += 1
return rpn_match, rpn_bbox
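# --- Illustrative sketch (editor-added; not called by the training pipeline) ---
# A minimal, scalar version of the anchor-to-GT refinement computed in the loop
# above, for a single pair of [y1, x1, y2, x2] boxes in pixels. The function
# name is hypothetical; the vectorized equivalent used elsewhere in this file
# is utils.box_refinement().
def _example_rpn_delta(anchor, gt):
    """Return [dy, dx, log(dh), log(dw)] that maps `anchor` onto `gt`."""
    a_h, a_w = anchor[2] - anchor[0], anchor[3] - anchor[1]
    gt_h, gt_w = gt[2] - gt[0], gt[3] - gt[1]
    a_cy, a_cx = anchor[0] + 0.5 * a_h, anchor[1] + 0.5 * a_w
    gt_cy, gt_cx = gt[0] + 0.5 * gt_h, gt[1] + 0.5 * gt_w
    return [(gt_cy - a_cy) / a_h,
            (gt_cx - a_cx) / a_w,
            np.log(gt_h / a_h),
            np.log(gt_w / a_w)]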
def generate_random_rois(image_shape, count, gt_class_ids, gt_boxes):
"""Generates ROI proposals similar to what a region proposal network
would generate.
image_shape: [Height, Width, Depth]
count: Number of ROIs to generate
gt_class_ids: [N] Integer ground truth class IDs
gt_boxes: [N, (y1, x1, y2, x2)] Ground truth boxes in pixels.
Returns: [count, (y1, x1, y2, x2)] ROI boxes in pixels.
"""
# placeholder
rois = np.zeros((count, 4), dtype=np.int32)
# Generate random ROIs around GT boxes (90% of count)
rois_per_box = int(0.9 * count / gt_boxes.shape[0])
for i in range(gt_boxes.shape[0]):
gt_y1, gt_x1, gt_y2, gt_x2 = gt_boxes[i]
h = gt_y2 - gt_y1
w = gt_x2 - gt_x1
# random boundaries
r_y1 = max(gt_y1 - h, 0)
r_y2 = min(gt_y2 + h, image_shape[0])
r_x1 = max(gt_x1 - w, 0)
r_x2 = min(gt_x2 + w, image_shape[1])
# To avoid generating boxes with zero area, we generate double what
# we need and filter out the extra. If we get fewer valid boxes
# than we need, we loop and try again.
while True:
y1y2 = np.random.randint(r_y1, r_y2, (rois_per_box * 2, 2))
x1x2 = np.random.randint(r_x1, r_x2, (rois_per_box * 2, 2))
# Filter out zero area boxes
threshold = 1
y1y2 = y1y2[np.abs(y1y2[:, 0] - y1y2[:, 1]) >=
threshold][:rois_per_box]
x1x2 = x1x2[np.abs(x1x2[:, 0] - x1x2[:, 1]) >=
threshold][:rois_per_box]
if y1y2.shape[0] == rois_per_box and x1x2.shape[0] == rois_per_box:
break
# Sort on axis 1 to ensure x1 <= x2 and y1 <= y2 and then reshape
# into x1, y1, x2, y2 order
x1, x2 = np.split(np.sort(x1x2, axis=1), 2, axis=1)
y1, y2 = np.split(np.sort(y1y2, axis=1), 2, axis=1)
box_rois = np.hstack([y1, x1, y2, x2])
rois[rois_per_box * i:rois_per_box * (i + 1)] = box_rois
# Generate random ROIs anywhere in the image (10% of count)
remaining_count = count - (rois_per_box * gt_boxes.shape[0])
# To avoid generating boxes with zero area, we generate double what
# we need and filter out the extra. If we get fewer valid boxes
# than we need, we loop and try again.
while True:
y1y2 = np.random.randint(0, image_shape[0], (remaining_count * 2, 2))
x1x2 = np.random.randint(0, image_shape[1], (remaining_count * 2, 2))
# Filter out zero area boxes
threshold = 1
y1y2 = y1y2[np.abs(y1y2[:, 0] - y1y2[:, 1]) >=
threshold][:remaining_count]
x1x2 = x1x2[np.abs(x1x2[:, 0] - x1x2[:, 1]) >=
threshold][:remaining_count]
if y1y2.shape[0] == remaining_count and x1x2.shape[0] == remaining_count:
break
# Sort on axis 1 to ensure x1 <= x2 and y1 <= y2 and then reshape
# into x1, y1, x2, y2 order
x1, x2 = np.split(np.sort(x1x2, axis=1), 2, axis=1)
y1, y2 = np.split(np.sort(y1y2, axis=1), 2, axis=1)
global_rois = np.hstack([y1, x1, y2, x2])
rois[-remaining_count:] = global_rois
return rois
class DataGenerator(KU.Sequence):
"""An iterable that returns images and corresponding target class ids,
bounding box deltas, and masks. It inherits from keras.utils.Sequence to avoid data redundancy
when multiprocessing=True.
dataset: The Dataset object to pick data from
config: The model config object
shuffle: If True, shuffles the samples before every epoch
augmentation: Optional. An imgaug (https://github.com/aleju/imgaug) augmentation.
For example, passing imgaug.augmenters.Fliplr(0.5) flips images
right/left 50% of the time.
random_rois: If > 0 then generate proposals to be used to train the
network classifier and mask heads. Useful if training
the Mask RCNN part without the RPN.
detection_targets: If True, generate detection targets (class IDs, bbox
deltas, and masks). Typically for debugging or visualizations because
        in training detection targets are generated by DetectionTargetLayer.
Returns a Python iterable. Upon calling __getitem__() on it, the
iterable returns two lists, inputs and outputs. The contents
of the lists differ depending on the received arguments:
inputs list:
- images: [batch, H, W, C]
- image_meta: [batch, (meta data)] Image details. See compose_image_meta()
- rpn_match: [batch, N] Integer (1=positive anchor, -1=negative, 0=neutral)
- rpn_bbox: [batch, N, (dy, dx, log(dh), log(dw))] Anchor bbox deltas.
- gt_class_ids: [batch, MAX_GT_INSTANCES] Integer class IDs
- gt_boxes: [batch, MAX_GT_INSTANCES, (y1, x1, y2, x2)]
- gt_masks: [batch, height, width, MAX_GT_INSTANCES]. The height and width
are those of the image unless use_mini_mask is True, in which
case they are defined in MINI_MASK_SHAPE.
outputs list: Usually empty in regular training. But if detection_targets
is True then the outputs list contains target class_ids, bbox deltas,
and masks.
"""
def __init__(self, dataset, config, shuffle=True, augmentation=None,
random_rois=0, detection_targets=False):
self.image_ids = np.copy(dataset.image_ids)
self.dataset = dataset
self.config = config
# Anchors
# [anchor_count, (y1, x1, y2, x2)]
self.backbone_shapes = compute_backbone_shapes(config, config.IMAGE_SHAPE)
self.anchors = utils.generate_pyramid_anchors(config.RPN_ANCHOR_SCALES,
config.RPN_ANCHOR_RATIOS,
self.backbone_shapes,
config.BACKBONE_STRIDES,
config.RPN_ANCHOR_STRIDE)
self.shuffle = shuffle
self.augmentation = augmentation
self.random_rois = random_rois
self.batch_size = self.config.BATCH_SIZE
self.detection_targets = detection_targets
def __len__(self):
return int(np.ceil(len(self.image_ids) / float(self.batch_size)))
def __getitem__(self, idx):
b = 0
image_index = -1
while b < self.batch_size:
# Increment index to pick next image. Shuffle if at the start of an epoch.
image_index = (image_index + 1) % len(self.image_ids)
if self.shuffle and image_index == 0:
np.random.shuffle(self.image_ids)
# Get GT bounding boxes and masks for image.
image_id = self.image_ids[image_index]
image, image_meta, gt_class_ids, gt_boxes, gt_masks = \
load_image_gt(self.dataset, self.config, image_id,training=True,
augmentation=self.augmentation)
# Skip images that have no instances. This can happen in cases
# where we train on a subset of classes and the image doesn't
# have any of the classes we care about.
if not np.any(gt_class_ids > 0):
continue
# RPN Targets
rpn_match, rpn_bbox = build_rpn_targets(image.shape, self.anchors,
gt_class_ids, gt_boxes, self.config)
# Mask R-CNN Targets
if self.random_rois:
rpn_rois = generate_random_rois(
image.shape, self.random_rois, gt_class_ids, gt_boxes)
if self.detection_targets:
rois, mrcnn_class_ids, mrcnn_bbox, mrcnn_mask = \
build_detection_targets(
rpn_rois, gt_class_ids, gt_boxes, gt_masks, self.config)
# Init batch arrays
if b == 0:
batch_image_meta = np.zeros(
(self.batch_size,) + image_meta.shape, dtype=image_meta.dtype)
batch_rpn_match = np.zeros(
[self.batch_size, self.anchors.shape[0], 1], dtype=rpn_match.dtype)
batch_rpn_bbox = np.zeros(
[self.batch_size, self.config.RPN_TRAIN_ANCHORS_PER_IMAGE, 4], dtype=rpn_bbox.dtype)
batch_images = np.zeros(
(self.batch_size,) + image.shape, dtype=np.float32)
batch_gt_class_ids = np.zeros(
(self.batch_size, self.config.MAX_GT_INSTANCES), dtype=np.int32)
batch_gt_boxes = np.zeros(
(self.batch_size, self.config.MAX_GT_INSTANCES, 4), dtype=np.int32)
batch_gt_masks = np.zeros(
(self.batch_size, gt_masks.shape[0], gt_masks.shape[1],
self.config.MAX_GT_INSTANCES), dtype=gt_masks.dtype)
if self.random_rois:
batch_rpn_rois = np.zeros(
(self.batch_size, rpn_rois.shape[0], 4), dtype=rpn_rois.dtype)
if self.detection_targets:
batch_rois = np.zeros(
(self.batch_size,) + rois.shape, dtype=rois.dtype)
batch_mrcnn_class_ids = np.zeros(
(self.batch_size,) + mrcnn_class_ids.shape, dtype=mrcnn_class_ids.dtype)
batch_mrcnn_bbox = np.zeros(
(self.batch_size,) + mrcnn_bbox.shape, dtype=mrcnn_bbox.dtype)
batch_mrcnn_mask = np.zeros(
(self.batch_size,) + mrcnn_mask.shape, dtype=mrcnn_mask.dtype)
            # If there are more instances than fit in the array, sub-sample them.
if gt_boxes.shape[0] > self.config.MAX_GT_INSTANCES:
ids = np.random.choice(
np.arange(gt_boxes.shape[0]), self.config.MAX_GT_INSTANCES, replace=False)
gt_class_ids = gt_class_ids[ids]
gt_boxes = gt_boxes[ids]
gt_masks = gt_masks[:, :, ids]
# Add to batch
batch_image_meta[b] = image_meta
batch_rpn_match[b] = rpn_match[:, np.newaxis]
batch_rpn_bbox[b] = rpn_bbox
batch_images[b] = mold_image(image.astype(np.float32), self.config)
batch_gt_class_ids[b, :gt_class_ids.shape[0]] = gt_class_ids
batch_gt_boxes[b, :gt_boxes.shape[0]] = gt_boxes
batch_gt_masks[b, :, :, :gt_masks.shape[-1]] = gt_masks
if self.random_rois:
batch_rpn_rois[b] = rpn_rois
if self.detection_targets:
batch_rois[b] = rois
batch_mrcnn_class_ids[b] = mrcnn_class_ids
batch_mrcnn_bbox[b] = mrcnn_bbox
batch_mrcnn_mask[b] = mrcnn_mask
b += 1
inputs = [batch_images, batch_image_meta, batch_rpn_match, batch_rpn_bbox,
batch_gt_class_ids, batch_gt_boxes, batch_gt_masks]
outputs = []
if self.random_rois:
inputs.extend([batch_rpn_rois])
if self.detection_targets:
inputs.extend([batch_rois])
# Keras requires that output and targets have the same number of dimensions
batch_mrcnn_class_ids = np.expand_dims(
batch_mrcnn_class_ids, -1)
outputs.extend(
[batch_mrcnn_class_ids, batch_mrcnn_bbox, batch_mrcnn_mask])
return inputs, outputs
############################################################
# MaskRCNN Class
############################################################
class custom_layer(tf.keras.layers.Layer):
    """Identity layer that passes the constant anchors array through so it becomes part of the Keras graph."""
def __init__(self, name="anchors", **kwargs):
super(custom_layer, self).__init__(name=name, **kwargs)
def call(self, anchor):
return anchor
def get_config(self) :
config = super(custom_layer, self).get_config()
return config
class MaskRCNN(object):
"""Encapsulates the Mask RCNN model functionality.
The actual Keras model is in the keras_model property.
"""
def __init__(self, mode, config, model_dir):
"""
mode: Either "training" or "inference"
config: A Sub-class of the Config class
model_dir: Directory to save training logs and trained weights
"""
assert mode in ['training', 'inference']
self.mode = mode
self.config = config
self.model_dir = model_dir
self.set_log_dir()
self.keras_model = self.build(mode=mode, config=config)
self.lr = self.config.LEARNING_RATE
def build(self, mode, config):
"""Build Mask R-CNN architecture.
mode: Either "training" or "inference". The inputs and
outputs of the model differ accordingly.
"""
assert mode in ['training', 'inference']
        # Image size must be divisible by 2 multiple times
h, w = config.IMAGE_SHAPE[:2]
if h / 2**6 != int(h / 2**6) or w / 2**6 != int(w / 2**6):
raise Exception("Image size must be dividable by 2 at least 6 times "
"to avoid fractions when downscaling and upscaling."
"For example, use 256, 320, 384, 448, 512, ... etc. ")
# Inputs
input_image = KL.Input(
shape=[None, None, config.IMAGE_SHAPE[2]], name="input_image")
input_image_meta = KL.Input(shape=[config.IMAGE_META_SIZE],
name="input_image_meta")
if mode == "training":
# RPN GT
input_rpn_match = KL.Input(
shape=[None, 1], name="input_rpn_match", dtype=tf.int32)
input_rpn_bbox = KL.Input(
shape=[None, 4], name="input_rpn_bbox", dtype=tf.float32)
# Detection GT (class IDs, bounding boxes, and masks)
# 1. GT Class IDs (zero padded)
input_gt_class_ids = KL.Input(
shape=[None], name="input_gt_class_ids", dtype=tf.int32)
# 2. GT Boxes in pixels (zero padded)
# [batch, MAX_GT_INSTANCES, (y1, x1, y2, x2)] in image coordinates
input_gt_boxes = KL.Input(
shape=[None, 4], name="input_gt_boxes", dtype=tf.float32)
# Normalize coordinates
gt_boxes = KL.Lambda(lambda x: norm_boxes_graph(
x, K.shape(input_image)[1:3]))(input_gt_boxes)
# 3. GT Masks (zero padded)
# [batch, height, width, MAX_GT_INSTANCES]
if config.USE_MINI_MASK:
input_gt_masks = KL.Input(
shape=[config.MINI_MASK_SHAPE[0],
config.MINI_MASK_SHAPE[1], None],
name="input_gt_masks", dtype=bool)
else:
input_gt_masks = KL.Input(
shape=[config.IMAGE_SHAPE[0], config.IMAGE_SHAPE[1], None],
name="input_gt_masks", dtype=bool)
elif mode == "inference":
# Anchors in normalized coordinates
input_anchors = KL.Input(shape=[None, 4], name="input_anchors")
# Build the shared convolutional layers.
# Bottom-up Layers
# Returns a list of the last layers of each stage, 5 in total.
        # Stage 5 is built as well (stage5=True below); the first output (C1) is discarded.
if callable(config.BACKBONE):
_, C2, C3, C4, C5 = config.BACKBONE(input_image, stage5=True,
train_bn=config.TRAIN_BN)
else:
_, C2, C3, C4, C5 = resnet_graph(input_image, config.BACKBONE,
stage5=True, train_bn=config.TRAIN_BN)
# Top-down Layers
        # TODO: add assert to verify feature map sizes match what's in config
P5 = KL.Conv2D(config.TOP_DOWN_PYRAMID_SIZE, (1, 1), name='fpn_c5p5')(C5)
P4 = KL.Add(name="fpn_p4add")([
KL.UpSampling2D(size=(2, 2), name="fpn_p5upsampled")(P5),
KL.Conv2D(config.TOP_DOWN_PYRAMID_SIZE, (1, 1), name='fpn_c4p4')(C4)])
P3 = KL.Add(name="fpn_p3add")([
KL.UpSampling2D(size=(2, 2), name="fpn_p4upsampled")(P4),
KL.Conv2D(config.TOP_DOWN_PYRAMID_SIZE, (1, 1), name='fpn_c3p3')(C3)])
P2 = KL.Add(name="fpn_p2add")([
KL.UpSampling2D(size=(2, 2), name="fpn_p3upsampled")(P3),
KL.Conv2D(config.TOP_DOWN_PYRAMID_SIZE, (1, 1), name='fpn_c2p2')(C2)])
# Attach 3x3 conv to all P layers to get the final feature maps.
P2 = KL.Conv2D(config.TOP_DOWN_PYRAMID_SIZE, (3, 3), padding="SAME", name="fpn_p2")(P2)
P3 = KL.Conv2D(config.TOP_DOWN_PYRAMID_SIZE, (3, 3), padding="SAME", name="fpn_p3")(P3)
P4 = KL.Conv2D(config.TOP_DOWN_PYRAMID_SIZE, (3, 3), padding="SAME", name="fpn_p4")(P4)
P5 = KL.Conv2D(config.TOP_DOWN_PYRAMID_SIZE, (3, 3), padding="SAME", name="fpn_p5")(P5)
# P6 is used for the 5th anchor scale in RPN. Generated by
# subsampling from P5 with stride of 2.
P6 = KL.MaxPooling2D(pool_size=(1, 1), strides=2, name="fpn_p6")(P5)
# Note that P6 is used in RPN, but not in the classifier heads.
rpn_feature_maps = [P2, P3, P4, P5, P6]
mrcnn_feature_maps = [P2, P3, P4, P5]
# Anchors
if mode == "training":
anchors = self.get_anchors(config.IMAGE_SHAPE)
# Duplicate across the batch dimension because Keras requires it
# TODO: can this be optimized to avoid duplicating the anchors?
anchors = np.broadcast_to(anchors, (config.BATCH_SIZE,) + anchors.shape)
anchor_layer = custom_layer(name="anchors")
anchors = anchor_layer(anchors)
else:
anchors = input_anchors
# RPN Model
rpn = build_rpn_model(config.RPN_ANCHOR_STRIDE,
len(config.RPN_ANCHOR_RATIOS), config.TOP_DOWN_PYRAMID_SIZE)
# Loop through pyramid layers
layer_outputs = [] # list of lists
for p in rpn_feature_maps:
layer_outputs.append(rpn([p]))
# Concatenate layer outputs
# Convert from list of lists of level outputs to list of lists
# of outputs across levels.
# e.g. [[a1, b1, c1], [a2, b2, c2]] => [[a1, a2], [b1, b2], [c1, c2]]
output_names = ["rpn_class_logits", "rpn_class", "rpn_bbox"]
outputs = list(zip(*layer_outputs))
outputs = [KL.Concatenate(axis=1, name=n)(list(o))
for o, n in zip(outputs, output_names)]
rpn_class_logits, rpn_class, rpn_bbox = outputs
# Generate proposals
# Proposals are [batch, N, (y1, x1, y2, x2)] in normalized coordinates
# and zero padded.
proposal_count = config.POST_NMS_ROIS_TRAINING if mode == "training"\
else config.POST_NMS_ROIS_INFERENCE
rpn_rois = ProposalLayer(
proposal_count=proposal_count,
nms_threshold=config.RPN_NMS_THRESHOLD,
name="ROI",
config=config)([rpn_class, rpn_bbox, anchors])
if mode == "training":
# Class ID mask to mark class IDs supported by the dataset the image
# came from.
active_class_ids = KL.Lambda(
lambda x: parse_image_meta_graph(x)["active_class_ids"]
)(input_image_meta)
if not config.USE_RPN_ROIS:
# Ignore predicted ROIs and use ROIs provided as an input.
input_rois = KL.Input(shape=[config.POST_NMS_ROIS_TRAINING, 4],
name="input_roi", dtype=np.int32)
# Normalize coordinates
target_rois = KL.Lambda(lambda x: norm_boxes_graph(
x, K.shape(input_image)[1:3]))(input_rois)
else:
target_rois = rpn_rois
# Generate detection targets
# Subsamples proposals and generates target outputs for training
# Note that proposal class IDs, gt_boxes, and gt_masks are zero
# padded. Equally, returned rois and targets are zero padded.
rois, target_class_ids, target_bbox, target_mask =\
DetectionTargetLayer(config, name="proposal_targets")([
target_rois, input_gt_class_ids, gt_boxes, input_gt_masks])
# Network Heads
# TODO: verify that this handles zero padded ROIs
mrcnn_class_logits, mrcnn_class, mrcnn_bbox =\
fpn_classifier_graph(rois, mrcnn_feature_maps, input_image_meta,
config.POOL_SIZE, config.NUM_CLASSES,
train_bn=config.TRAIN_BN,
fc_layers_size=config.FPN_CLASSIF_FC_LAYERS_SIZE)
mrcnn_mask = build_fpn_mask_graph(rois, mrcnn_feature_maps,
input_image_meta,
config.MASK_POOL_SIZE,
config.NUM_CLASSES,
train_bn=config.TRAIN_BN)
            # TODO: clean up (use tf.identity if necessary)
output_rois = KL.Lambda(lambda x: x * 1, name="output_rois")(rois)
# Losses
rpn_class_loss = KL.Lambda(lambda x: rpn_class_loss_graph(*x), name="rpn_class_loss")(
[input_rpn_match, rpn_class_logits])
rpn_bbox_loss = KL.Lambda(lambda x: rpn_bbox_loss_graph(config, *x), name="rpn_bbox_loss")(
[input_rpn_bbox, input_rpn_match, rpn_bbox])
class_loss = KL.Lambda(lambda x: mrcnn_class_loss_graph(*x), name="mrcnn_class_loss")(
[target_class_ids, mrcnn_class_logits, active_class_ids])
bbox_loss = KL.Lambda(lambda x: mrcnn_bbox_loss_graph(*x), name="mrcnn_bbox_loss")(
[target_bbox, target_class_ids, mrcnn_bbox])
mask_loss = KL.Lambda(lambda x: mrcnn_mask_loss_graph(*x), name="mrcnn_mask_loss")(
[target_mask, target_class_ids, mrcnn_mask])
# Model
inputs = [input_image, input_image_meta,
input_rpn_match, input_rpn_bbox, input_gt_class_ids, input_gt_boxes, input_gt_masks]
if not config.USE_RPN_ROIS:
inputs.append(input_rois)
outputs = [rpn_class_logits, rpn_class, rpn_bbox,
mrcnn_class_logits, mrcnn_class, mrcnn_bbox, mrcnn_mask,
rpn_rois, output_rois,
rpn_class_loss, rpn_bbox_loss, class_loss, bbox_loss, mask_loss]
model = KM.Model(inputs, outputs, name='mask_rcnn')
else:
# Network Heads
# Proposal classifier and BBox regressor heads
mrcnn_class_logits, mrcnn_class, mrcnn_bbox =\
fpn_classifier_graph(rpn_rois, mrcnn_feature_maps, input_image_meta,
config.POOL_SIZE, config.NUM_CLASSES,
train_bn=config.TRAIN_BN,
fc_layers_size=config.FPN_CLASSIF_FC_LAYERS_SIZE)
# Detections
# output is [batch, num_detections, (y1, x1, y2, x2, class_id, score)] in
# normalized coordinates
detections = DetectionLayer(config, name="mrcnn_detection")(
[rpn_rois, mrcnn_class, mrcnn_bbox, input_image_meta])
# Create masks for detections
detection_boxes = KL.Lambda(lambda x: x[..., :4])(detections)
mrcnn_mask = build_fpn_mask_graph(detection_boxes, mrcnn_feature_maps,
input_image_meta,
config.MASK_POOL_SIZE,
config.NUM_CLASSES,
train_bn=config.TRAIN_BN)
model = KM.Model([input_image, input_image_meta, input_anchors],
[detections, mrcnn_class, mrcnn_bbox,
mrcnn_mask, rpn_rois, rpn_class, rpn_bbox],
name='mask_rcnn')
# Add multi-GPU support.
"""if config.GPU_COUNT > 1:
from mrcnn.parallel_model import ParallelModel
model = ParallelModel(model, config.GPU_COUNT)
"""
return model
def find_last(self):
"""Finds the last checkpoint file of the last trained model in the
model directory.
Returns:
The path of the last checkpoint file
"""
# Get directory names. Each directory corresponds to a model
dir_names = next(os.walk(self.model_dir))[1]
key = self.config.NAME.lower()
dir_names = filter(lambda f: f.startswith(key), dir_names)
dir_names = sorted(dir_names)
if not dir_names:
import errno
raise FileNotFoundError(
errno.ENOENT,
"Could not find model directory under {}".format(self.model_dir))
# Pick last directory
dir_name = os.path.join(self.model_dir, dir_names[-1])
# Find the last checkpoint
checkpoints = next(os.walk(dir_name))[2]
checkpoints = filter(lambda f: f.startswith("mask_rcnn"), checkpoints)
checkpoints = sorted(checkpoints)
if not checkpoints:
import errno
raise FileNotFoundError(
errno.ENOENT, "Could not find weight files in {}".format(dir_name))
checkpoint = os.path.join(dir_name, checkpoints[-1])
return checkpoint
def load_weights(self, filepath, by_name=False, exclude=None):
"""Modified version of the corresponding Keras function with
the addition of multi-GPU support and the ability to exclude
some layers from loading.
exclude: list of layer names to exclude
"""
import h5py
from tensorflow.python.keras.saving import hdf5_format
if exclude:
by_name = True
if h5py is None:
raise ImportError('`load_weights` requires h5py.')
with h5py.File(filepath, mode='r') as f:
if 'layer_names' not in f.attrs and 'model_weights' in f:
f = f['model_weights']
# In multi-GPU training, we wrap the model. Get layers
# of the inner model because they have the weights.
keras_model = self.keras_model
layers = keras_model.inner_model.layers if hasattr(keras_model, "inner_model")\
else keras_model.layers
# Exclude some layers
if exclude:
layers = filter(lambda l: l.name not in exclude, layers)
if by_name:
hdf5_format.load_weights_from_hdf5_group_by_name(f, layers)
else:
hdf5_format.load_weights_from_hdf5_group(f, layers)
# Update the log directory
self.set_log_dir(filepath)
def lr_scheduler(self, epoch):
        if epoch > 0 and epoch % 50 == 0:
            self.lr = self.lr / 10
#print("Learning Rate", self.lr)
return self.lr
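    # Editor note (illustrative): with config.LEARNING_RATE = 0.001 (a
    # hypothetical value), the schedule above trains epochs 0-49 at 1e-3,
    # epochs 50-99 at 1e-4, epochs 100-149 at 1e-5, and so on, because the
    # LearningRateScheduler callback calls lr_scheduler() once per epoch.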
def compile(self):
"""Gets the model ready for training. Adds losses, regularization, and
metrics. Then calls the Keras compile() function.
"""
# Optimizer object
        optimizer = keras.optimizers.SGD(
            self.lr_scheduler(0),
            momentum=self.config.LEARNING_MOMENTUM,
            clipnorm=self.config.GRADIENT_CLIP_NORM)
# Add Losses
loss_names = [
"rpn_class_loss", "rpn_bbox_loss",
"mrcnn_class_loss", "mrcnn_bbox_loss", "mrcnn_mask_loss"]
for name in loss_names:
layer = self.keras_model.get_layer(name)
if layer.output in self.keras_model.losses:
continue
loss = (
tf.reduce_mean(input_tensor=layer.output, keepdims=True)
* self.config.LOSS_WEIGHTS.get(name, 1.))
self.keras_model.add_loss(loss)
# Add L2 Regularization
# Skip gamma and beta weights of batch normalization layers.
reg_losses = [
keras.regularizers.l2(self.config.WEIGHT_DECAY)(w) / tf.cast(tf.size(input=w), tf.float32)
for w in self.keras_model.trainable_weights
if 'gamma' not in w.name and 'beta' not in w.name]
self.keras_model.add_loss(tf.add_n(reg_losses))
# Compile
self.keras_model.compile(
optimizer=optimizer,
loss=[None] * len(self.keras_model.outputs))
# Add metrics for losses
for name in loss_names:
if name in self.keras_model.metrics_names:
continue
layer = self.keras_model.get_layer(name)
self.keras_model.metrics_names.append(name)
loss = (
tf.reduce_mean(input_tensor=layer.output, keepdims=True)
* self.config.LOSS_WEIGHTS.get(name, 1.))
self.keras_model.add_metric(loss, name=name, aggregation='mean')
def set_trainable(self, layer_regex, keras_model=None, indent=0, verbose=1):
"""Sets model layers as trainable if their names match
the given regular expression.
"""
# Print message on the first call (but not on recursive calls)
if verbose > 0 and keras_model is None:
log("Selecting layers to train")
keras_model = keras_model or self.keras_model
# In multi-GPU training, we wrap the model. Get layers
# of the inner model because they have the weights.
layers = keras_model.inner_model.layers if hasattr(keras_model, "inner_model")\
else keras_model.layers
for layer in layers:
# Is the layer a model?
if layer.__class__.__name__ == 'Model':
print("In model: ", layer.name)
self.set_trainable(
layer_regex, keras_model=layer, indent=indent + 4)
continue
if not layer.weights:
continue
# Is it trainable?
trainable = bool(re.fullmatch(layer_regex, layer.name))
# Update layer. If layer is a container, update inner layer.
if layer.__class__.__name__ == 'TimeDistributed':
layer.layer.trainable = trainable
else:
layer.trainable = trainable
# Print trainable layer names
"""if trainable and verbose > 0:
log("{}{:20} ({})".format(" " * indent, layer.name,
layer.__class__.__name__))
"""
def set_log_dir(self, model_path = None):
"""Sets the model log directory and epoch counter.
        model_path: Accepted for compatibility but currently unused; this
            implementation always restarts the epoch counter at 0 and uses a
            fixed checkpoint file name pattern.
"""
        # Start the epoch counter as if starting a new model
        self.epoch = 0
        # Fixed checkpoint file name pattern; epoch and val_loss are filled in
        # by the ModelCheckpoint callback
self.model_name = 'mask_rcnn_model.{epoch:03d}-{val_loss:01f}.h5'
def train(self, train_dataset, val_dataset, epochs,layers,models,
augmentation=False, no_augmentation_sources=None):
"""Train the model.
train_dataset, val_dataset: Training and validation Dataset objects.
        models: Name of the sub-directory under model_dir where checkpoint
            files are saved.
epochs: Number of training epochs. Note that previous training epochs
                are considered to be done already, so this actually determines
                the epochs to train in total rather than in this particular
call.
        layers: Allows selecting which layers to train. It can be:
- A regular expression to match layer names to train
- One of these predefined values:
heads: The RPN, classifier and mask heads of the network
all: All the layers
3+: Train Resnet stage 3 and up
4+: Train Resnet stage 4 and up
5+: Train Resnet stage 5 and up
augmentation: Optional. An imgaug (https://github.com/aleju/imgaug)
augmentation. For example, passing imgaug.augmenters.Fliplr(0.5)
flips images right/left 50% of the time. You can pass complex
augmentations as well. This augmentation applies 50% of the
time, and when it does it flips images right/left half the time
and adds a Gaussian blur with a random sigma in range 0 to 5.
augmentation = imgaug.augmenters.Sometimes(0.5, [
imgaug.augmenters.Fliplr(0.5),
imgaug.augmenters.GaussianBlur(sigma=(0.0, 5.0))
])
no_augmentation_sources: Optional. List of sources to exclude for
augmentation. A source is string that identifies a dataset and is
defined in the Dataset class.
"""
assert self.mode == "training", "Create model in training mode."
# Pre-defined layer regular expressions
layer_regex = {
# all layers but the backbone
"heads": r"(mrcnn\_.*)|(rpn\_.*)|(fpn\_.*)",
# From a specific Resnet stage and up
"3+": r"(res3.*)|(bn3.*)|(res4.*)|(bn4.*)|(res5.*)|(bn5.*)|(mrcnn\_.*)|(rpn\_.*)|(fpn\_.*)",
"4+": r"(res4.*)|(bn4.*)|(res5.*)|(bn5.*)|(mrcnn\_.*)|(rpn\_.*)|(fpn\_.*)",
"5+": r"(res5.*)|(bn5.*)|(mrcnn\_.*)|(rpn\_.*)|(fpn\_.*)",
# All layers
"all": ".*",
}
if layers in layer_regex.keys():
layers = layer_regex[layers]
# Data generators
train_generator = DataGenerator(train_dataset, self.config, shuffle=True,
augmentation=augmentation)
val_generator = DataGenerator(val_dataset, self.config, shuffle=True)
lr_rate = LearningRateScheduler(self.lr_scheduler)
path_models = models
self.save_directory = os.path.join(self.model_dir, path_models)
if not os.path.isdir(self.save_directory):
os.makedirs(self.save_directory)
self.checkpoint_path = os.path.join(self.save_directory, self.model_name)
callb = [
ModelCheckpoint(self.checkpoint_path,save_weights_only=True,save_best_only = True, monitor = "val_loss", verbose = 0),
lr_rate ,
]
# Train
#log("\nStarting at epoch {}. LR={}\n".format(self.epoch, learning_rate))
log("Checkpoint Path: {}".format(self.save_directory))
self.set_trainable(layers)
#self.compile(self.lr_scheduler(0))
self.compile()
# Work-around for Windows: Keras fails on Windows when using
# multiprocessing workers. See discussion here:
# https://github.com/matterport/Mask_RCNN/issues/13#issuecomment-353124009
if os.name == 'nt':
workers = 0
else:
workers = multiprocessing.cpu_count()
self.keras_model.fit(
train_generator,
initial_epoch=self.epoch,
epochs=epochs,
steps_per_epoch=self.config.STEPS_PER_EPOCH,
callbacks=callb,
validation_data=val_generator,
validation_steps=self.config.VALIDATION_STEPS,
max_queue_size=100,
workers=workers,
verbose = 1
)
self.epoch = max(self.epoch, epochs)
def mold_inputs(self, images):
"""Takes a list of images and modifies them to the format expected
as an input to the neural network.
images: List of image matrices [height,width,depth]. Images can have
different sizes.
Returns 3 Numpy matrices:
molded_images: [N, h, w, 3]. Images resized and normalized.
image_metas: [N, length of meta data]. Details about each image.
windows: [N, (y1, x1, y2, x2)]. The portion of the image that has the
original image (padding excluded).
"""
molded_images = []
image_metas = []
windows = []
for image in images:
# Resize image
# TODO: move resizing to mold_image()
molded_image, window, scale, padding, crop = utils.resize_image(
image,
min_dim=self.config.IMAGE_MIN_DIM,
min_scale=self.config.IMAGE_MIN_SCALE,
max_dim=self.config.IMAGE_MAX_DIM,
mode=self.config.IMAGE_RESIZE_MODE)
molded_image = mold_image(molded_image, self.config)
# Build image_meta
image_meta = compose_image_meta(
0, image.shape, molded_image.shape, window, scale,
np.zeros([self.config.NUM_CLASSES], dtype=np.int32))
# Append
molded_images.append(molded_image)
windows.append(window)
image_metas.append(image_meta)
# Pack into arrays
molded_images = np.stack(molded_images)
image_metas = np.stack(image_metas)
windows = np.stack(windows)
return molded_images, image_metas, windows
def unmold_detections(self, detections, mrcnn_mask, original_image_shape,
image_shape, window):
"""Reformats the detections of one image from the format of the neural
network output to a format suitable for use in the rest of the
application.
detections: [N, (y1, x1, y2, x2, class_id, score)] in normalized coordinates
mrcnn_mask: [N, height, width, num_classes]
original_image_shape: [H, W, C] Original image shape before resizing
image_shape: [H, W, C] Shape of the image after resizing and padding
window: [y1, x1, y2, x2] Pixel coordinates of box in the image where the real
image is excluding the padding.
Returns:
boxes: [N, (y1, x1, y2, x2)] Bounding boxes in pixels
class_ids: [N] Integer class IDs for each bounding box
scores: [N] Float probability scores of the class_id
masks: [height, width, num_instances] Instance masks
"""
# How many detections do we have?
# Detections array is padded with zeros. Find the first class_id == 0.
zero_ix = np.where(detections[:, 4] == 0)[0]
N = zero_ix[0] if zero_ix.shape[0] > 0 else detections.shape[0]
# Extract boxes, class_ids, scores, and class-specific masks
boxes = detections[:N, :4]
class_ids = detections[:N, 4].astype(np.int32)
scores = detections[:N, 5]
masks = mrcnn_mask[np.arange(N), :, :, class_ids]
# Translate normalized coordinates in the resized image to pixel
# coordinates in the original image before resizing
window = utils.norm_boxes(window, image_shape[:2])
wy1, wx1, wy2, wx2 = window
shift = np.array([wy1, wx1, wy1, wx1])
wh = wy2 - wy1 # window height
ww = wx2 - wx1 # window width
scale = np.array([wh, ww, wh, ww])
# Convert boxes to normalized coordinates on the window
boxes = np.divide(boxes - shift, scale)
# Convert boxes to pixel coordinates on the original image
boxes = utils.denorm_boxes(boxes, original_image_shape[:2])
# Filter out detections with zero area. Happens in early training when
# network weights are still random
exclude_ix = np.where(
(boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1]) <= 0)[0]
if exclude_ix.shape[0] > 0:
boxes = np.delete(boxes, exclude_ix, axis=0)
class_ids = np.delete(class_ids, exclude_ix, axis=0)
scores = np.delete(scores, exclude_ix, axis=0)
masks = np.delete(masks, exclude_ix, axis=0)
N = class_ids.shape[0]
# Resize masks to original image size and set boundary threshold.
full_masks = []
for i in range(N):
# Convert neural network mask to full size mask
full_mask = utils.unmold_mask(masks[i], boxes[i], original_image_shape)
full_masks.append(full_mask)
full_masks = np.stack(full_masks, axis=-1)\
if full_masks else np.empty(original_image_shape[:2] + (0,))
return boxes, class_ids, scores, full_masks
def detect(self, images, verbose=0):
"""Runs the detection pipeline.
images: List of images, potentially of different sizes.
Returns a list of dicts, one dict per image. The dict contains:
rois: [N, (y1, x1, y2, x2)] detection bounding boxes
class_ids: [N] int class IDs
scores: [N] float probability scores for the class IDs
masks: [H, W, N] instance binary masks
"""
assert self.mode == "inference", "Create model in inference mode."
assert len(
images) == self.config.BATCH_SIZE, "len(images) must be equal to BATCH_SIZE"
"""if verbose:
log("Processing {} images".format(len(images)))
for image in images:
log("image", image)
"""
# Mold inputs to format expected by the neural network
molded_images, image_metas, windows = self.mold_inputs(images)
# Validate image sizes
# All images in a batch MUST be of the same size
image_shape = molded_images[0].shape
for g in molded_images[1:]:
assert g.shape == image_shape,\
"After resizing, all images must have the same size. Check IMAGE_RESIZE_MODE and image sizes."
# Anchors
anchors = self.get_anchors(image_shape)
# Duplicate across the batch dimension because Keras requires it
# TODO: can this be optimized to avoid duplicating the anchors?
anchors = np.broadcast_to(anchors, (self.config.BATCH_SIZE,) + anchors.shape)
"""if verbose:
log("molded_images", molded_images)
log("image_metas", image_metas)
log("anchors", anchors)
"""
# Run object detection
detections, _, _, mrcnn_mask, _, _, _ =\
self.keras_model.predict([molded_images, image_metas, anchors], verbose=0)
# Process detections
results = []
for i, image in enumerate(images):
final_rois, final_class_ids, final_scores, final_masks =\
self.unmold_detections(detections[i], mrcnn_mask[i],
image.shape, molded_images[i].shape,
windows[i])
results.append({
"rois": final_rois,
"class_ids": final_class_ids,
"scores": final_scores,
"masks": final_masks,
})
return results
def detect_molded(self, molded_images, image_metas, verbose=0):
"""Runs the detection pipeline, but expect inputs that are
molded already. Used mostly for debugging and inspecting
the model.
molded_images: List of images loaded using load_image_gt()
image_metas: image meta data, also returned by load_image_gt()
Returns a list of dicts, one dict per image. The dict contains:
rois: [N, (y1, x1, y2, x2)] detection bounding boxes
class_ids: [N] int class IDs
scores: [N] float probability scores for the class IDs
masks: [H, W, N] instance binary masks
"""
assert self.mode == "inference", "Create model in inference mode."
assert len(molded_images) == self.config.BATCH_SIZE,\
"Number of images must be equal to BATCH_SIZE"
if verbose:
log("Processing {} images".format(len(molded_images)))
for image in molded_images:
log("image", image)
# Validate image sizes
# All images in a batch MUST be of the same size
image_shape = molded_images[0].shape
for g in molded_images[1:]:
assert g.shape == image_shape, "Images must have the same size"
# Anchors
anchors = self.get_anchors(image_shape)
# Duplicate across the batch dimension because Keras requires it
# TODO: can this be optimized to avoid duplicating the anchors?
anchors = np.broadcast_to(anchors, (self.config.BATCH_SIZE,) + anchors.shape)
if verbose:
log("molded_images", molded_images)
log("image_metas", image_metas)
log("anchors", anchors)
# Run object detection
detections, _, _, mrcnn_mask, _, _, _ =\
self.keras_model.predict([molded_images, image_metas, anchors], verbose=0)
# Process detections
results = []
for i, image in enumerate(molded_images):
window = [0, 0, image.shape[0], image.shape[1]]
final_rois, final_class_ids, final_scores, final_masks =\
self.unmold_detections(detections[i], mrcnn_mask[i],
image.shape, molded_images[i].shape,
window)
results.append({
"rois": final_rois,
"class_ids": final_class_ids,
"scores": final_scores,
"masks": final_masks,
})
return results
def get_anchors(self, image_shape):
"""Returns anchor pyramid for the given image size."""
backbone_shapes = compute_backbone_shapes(self.config, image_shape)
# Cache anchors and reuse if image shape is the same
if not hasattr(self, "_anchor_cache"):
self._anchor_cache = {}
if not tuple(image_shape) in self._anchor_cache:
# Generate Anchors
a = utils.generate_pyramid_anchors(
self.config.RPN_ANCHOR_SCALES,
self.config.RPN_ANCHOR_RATIOS,
backbone_shapes,
self.config.BACKBONE_STRIDES,
self.config.RPN_ANCHOR_STRIDE)
# Keep a copy of the latest anchors in pixel coordinates because
# it's used in inspect_model notebooks.
            # TODO: Remove this after the notebooks are refactored to not use it
self.anchors = a
# Normalize coordinates
self._anchor_cache[tuple(image_shape)] = utils.norm_boxes(a, image_shape[:2])
return self._anchor_cache[tuple(image_shape)]
def ancestor(self, tensor, name, checked=None):
"""Finds the ancestor of a TF tensor in the computation graph.
tensor: TensorFlow symbolic tensor.
name: Name of ancestor tensor to find
checked: For internal use. A list of tensors that were already
searched to avoid loops in traversing the graph.
"""
checked = checked if checked is not None else []
# Put a limit on how deep we go to avoid very long loops
if len(checked) > 500:
return None
# Convert name to a regex and allow matching a number prefix
# because Keras adds them automatically
if isinstance(name, str):
name = re.compile(name.replace("/", r"(\_\d+)*/"))
parents = tensor.op.inputs
for p in parents:
if p in checked:
continue
if bool(re.fullmatch(name, p.name)):
return p
checked.append(p)
a = self.ancestor(p, name, checked)
if a is not None:
return a
return None
def find_trainable_layer(self, layer):
"""If a layer is encapsulated by another layer, this function
digs through the encapsulation and returns the layer that holds
the weights.
"""
if layer.__class__.__name__ == 'TimeDistributed':
return self.find_trainable_layer(layer.layer)
return layer
def get_trainable_layers(self):
"""Returns a list of layers that have weights."""
layers = []
# Loop through all layers
for l in self.keras_model.layers:
# If layer is a wrapper, find inner trainable layer
l = self.find_trainable_layer(l)
# Include layer if it has weights
if l.get_weights():
layers.append(l)
return layers
def run_graph(self, images, outputs, image_metas=None):
"""Runs a sub-set of the computation graph that computes the given
outputs.
image_metas: If provided, the images are assumed to be already
molded (i.e. resized, padded, and normalized)
outputs: List of tuples (name, tensor) to compute. The tensors are
symbolic TensorFlow tensors and the names are for easy tracking.
Returns an ordered dict of results. Keys are the names received in the
input and values are Numpy arrays.
"""
model = self.keras_model
# Organize desired outputs into an ordered dict
outputs = OrderedDict(outputs)
for o in outputs.values():
assert o is not None
# Build a Keras function to run parts of the computation graph
inputs = model.inputs
if model.uses_learning_phase and not isinstance(K.learning_phase(), int):
inputs += [K.learning_phase()]
kf = K.function(model.inputs, list(outputs.values()))
# Prepare inputs
if image_metas is None:
molded_images, image_metas, _ = self.mold_inputs(images)
else:
molded_images = images
image_shape = molded_images[0].shape
# Anchors
anchors = self.get_anchors(image_shape)
# Duplicate across the batch dimension because Keras requires it
# TODO: can this be optimized to avoid duplicating the anchors?
anchors = np.broadcast_to(anchors, (self.config.BATCH_SIZE,) + anchors.shape)
model_in = [molded_images, image_metas, anchors]
# Run inference
if model.uses_learning_phase and not isinstance(K.learning_phase(), int):
model_in.append(0.)
outputs_np = kf(model_in)
        # Pack the generated Numpy arrays into a dict and log the results.
outputs_np = OrderedDict([(k, v)
for k, v in zip(outputs.keys(), outputs_np)])
for k, v in outputs_np.items():
log(k, v)
return outputs_np
############################################################
# Data Formatting
############################################################
def compose_image_meta(image_id, original_image_shape, image_shape,
window, scale, active_class_ids):
"""Takes attributes of an image and puts them in one 1D array.
image_id: An int ID of the image. Useful for debugging.
original_image_shape: [H, W, C] before resizing or padding.
image_shape: [H, W, C] after resizing and padding
window: (y1, x1, y2, x2) in pixels. The area of the image where the real
image is (excluding the padding)
scale: The scaling factor applied to the original image (float32)
active_class_ids: List of class_ids available in the dataset from which
the image came. Useful if training on images from multiple datasets
where not all classes are present in all datasets.
"""
meta = np.array(
[image_id] + # size=1
list(original_image_shape) + # size=3
list(image_shape) + # size=3
        list(window) +                # size=4 (y1, x1, y2, x2) in image coordinates
[scale] + # size=1
list(active_class_ids) # size=num_classes
)
return meta
def parse_image_meta(meta):
"""Parses an array that contains image attributes to its components.
See compose_image_meta() for more details.
meta: [batch, meta length] where meta length depends on NUM_CLASSES
Returns a dict of the parsed values.
"""
image_id = meta[:, 0]
original_image_shape = meta[:, 1:4]
image_shape = meta[:, 4:7]
    window = meta[:, 7:11]  # (y1, x1, y2, x2) window of image in pixels
scale = meta[:, 11]
active_class_ids = meta[:, 12:]
return {
"image_id": image_id.astype(np.int32),
"original_image_shape": original_image_shape.astype(np.int32),
"image_shape": image_shape.astype(np.int32),
"window": window.astype(np.int32),
"scale": scale.astype(np.float32),
"active_class_ids": active_class_ids.astype(np.int32),
}
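# --- Illustrative sketch (editor-added; not called anywhere in this module) ---
# Round-trips a meta vector through compose_image_meta()/parse_image_meta().
# The image shapes, window and class count below are hypothetical example values.
def _example_image_meta_roundtrip(num_classes=81):
    meta = compose_image_meta(
        image_id=0,
        original_image_shape=(1024, 768, 3),
        image_shape=(1024, 1024, 3),
        window=(0, 128, 1024, 896),  # real image occupies columns 128..896
        scale=1.0,
        active_class_ids=np.zeros([num_classes], dtype=np.int32))
    # parse_image_meta() expects a batch dimension, hence np.newaxis.
    return parse_image_meta(meta[np.newaxis, :])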
def parse_image_meta_graph(meta):
"""Parses a tensor that contains image attributes to its components.
See compose_image_meta() for more details.
meta: [batch, meta length] where meta length depends on NUM_CLASSES
Returns a dict of the parsed tensors.
"""
image_id = meta[:, 0]
original_image_shape = meta[:, 1:4]
image_shape = meta[:, 4:7]
    window = meta[:, 7:11]  # (y1, x1, y2, x2) window of image in pixels
scale = meta[:, 11]
active_class_ids = meta[:, 12:]
return {
"image_id": image_id,
"original_image_shape": original_image_shape,
"image_shape": image_shape,
"window": window,
"scale": scale,
"active_class_ids": active_class_ids,
}
def mold_image(images, config):
"""Expects an RGB image (or array of images) and subtracts
the mean pixel and converts it to float. Expects image
colors in RGB order.
"""
return images.astype(np.float32) - config.MEAN_PIXEL
def unmold_image(normalized_images, config):
"""Takes a image normalized with mold() and returns the original."""
return (normalized_images + config.MEAN_PIXEL).astype(np.uint8)
############################################################
# Miscellaneous Graph Functions
############################################################
def trim_zeros_graph(boxes, name='trim_zeros'):
"""Often boxes are represented with matrices of shape [N, 4] and
are padded with zeros. This removes zero boxes.
boxes: [N, 4] matrix of boxes.
non_zeros: [N] a 1D boolean mask identifying the rows to keep
"""
non_zeros = tf.cast(tf.reduce_sum(input_tensor=tf.abs(boxes), axis=1), tf.bool)
boxes = tf.boolean_mask(tensor=boxes, mask=non_zeros, name=name)
return boxes, non_zeros
def batch_pack_graph(x, counts, num_rows):
"""Picks different number of values from each row
in x depending on the values in counts.
"""
outputs = []
for i in range(num_rows):
outputs.append(x[i, :counts[i]])
return tf.concat(outputs, axis=0)
def norm_boxes_graph(boxes, shape):
"""Converts boxes from pixel coordinates to normalized coordinates.
boxes: [..., (y1, x1, y2, x2)] in pixel coordinates
shape: [..., (height, width)] in pixels
Note: In pixel coordinates (y2, x2) is outside the box. But in normalized
coordinates it's inside the box.
Returns:
[..., (y1, x1, y2, x2)] in normalized coordinates
"""
h, w = tf.split(tf.cast(shape, tf.float32), 2)
scale = tf.concat([h, w, h, w], axis=-1) - tf.constant(1.0)
shift = tf.constant([0., 0., 1., 1.])
return tf.divide(boxes - shift, scale)
def denorm_boxes_graph(boxes, shape):
"""Converts boxes from normalized coordinates to pixel coordinates.
boxes: [..., (y1, x1, y2, x2)] in normalized coordinates
shape: [..., (height, width)] in pixels
Note: In pixel coordinates (y2, x2) is outside the box. But in normalized
coordinates it's inside the box.
Returns:
[..., (y1, x1, y2, x2)] in pixel coordinates
"""
h, w = tf.split(tf.cast(shape, tf.float32), 2)
scale = tf.concat([h, w, h, w], axis=-1) - tf.constant(1.0)
shift = tf.constant([0., 0., 1., 1.])
return tf.cast(tf.round(tf.multiply(boxes, scale) + shift), tf.int32)
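# --- Worked example (editor-added) ---
# For a hypothetical 1024x1024 image, scale = [1023, 1023, 1023, 1023] and
# shift = [0, 0, 1, 1], so the full-image pixel box [0, 0, 1024, 1024] maps to
# [0, 0, 1, 1] in normalized coordinates; denorm_boxes_graph() inverts this.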
|
88d206d76a154a06453ed6187e470719c8744dcd
|
afc3558e47ea4c82cb70190743472274eae7aeb1
|
/mmocr/utils/point_utils.py
|
809805f2eaf44337c184216375428f07e99899b9
|
[
"Apache-2.0"
] |
permissive
|
open-mmlab/mmocr
|
86a77fb77ca80cede9c41a9a22080eeeaf364002
|
9551af6e5a2482e72a2af1e3b8597fd54b999d69
|
refs/heads/main
| 2023-08-03T14:06:11.075037
| 2023-07-26T02:32:14
| 2023-07-26T02:32:14
| 355,559,187
| 3,734
| 801
|
Apache-2.0
| 2023-09-12T03:17:12
| 2021-04-07T13:40:21
|
Python
|
UTF-8
|
Python
| false
| false
| 953
|
py
|
point_utils.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import numpy as np
from mmocr.utils.typing_utils import ArrayLike
def points_center(points: ArrayLike) -> np.ndarray:
"""Calculate the center of a set of points.
Args:
points (ArrayLike): A set of points.
Returns:
np.ndarray: The coordinate of center point.
"""
points = np.array(points, dtype=np.float32)
assert points.size % 2 == 0
points = points.reshape([-1, 2])
return np.mean(points, axis=0)
def point_distance(pt1: ArrayLike, pt2: ArrayLike) -> float:
"""Calculate the distance between two points.
Args:
pt1 (ArrayLike): The first point.
pt2 (ArrayLike): The second point.
Returns:
float: The distance between two points.
"""
pt1 = np.array(pt1)
pt2 = np.array(pt2)
assert (pt1.size == 2 and pt2.size == 2)
dist = np.square(pt2 - pt1).sum()
dist = np.sqrt(dist)
return dist
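# --- Illustrative usage (editor-added sketch; not part of the MMOCR API) ---
# A quick, self-contained check of the two helpers above.
if __name__ == '__main__':
    # Center of the axis-aligned rectangle (0, 0)-(4, 2), given as a flat list.
    print(points_center([0, 0, 4, 0, 4, 2, 0, 2]))  # -> [2. 1.]
    # Classic 3-4-5 right triangle.
    print(point_distance((0, 0), (3, 4)))  # -> 5.0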
|
b1a650b937b507391196567a83dec206663e2280
|
312a8fde11293cb142334a3860966ec1f75ac401
|
/importer_client/python/timesketch_import_client/helper_test.py
|
8711a2b627086508f58b312afba29b9637ab8074
|
[
"Apache-2.0"
] |
permissive
|
google/timesketch
|
f0fd09062a8a24bac581d2d4286d095d667d2f10
|
24f471b58ca4a87cb053961b5f05c07a544ca7b8
|
refs/heads/master
| 2023-08-31T21:48:19.602686
| 2023-08-31T11:24:17
| 2023-08-31T11:24:17
| 21,009,909
| 2,263
| 647
|
Apache-2.0
| 2023-09-14T14:08:07
| 2014-06-19T17:49:45
|
Python
|
UTF-8
|
Python
| false
| false
| 4,792
|
py
|
helper_test.py
|
# Copyright 2020 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the Timesketch importer."""
from __future__ import unicode_literals
import unittest
import tempfile
from . import helper
MOCK_CONFIG = """
foobar:
message: 'The {cA} went bananas with {cC}, but without letting {cD} know.'
timestamp_desc: 'Event Logged'
datetime: 'cB'
columns_subset: 'cA,cB,cC,cD'
bar:
message: 'Some people {stuff}, with {other}'
timestamp_desc: 'Stuff Happened'
separator: '>'
encoding: 'secret-formula'
data_type: 'data:secret:message'
vindur:
message: 'Vedrid i dag er: {vedur}, med typiskri {auka}'
timestamp_desc: 'Thu Veist Thad'
columns: 'vedur,magn,auka,rigning'
"""
class MockStreamer:
"""Mock streamer object for testing."""
def __init__(self):
"""Initialize."""
self.format_string = ""
self.timestamp_description = ""
self.csv_delimiter = ""
self.text_encoding = ""
self.datetime_column = ""
def set_message_format_string(self, message):
self.format_string = message
def set_timestamp_description(self, timestamp_desc):
self.timestamp_description = timestamp_desc
def set_csv_delimiter(self, separator):
self.csv_delimiter = separator
def set_text_encoding(self, encoding):
self.text_encoding = encoding
def set_datetime_column(self, datetime_string):
self.datetime_column = datetime_string
class TimesketchHelperTest(unittest.TestCase):
"""Test Timesketch import helper."""
def setUp(self):
"""Set up the test."""
self._helper = helper.ImportHelper()
with tempfile.NamedTemporaryFile("w", suffix=".yaml") as fw:
fw.write(MOCK_CONFIG)
fw.seek(0)
self._helper.add_config(fw.name)
def test_not_config(self):
"""Test a helper that does not match."""
streamer = MockStreamer()
self._helper.configure_streamer(streamer, data_type="foo:no")
self.assertEqual(streamer.format_string, "")
self.assertEqual(streamer.timestamp_description, "")
self.assertEqual(streamer.csv_delimiter, "")
self.assertEqual(streamer.text_encoding, "")
self.assertEqual(streamer.datetime_column, "")
def test_sub_column(self):
"""Test a helper that matches on sub columns."""
streamer = MockStreamer()
self._helper.configure_streamer(
streamer,
data_type="foo:no",
columns=["cA", "cB", "cC", "cD", "cE", "cF", "cG"],
)
self.assertEqual(
streamer.format_string,
"The {cA} went bananas with {cC}, but without letting {cD} know.",
)
self.assertEqual(streamer.timestamp_description, "Event Logged")
self.assertEqual(streamer.csv_delimiter, "")
self.assertEqual(streamer.text_encoding, "")
self.assertEqual(streamer.datetime_column, "cB")
def test_columns(self):
"""Test a helper that matches on columns."""
streamer = MockStreamer()
self._helper.configure_streamer(
streamer, data_type="foo:no", columns=["vedur", "magn", "auka", "rigning"]
)
self.assertEqual(
streamer.format_string, "Vedrid i dag er: {vedur}, med typiskri {auka}"
)
self.assertEqual(streamer.timestamp_description, "Thu Veist Thad")
self.assertEqual(streamer.csv_delimiter, "")
self.assertEqual(streamer.text_encoding, "")
self.assertEqual(streamer.datetime_column, "")
def test_data_type(self):
"""Test a helper that matches on data_type."""
streamer = MockStreamer()
self._helper.configure_streamer(
streamer,
data_type="data:secret:message",
columns=["vedur", "auka", "rigning"],
)
self.assertEqual(streamer.format_string, "Some people {stuff}, with {other}")
self.assertEqual(streamer.timestamp_description, "Stuff Happened")
self.assertEqual(streamer.csv_delimiter, ">")
self.assertEqual(streamer.text_encoding, "secret-formula")
self.assertEqual(streamer.datetime_column, "")
|
5a9a1fa51b417a496ada59c0f2391b5601c12fca
|
bb0e88e72382e27c48222d1e7a308476794740fe
|
/core/lib/payload/cleanup.py
|
fe99f69f95cad857ee6dcfc40062dfb7365ca639
|
[
"BSD-3-Clause"
] |
permissive
|
facebookincubator/OnlineSchemaChange
|
b6b0e62618a54bc5a7058a8742a9c5eef395d22a
|
99c27961ec3a87c7d1bb650107e79971ae8c465f
|
refs/heads/main
| 2023-09-01T21:09:11.494282
| 2023-09-01T05:53:07
| 2023-09-01T05:53:07
| 79,285,486
| 998
| 167
|
NOASSERTION
| 2023-01-24T03:10:54
| 2017-01-18T00:06:19
|
Python
|
UTF-8
|
Python
| false
| false
| 12,920
|
py
|
cleanup.py
|
#!/usr/bin/env python3
"""
Copyright (c) 2017-present, Facebook, Inc.
All rights reserved.
This source code is licensed under the BSD-style license found in the
LICENSE file in the root directory of this source tree.
"""
import logging
import os
import re
import time
import MySQLdb
from .. import constant, sql, util
from ..error import OSCError
from ..sql import escape
from .base import Payload
log = logging.getLogger(__name__)
class CleanupPayload(Payload):
"""
    This payload is not a schema change payload itself. It'll clean up all
    the mess left behind by the last OSC run
"""
def __init__(self, *args, **kwargs):
super(CleanupPayload, self).__init__(*args, **kwargs)
self.files_to_clean = []
self.to_drop = []
self.sqls_to_execute = []
self._current_db = kwargs.get("db")
self._current_table = kwargs.get("table")
self.databases = kwargs.get("database")
self.kill_first = kwargs.get("kill", False)
self.kill_only = kwargs.get("kill_only", False)
self.print_tables = kwargs.get("print_tables", False)
self.tables_to_print = []
def set_current_table(self, table_name):
self._current_table = table_name
def cleanup(self, db="mysql"):
"""
        The actual cleanup logic. We will:
- remove all the given files
- drop all the given triggers
- drop all the tables
"""
        # Remove files first, because dropping a trigger may fail due to a
        # full disk.
for filepath in self.files_to_clean:
try:
if os.path.isfile(filepath):
util.rm(filepath, self.sudo)
except Exception:
# We will try our best to do the cleanup even when there's an
                # exception, because each cleanup entry is independent of
                # the others
log.exception("Failed to cleanup file: {}".format(filepath))
# Drop table and triggers
# If we have multiple databases, re-require the connection
# since the previous connection might already reach wait_timeout
if not self._conn or (self.databases and len(self.databases) > 1):
self._conn = self.get_conn(db)
if self.print_tables:
self.tables_to_print.append(
("SELECT * FROM `{}`".format(self._current_table), db)
)
self.print_osc_tables(db)
self.gen_drop_sqls()
self.get_mysql_settings()
self.init_mysql_version()
self.set_no_binlog()
self.stop_slave_sql()
# Stop sql thread to avoid MDL lock contention and blocking reads before
# running DDLs. Will use high_pri_ddl instead if it's supported
if self.is_high_pri_ddl_supported:
self.enable_priority_ddl()
else:
self.lock_tables(tables=[self.table_name])
self.execute_sql("USE `{}`".format(escape(db)))
current_db = db
for stmt, stmt_db in self.sqls_to_execute:
cleanupError = False
try:
# Switch to the database we are going to work on to avoid
# cross db SQL execution
if stmt_db != current_db:
self.execute_sql("USE `{}`".format(escape(stmt_db)))
current_db = stmt_db
log.info("Executing db: {} sql: {}".format(stmt_db, stmt))
self.execute_sql(stmt)
except MySQLdb.OperationalError as e:
errnum, _ = e.args
# 1507 means the partition doesn't exist, which
# is most likely competing partition maintenance
# 1508 means we tried to drop the last partition in a table
if errnum in [1507, 1508]:
continue
cleanupError = True
error = e
except Exception as e:
cleanupError = True
error = e
if cleanupError:
self.sqls_to_execute = []
if not self.is_high_pri_ddl_supported:
self.unlock_tables()
self.start_slave_sql()
log.error("Failed to execute sql for cleanup")
raise OSCError(
"CLEANUP_EXECUTION_ERROR", {"sql": stmt, "msg": str(error)}
)
if not self.is_high_pri_ddl_supported:
self.unlock_tables()
self.sqls_to_execute = []
self.start_slave_sql()
def print_osc_tables(self, db="mysql"):
        # Print all tables involved in the OSC job (used in tests)
if not self._conn or (self.databases and len(self.databases) > 1):
self._conn = self.get_conn(db)
self.execute_sql("USE `{}`".format(escape(db)))
for stmt, stmt_db in self.tables_to_print:
            # Work on the current db only
if stmt_db != db:
continue
try:
rows = self.query(stmt)
for row in rows:
log.debug(row)
except Exception:
# If there's an exception (e.g. the table is renamed), just skip it
continue
def add_file_entry(self, filepath):
log.debug("Cleanup file entry added: {}".format(filepath))
self.files_to_clean.append(filepath)
def remove_file_entry(self, filepath):
log.debug("Cleanup file entry removed: {}".format(filepath))
self.files_to_clean.remove(filepath)
def remove_all_file_entries(self):
log.debug("Removing all cleanup file entries")
self.files_to_clean = []
def add_sql_entry(self, sql):
log.debug("Cleanup SQL entry added: {}".format(sql))
self.sqls_to_execute.append(sql)
def gen_drop_sqls(self):
        # Always drop triggers first; otherwise there's a small window
        # in which the trigger exists without the corresponding
        # _chg table. If a change happens during this window, then replication
        # will break
log.info("Generating drop trigger queries")
for entry in self.to_drop:
if entry["type"] == "trigger":
db = entry["db"]
trigger_name = entry["name"]
sql_query = "DROP TRIGGER IF EXISTS `{}`".format(escape(trigger_name))
self.sqls_to_execute.append((sql_query, db))
log.info("Generating drop table queries")
for entry in self.to_drop:
if entry["type"] == "table":
db = entry["db"]
table = entry["name"]
partition_method = self.get_partition_method(db, table)
if partition_method in ("RANGE", "LIST"):
                    # MySQL doesn't allow removing all the partitions in a
                    # partitioned table, so we will leave a single partition
                    # there before dropping the table
if entry["partitions"]:
entry["partitions"].pop()
# Gradually drop partitions, so that we will not hold
# metadata lock for too long and block requests with
# single drop table
log.debug(
"{}/{} using {} partitioning method".format(
db, table, partition_method
)
)
for partition_name in entry["partitions"]:
# As of version 8.0.17, MySQL does not support
# "DROP PARTITION IF EXISTS".
sql_query = (
"ALTER TABLE `{}` " "DROP PARTITION `{}`"
).format(escape(table), escape(partition_name))
self.sqls_to_execute.append((sql_query, db))
sql_query = "DROP TABLE IF EXISTS `{}`".format(table)
self.sqls_to_execute.append((sql_query, db))
self.to_drop = []
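    # Illustrative note (not part of the original source): for a hypothetical
    # RANGE-partitioned leftover table `db1`.`__osc_new_tbl` with partitions
    # p0, p1 and p2, the loop above would queue roughly this sequence:
    #   ALTER TABLE `__osc_new_tbl` DROP PARTITION `p0`
    #   ALTER TABLE `__osc_new_tbl` DROP PARTITION `p1`
    #   DROP TABLE IF EXISTS `__osc_new_tbl`
    # The last partition (p2) is intentionally kept and removed together with
    # the table, since MySQL refuses to drop every partition of a table.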
def add_drop_table_entry(self, db, table, partitions=None):
self.to_drop.append(
{"type": "table", "db": db, "name": table, "partitions": partitions}
)
self.tables_to_print.append(("SELECT * FROM `{}`".format(table), db))
def remove_drop_table_entry(self, db, table_name):
for entry in self.to_drop:
if entry["type"] == "table" and entry["name"] == table_name:
self.to_drop.remove(entry)
def add_drop_trigger_entry(self, db, trigger_name):
self.to_drop.append({"type": "trigger", "db": db, "name": trigger_name})
def run_ddl(self):
"""
Try to search all the garbage left over by OSC and clean them
"""
self.cleanup()
def search_for_tables(self):
"""
        List all the tables that may have been left over by OSC in the last run
"""
if self.databases:
for db in self.databases:
results = self.query(
sql.get_all_osc_tables(db),
(
constant.PREFIX,
constant.PREFIX,
db,
),
)
for row in results:
self.add_drop_table_entry(db, row["TABLE_NAME"])
else:
results = self.query(
sql.get_all_osc_tables(),
(
constant.PREFIX,
constant.PREFIX,
),
)
for row in results:
self.add_drop_table_entry(row["db"], row["TABLE_NAME"])
def search_for_triggers(self):
"""
        List all the triggers that may have been left over by OSC in the last run
"""
if self.databases:
for db in self.databases:
results = self.query(
sql.get_all_osc_triggers(db),
(
constant.PREFIX,
constant.PREFIX,
db,
),
)
for row in results:
self.add_drop_trigger_entry(db, row["TRIGGER_NAME"])
else:
results = self.query(
sql.get_all_osc_triggers(),
(
constant.PREFIX,
constant.PREFIX,
),
)
for row in results:
self.add_drop_trigger_entry(row["db"], row["TRIGGER_NAME"])
def search_for_files(self):
"""
List all the files that may have been left over by OSC in previous runs
TODO: cleaning up is also done a lot in copy.py, so a future
improvement here could be to refactor OSC in such a way that the
cleanup part can be easily reused. T28154647
"""
datadir = self.query(sql.select_as("@@datadir", "dir"))[0]["dir"]
for root, _, files in os.walk(datadir):
for fname in files:
if re.match(r"__osc_.*\.[0-9]+", fname):
self.add_file_entry(os.path.join(root, fname))
def kill_osc(self):
"""
Kill the running OSC process if there's one running.
"""
result = self.query(
"SELECT IS_USED_LOCK(%s) as owner_id", (constant.OSC_LOCK_NAME,)
)
owner_id = result[0]["owner_id"]
if owner_id:
log.info(
"Named lock: {} is held by {}. Killing it to free up "
"the lock".format(constant.OSC_LOCK_NAME, owner_id)
)
# If we kill the mysql connection which is holding the named lock,
# then OSC's python process will encounter a "MySQL has gone away"
# error, and do the cleanup, then exit
self.execute_sql(sql.kill_proc, (owner_id,))
else:
log.info("No other OSC is running at the moment")
def cleanup_all(self):
"""
Try to list all the possible files/tables left over by an unclean OSC
exit, and remove all of them
"""
if self.kill_first:
self.kill_osc()
log.info("Wait 5 seconds for the running OSC to cleanup its own stuff ")
time.sleep(5)
if self.kill_only:
return
# Cleanup triggers first, otherwise DML against original table may fail
# with a "table not exist" error. Because the table which is referenced
# in the trigger was dropped first.
self.search_for_triggers()
self.search_for_tables()
self.search_for_files()
# cleanup is a critical part, We need to make sure there's no other
# OSC running
self.get_osc_lock()
self.cleanup()
self.release_osc_lock()
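# --- Illustrative usage sketch (not part of the original file) ---
# A minimal sketch of how this payload could be driven for a standalone
# cleanup run. The keyword arguments mirror the ones read in __init__ above;
# any connection settings required by the Payload base class (not shown in
# this file) are omitted, so treat this as an assumed sketch rather than a
# verified invocation:
#
#     payload = CleanupPayload(database=["test_db"], kill=True)
#     payload.cleanup_all()   # kill a running OSC, then drop leftover triggers,
#                             # tables and files under the OSC named lock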
---
blob_id: 4902bc3a8ed92e3e0a2b7f1d638f325b9bbbe66d
directory_id: 24021fe2eb98aca9bc35f5b017fa8c631581bb54
path: /vulnerabilities/tests/test_pysec.py
content_id: f5d8107ac7f6f024f49e84a9a5a5d3192a41f3b9
detected_licenses: ["Apache-2.0", "CC-BY-SA-4.0", "Python-2.0"]
license_type: permissive
repo_name: nexB/vulnerablecode
snapshot_id: 0253160c1b04cd992899bf5b74ad76ac125a68ae
revision_id: eec05bb0f796d743e408a1b402df8abfc8344669
branch_name: refs/heads/main
visit_date: 2023-09-03T21:48:21.368810
revision_date: 2023-08-31T16:52:41
committer_date: 2023-08-31T16:52:41
github_id: 91,780,998
star_events_count: 371
fork_events_count: 181
gha_license_id: Apache-2.0
gha_event_created_at: 2023-09-11T19:24:24
gha_created_at: 2017-05-19T07:56:17
gha_language: Python
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 1,759
extension: py
filename: test_pysec.py
content:
#
# Copyright (c) nexB Inc. and others. All rights reserved.
# VulnerableCode is a trademark of nexB Inc.
# SPDX-License-Identifier: Apache-2.0
# See http://www.apache.org/licenses/LICENSE-2.0 for the license text.
# See https://github.com/nexB/vulnerablecode for support or download.
# See https://aboutcode.org for more information about nexB OSS projects.
#
import json
import os
from unittest import TestCase
from vulnerabilities.importers.osv import parse_advisory_data
from vulnerabilities.tests.util_tests import VULNERABLECODE_REGEN_TEST_FIXTURES as REGEN
from vulnerabilities.tests.util_tests import check_results_against_json
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
TEST_DATA = os.path.join(BASE_DIR, "test_data/pysec")
class TestPyPIImporter(TestCase):
def test_to_advisories_with_summary(self):
with open(os.path.join(TEST_DATA, "pysec-advisories_with_summary.json")) as f:
mock_response = json.load(f)
results = parse_advisory_data(mock_response, "pypi").to_dict()
expected_file = os.path.join(TEST_DATA, "pysec-advisories_with_summary-expected.json")
check_results_against_json(
results=results,
expected_file=expected_file,
regen=REGEN,
)
def test_to_advisories_without_summary(self):
with open(os.path.join(TEST_DATA, "pysec-advisories_without_summary.json")) as f:
mock_response = json.load(f)
results = parse_advisory_data(mock_response, "pypi").to_dict()
expected_file = os.path.join(TEST_DATA, "pysec-advisories_without_summary-expected.json")
check_results_against_json(
results=results,
expected_file=expected_file,
regen=REGEN,
)
---
blob_id: 7860a146531b2fe6f6810d0f038d4776ec29bcd2
directory_id: 84961c5b8a190f7e2a35107bc9abf2259617e8c2
path: /Chapter03/fakelogger.py
content_id: 107303c49a515b45dd0fff2836dde1cbd57196a5
detected_licenses: ["MIT"]
license_type: permissive
repo_name: PacktPublishing/Software-Architecture-with-Python
snapshot_id: f1002dafba83d74a7096dfad4ac4cd338ee9e425
revision_id: 446b78ba0f5e23039ae0de71fb433424170fac67
branch_name: refs/heads/master
visit_date: 2023-02-06T11:11:46.699458
revision_date: 2023-01-30T09:38:05
committer_date: 2023-01-30T09:38:05
github_id: 90,152,034
star_events_count: 126
fork_events_count: 91
gha_license_id: MIT
gha_event_created_at: 2019-05-08T06:46:58
gha_created_at: 2017-05-03T13:28:23
gha_language: Python
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 1,352
extension: py
filename: fakelogger.py
content:
# Code Listing #2
import logging
class FakeLogger(object):
""" A class that fakes the interface of the
logging.Logger object in a minimalistic fashion """
def __init__(self):
self.lvl = logging.INFO
def setLevel(self, level):
""" Set the logging level """
self.lvl = level
def _log(self, msg, *args):
""" Perform the actual logging """
# Since this is a fake object - no actual logging is done.
# Instead the message is simply printed to standard output.
        print(msg, end=' ')
for arg in args:
print(arg, end=' ')
print()
def info(self, msg, *args):
""" Log at info level """
if self.lvl<=logging.INFO: return self._log(msg, *args)
def debug(self, msg, *args):
""" Log at debug level """
if self.lvl<=logging.DEBUG: return self._log(msg, *args)
def warning(self, msg, *args):
""" Log at warning level """
if self.lvl<=logging.WARNING: return self._log(msg, *args)
def error(self, msg, *args):
""" Log at error level """
if self.lvl<=logging.ERROR: return self._log(msg, *args)
def critical(self, msg, *args):
""" Log at critical level """
if self.lvl<=logging.CRITICAL: return self._log(msg, *args)
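# --- Illustrative usage sketch (not part of the original listing) ---
# A minimal, assumed demo of swapping FakeLogger in where a real
# logging.Logger would normally be injected. Runs standalone:
if __name__ == '__main__':
    logger = FakeLogger()
    logger.info('Application', 'started')        # printed: default level is INFO
    logger.debug('config dump')                  # suppressed at INFO level
    logger.setLevel(logging.DEBUG)
    logger.debug('config dump', 'now visible')   # printed after lowering the level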
---
blob_id: c8cd40c7378d68b7c65302e86b706233b73cd400
directory_id: 6f9375e722264d863734c6f44e883c9d8799386d
path: /ui/tests/unit/toolkit/test_configuration.py
content_id: e92a8b81d27932683a2da951baa3ee7475c15336
detected_licenses: ["PostgreSQL"]
license_type: permissive
repo_name: dalibo/temboard
snapshot_id: 8e5fcf5604c5f234a5c1be7d75e871251d80e61e
revision_id: d26cb848f4b064e05d5e422ecc001889f224bd74
branch_name: refs/heads/master
visit_date: 2023-08-31T05:09:26.385396
revision_date: 2023-08-30T10:09:12
committer_date: 2023-08-30T10:09:12
github_id: 68,793,814
star_events_count: 400
fork_events_count: 56
gha_license_id: NOASSERTION
gha_event_created_at: 2023-09-12T13:50:42
gha_created_at: 2016-09-21T07:57:13
gha_language: Python
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 5,836
extension: py
filename: test_configuration.py
content:
# -*- coding: utf-8 -*-
import argparse
import pytest
def test_spec_and_value():
from temboardui.toolkit.configuration import OptionSpec, Value
spec = OptionSpec(section='temboard', name='verbose', default=False)
assert repr(spec)
value = Value('temboard_verbose', True, origin='test')
assert repr(value)
assert value.name in {spec}
def test_spec_lifetime(mocker):
from temboardui.toolkit.configuration import (
OptionSpec, MergedConfiguration, UserError,
)
def my_validator(x):
if x == '__ERROR__':
raise ValueError('Triggered error')
return x
config = MergedConfiguration()
environ = dict(TEMBOARD_MY_OPT=b'__ERROR__')
# Start with an empty configuration, nothing is loaded.
config.load(environ=environ)
assert 'my' not in config
# Extend configuration by adding a spec.
config.add_specs([OptionSpec(
'my', 'opt', default='defval', validator=my_validator,
)])
with pytest.raises(UserError):
config.load(environ=environ)
environ = dict(TEMBOARD_MY_OPT='envval')
config.load(environ=environ)
assert 'my' in config
assert 'envval' == config.my.opt
# Change source. That could be a file.
environ = dict(TEMBOARD_MY_OPT='new_envval')
config.load(environ=environ)
# Ensure envval is *not* reloaded
assert 'envval' == config.my.opt
# Assert source is properly reread.
config.load(environ=environ, reload_=True)
assert 'new_envval' == config.my.opt
def test_argument_for_spec(capsys):
from temboardui.toolkit.configuration import OptionSpec
parser = argparse.ArgumentParser()
spec = OptionSpec('section', 'name', default=2345)
spec.add_argument(
parser, "--section-name",
help="Name: percent%% %(default)s",
)
with pytest.raises(SystemExit):
parser.parse_args(['--help'])
out, _ = capsys.readouterr()
assert "--section-name SECTION_NAME" in out
assert "Name: percent% 2345" in out
args = parser.parse_args([])
assert not hasattr(args, 'section_name')
args = parser.parse_args(['--section-name=toto'])
assert 'toto' == args.section_name
def test_remove_specs():
from temboardui.toolkit.configuration import (
OptionSpec, MergedConfiguration
)
def my_validator(x):
if x == '__ERROR__':
raise ValueError('Triggered error')
return x
config = MergedConfiguration()
specs = [OptionSpec('my', 'opt', default='defval', validator=my_validator)]
config.add_specs(specs)
environ = dict(TEMBOARD_MY_OPT='envval')
config.load(environ=environ)
# Remove specs
config.remove_specs(specs)
# Ensure value is unloaded from config
assert 'my' not in config
    # Ensure value is not reread from env. It should trigger an error if it's
    # read.
environ = dict(TEMBOARD_MY_OPT='__ERROR__')
config.load(environ=environ, reload_=True)
assert 'my' not in config
# Remove spec twice.
config.remove_specs(specs)
def test_load(mocker):
mocker.patch('temboardui.toolkit.configuration.os.chdir')
from argparse import Namespace
from temboardui.toolkit.configuration import (
OptionSpec, MergedConfiguration,
)
from temboardui.toolkit.app import configparser
s = 'temboard'
specs = [
# to test argument parsing
OptionSpec(section=s, name='fromarg', default='DEFVAL'),
# to test environment parsing
OptionSpec(section=s, name='fromenv', default='DEFVAL'),
# to test file loading
OptionSpec(section=s, name='fromfile', default='DEFVAL'),
# to test default value
OptionSpec(section=s, name='fromdefault', default='DEFVAL'),
]
args = Namespace(temboard_fromarg='ARGVAL')
environ = dict(
TEMBOARD_FROMENV='ENVVAL',
# These should be ignored.
TEMBOARD_FROMARG='ENVVAL',
PATH='',
)
parser = configparser.RawConfigParser()
parser.add_section(s)
parser.set(s, 'fromfile', 'FILEVAL')
# These should be ignored.
parser.set(s, 'fromenv', 'FILEVAL')
parser.set(s, 'fromarg', 'FILEVAL')
config = MergedConfiguration(specs=specs)
config.load(args=args, environ=environ, parser=parser, pwd='/pouet')
assert 'DEFVAL' == config.temboard.fromdefault
assert 'ARGVAL' == config.temboard.fromarg
assert 'ENVVAL' == config.temboard.fromenv
assert 'FILEVAL' == config.temboard.fromfile
def test_load_configparser():
from temboardui.toolkit.configuration import iter_configparser_values
from temboardui.toolkit.app import configparser
parser = configparser.RawConfigParser()
parser.add_section('section0')
parser.set('section0', 'option0', 'pouet')
values = list(iter_configparser_values(parser, 'my.cfg'))
assert 1 == len(values)
assert 'pouet' == values[0].value
assert 'section0_option0' == values[0].name
assert 'my.cfg' == values[0].origin
def test_pwd_denied(mocker):
mod = 'temboardui.toolkit.configuration'
mocker.patch(mod + '.iter_configparser_values')
cd = mocker.patch(mod + '.os.chdir')
from temboardui.toolkit.configuration import MergedConfiguration
config = MergedConfiguration()
config.temboard = dict(configfile='pouet')
cd.side_effect = [None, OSError()]
config.load(parser=mocker.Mock(name='parser'), pwd='/etc/pouet')
def test_required():
from temboardui.toolkit.configuration import (
MergedConfiguration, OptionSpec, UserError,
iter_defaults,
)
spec = OptionSpec('section', 'req', default=OptionSpec.REQUIRED)
config = MergedConfiguration(specs=[spec])
assert not list(iter_defaults(config.specs))
with pytest.raises(UserError):
config.check_required()
---
blob_id: 7f008e8c3fdc661f3a61cbd29165bdce176e8ef2
directory_id: 73a0f661f1423d63e86489d4b2673f0103698aab
path: /python/oneflow/test/modules/test_global_higher_derivative_div.py
content_id: 540272d2418b764dde3770b0c0d4aa4a9458c288
detected_licenses: ["Apache-2.0"]
license_type: permissive
repo_name: Oneflow-Inc/oneflow
snapshot_id: 4fc3e081e45db0242a465c4330d8bcc8b21ee924
revision_id: 0aab78ea24d4b1c784c30c57d33ec69fe5605e4a
branch_name: refs/heads/master
visit_date: 2023-08-25T16:58:30.576596
revision_date: 2023-08-22T14:15:46
committer_date: 2023-08-22T14:15:46
github_id: 81,634,683
star_events_count: 5,495
fork_events_count: 786
gha_license_id: Apache-2.0
gha_event_created_at: 2023-09-14T09:44:31
gha_created_at: 2017-02-11T06:09:53
gha_language: C++
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 3,399
extension: py
filename: test_global_higher_derivative_div.py
content:
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
import numpy as np
import oneflow as flow
import oneflow.unittest
from oneflow.test_utils.automated_test_util import *
def _test_global_div_grad_grad_impl(test_case, placement):
x_shape = [8, 8, 8, 8]
y_shape = [8, 8]
if random_bool().value():
x_shape, y_shape = y_shape, x_shape
x = random_tensor(len(x_shape), *x_shape).to_global(
placement=placement, sbp=random_sbp(placement, max_dim=2)
)
y = random_tensor(len(y_shape), *y_shape).to_global(
placement=placement, sbp=random_sbp(placement, max_dim=2)
)
z = torch.div(x, y)
init_grad_z = random_tensor(len(z.oneflow.shape), *z.oneflow.shape).to_global(
placement=placement, sbp=random_sbp(placement, max_dim=2)
)
init_grad_x = random_tensor(len(x.oneflow.shape), *x.oneflow.shape).to_global(
placement=placement, sbp=random_sbp(placement, max_dim=2)
)
init_grad_y = random_tensor(len(y.oneflow.shape), *y.oneflow.shape).to_global(
placement=placement, sbp=random_sbp(placement, max_dim=2)
)
dx_and_dy = torch.autograd.grad(z, [x, y], init_grad_z, True, True)
test_case.assertTrue(
np.allclose(
dx_and_dy.pytorch[0].detach().cpu().numpy(),
dx_and_dy.oneflow[0].detach().numpy(),
rtol=1e-4,
atol=1e-4,
)
)
test_case.assertTrue(
np.allclose(
dx_and_dy.pytorch[1].detach().cpu().numpy(),
dx_and_dy.oneflow[1].detach().numpy(),
rtol=1e-3,
atol=1e-4,
)
)
ddx_and_ddy_and_ddz = torch.autograd.grad(
dx_and_dy, [x, y, init_grad_z], [init_grad_x, init_grad_y], True, True
)
test_case.assertTrue(
np.allclose(
ddx_and_ddy_and_ddz.pytorch[0].detach().cpu().numpy(),
ddx_and_ddy_and_ddz.oneflow[0].detach().numpy(),
rtol=1e-3,
atol=1e-3,
)
)
test_case.assertTrue(
np.allclose(
ddx_and_ddy_and_ddz.pytorch[1].detach().cpu().numpy(),
ddx_and_ddy_and_ddz.oneflow[1].detach().numpy(),
rtol=1e-2,
atol=1e-3,
)
)
test_case.assertTrue(
np.allclose(
ddx_and_ddy_and_ddz.pytorch[2].detach().cpu().numpy(),
ddx_and_ddy_and_ddz.oneflow[2].detach().numpy(),
rtol=1e-3,
atol=1e-3,
)
)
class TestGlobalDivHigherDerivative(flow.unittest.TestCase):
@unittest.skip("skip for now, becase it failed 22 times in past week")
@globaltest
def test_global_div_grad_grad(test_case):
for placement in all_placement():
for i in range(1):
_test_global_div_grad_grad_impl(test_case, placement)
if __name__ == "__main__":
unittest.main()
---
blob_id: b732b0d6c8e242d46cd22009ef3aac2e204b8b26
directory_id: 6415c13547e6943f7b65337cbd2790c4e18723c8
path: /netbox/dcim/models/power.py
content_id: 3377a9edb6df5c0f94490f617bc277712bf48a31
detected_licenses: ["Apache-2.0"]
license_type: permissive
repo_name: netbox-community/netbox
snapshot_id: 287254a9698270d51f57b1297118e9f01536da5a
revision_id: 506884bc4dc70299db3e2a7ad577dd7fd808065e
branch_name: refs/heads/develop
visit_date: 2023-08-24T09:11:46.685121
revision_date: 2023-08-23T18:44:14
committer_date: 2023-08-23T18:44:14
github_id: 52,796,596
star_events_count: 8,122
fork_events_count: 1,817
gha_license_id: Apache-2.0
gha_event_created_at: 2023-09-14T18:16:01
gha_created_at: 2016-02-29T14:15:46
gha_language: Python
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 5,519
extension: py
filename: power.py
content:
from django.contrib.contenttypes.fields import GenericRelation
from django.core.exceptions import ValidationError
from django.core.validators import MaxValueValidator, MinValueValidator
from django.db import models
from django.urls import reverse
from django.utils.translation import gettext as _
from dcim.choices import *
from netbox.config import ConfigItem
from netbox.models import PrimaryModel
from utilities.validators import ExclusionValidator
from .device_components import CabledObjectModel, PathEndpoint
__all__ = (
'PowerFeed',
'PowerPanel',
)
#
# Power
#
class PowerPanel(PrimaryModel):
"""
A distribution point for electrical power; e.g. a data center RPP.
"""
site = models.ForeignKey(
to='Site',
on_delete=models.PROTECT
)
location = models.ForeignKey(
to='dcim.Location',
on_delete=models.PROTECT,
blank=True,
null=True
)
name = models.CharField(
max_length=100
)
# Generic relations
contacts = GenericRelation(
to='tenancy.ContactAssignment'
)
images = GenericRelation(
to='extras.ImageAttachment'
)
prerequisite_models = (
'dcim.Site',
)
class Meta:
ordering = ['site', 'name']
constraints = (
models.UniqueConstraint(
fields=('site', 'name'),
name='%(app_label)s_%(class)s_unique_site_name'
),
)
def __str__(self):
return self.name
def get_absolute_url(self):
return reverse('dcim:powerpanel', args=[self.pk])
def clean(self):
super().clean()
# Location must belong to assigned Site
if self.location and self.location.site != self.site:
raise ValidationError(
f"Location {self.location} ({self.location.site}) is in a different site than {self.site}"
)
class PowerFeed(PrimaryModel, PathEndpoint, CabledObjectModel):
"""
An electrical circuit delivered from a PowerPanel.
"""
power_panel = models.ForeignKey(
to='PowerPanel',
on_delete=models.PROTECT,
related_name='powerfeeds'
)
rack = models.ForeignKey(
to='Rack',
on_delete=models.PROTECT,
blank=True,
null=True
)
name = models.CharField(
max_length=100
)
status = models.CharField(
max_length=50,
choices=PowerFeedStatusChoices,
default=PowerFeedStatusChoices.STATUS_ACTIVE
)
type = models.CharField(
max_length=50,
choices=PowerFeedTypeChoices,
default=PowerFeedTypeChoices.TYPE_PRIMARY
)
supply = models.CharField(
max_length=50,
choices=PowerFeedSupplyChoices,
default=PowerFeedSupplyChoices.SUPPLY_AC
)
phase = models.CharField(
max_length=50,
choices=PowerFeedPhaseChoices,
default=PowerFeedPhaseChoices.PHASE_SINGLE
)
voltage = models.SmallIntegerField(
default=ConfigItem('POWERFEED_DEFAULT_VOLTAGE'),
validators=[ExclusionValidator([0])]
)
amperage = models.PositiveSmallIntegerField(
validators=[MinValueValidator(1)],
default=ConfigItem('POWERFEED_DEFAULT_AMPERAGE')
)
max_utilization = models.PositiveSmallIntegerField(
validators=[MinValueValidator(1), MaxValueValidator(100)],
default=ConfigItem('POWERFEED_DEFAULT_MAX_UTILIZATION'),
help_text=_("Maximum permissible draw (percentage)")
)
available_power = models.PositiveIntegerField(
default=0,
editable=False
)
clone_fields = (
'power_panel', 'rack', 'status', 'type', 'mark_connected', 'supply', 'phase', 'voltage', 'amperage',
'max_utilization',
)
prerequisite_models = (
'dcim.PowerPanel',
)
class Meta:
ordering = ['power_panel', 'name']
constraints = (
models.UniqueConstraint(
fields=('power_panel', 'name'),
name='%(app_label)s_%(class)s_unique_power_panel_name'
),
)
def __str__(self):
return self.name
def get_absolute_url(self):
return reverse('dcim:powerfeed', args=[self.pk])
def clean(self):
super().clean()
# Rack must belong to same Site as PowerPanel
if self.rack and self.rack.site != self.power_panel.site:
raise ValidationError("Rack {} ({}) and power panel {} ({}) are in different sites".format(
self.rack, self.rack.site, self.power_panel, self.power_panel.site
))
# AC voltage cannot be negative
if self.voltage < 0 and self.supply == PowerFeedSupplyChoices.SUPPLY_AC:
raise ValidationError({
"voltage": "Voltage cannot be negative for AC supply"
})
def save(self, *args, **kwargs):
# Cache the available_power property on the instance
kva = abs(self.voltage) * self.amperage * (self.max_utilization / 100)
if self.phase == PowerFeedPhaseChoices.PHASE_3PHASE:
self.available_power = round(kva * 1.732)
else:
self.available_power = round(kva)
super().save(*args, **kwargs)
@property
def parent_object(self):
return self.power_panel
def get_type_color(self):
return PowerFeedTypeChoices.colors.get(self.type)
def get_status_color(self):
return PowerFeedStatusChoices.colors.get(self.status)
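# --- Illustrative calculation sketch (not part of the original module) ---
# Standalone arithmetic mirroring how save() derives available_power, so the
# formula can be followed without a Django environment. The sample figures
# (208 V, 30 A, 80% max utilization) are made up for illustration.
def _example_available_power(voltage=208, amperage=30, max_utilization=80, three_phase=True):
    va = abs(voltage) * amperage * (max_utilization / 100)   # 208 * 30 * 0.80 = 4992 VA
    return round(va * 1.732) if three_phase else round(va)   # three-phase: ~8646 VA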
---
blob_id: 5e5b2fb20b3b7a84f7874b89f83343abf9ffd014
directory_id: edc1134436a79ca883a0d25f3c8dfffc4235c514
path: /tests/infer/reparam/test_discrete_cosine.py
content_id: 17217e74a194f5b28fb256c7af769dd876115ac3
detected_licenses: ["Apache-2.0"]
license_type: permissive
repo_name: pyro-ppl/pyro
snapshot_id: 2283d8ca528fc090c724a3a6e0f344e505ebbf77
revision_id: 0e82cad30f75b892a07e6c9a5f9e24f2cb5d0d81
branch_name: refs/heads/dev
visit_date: 2023-08-18T00:35:28.014919
revision_date: 2023-08-06T21:01:36
committer_date: 2023-08-06T21:01:36
github_id: 94,506,832
star_events_count: 3,647
fork_events_count: 606
gha_license_id: Apache-2.0
gha_event_created_at: 2023-09-14T13:52:14
gha_created_at: 2017-06-16T05:03:47
gha_language: Python
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 4,030
extension: py
filename: test_discrete_cosine.py
content:
# Copyright Contributors to the Pyro project.
# SPDX-License-Identifier: Apache-2.0
import pytest
import torch
from torch.autograd import grad
import pyro
import pyro.distributions as dist
from pyro import poutine
from pyro.infer.reparam import DiscreteCosineReparam
from tests.common import assert_close
from .util import check_init_reparam
# Test helper to extract central moments from samples.
def get_moments(x):
n = x.size(0)
x = x.reshape(n, -1)
mean = x.mean(0)
x = x - mean
std = (x * x).mean(0).sqrt()
x = x / std
corr = (x.unsqueeze(-1) * x.unsqueeze(-2)).mean(0).reshape(-1)
return torch.cat([mean, std, corr])
@pytest.mark.parametrize("smooth", [0.0, 0.5, 1.0, 2.0])
@pytest.mark.parametrize(
"shape,dim",
[
((6,), -1),
(
(
2,
5,
),
-1,
),
((4, 2), -2),
((2, 3, 1), -2),
],
ids=str,
)
def test_normal(shape, dim, smooth):
loc = torch.empty(shape).uniform_(-1.0, 1.0).requires_grad_()
scale = torch.empty(shape).uniform_(0.5, 1.5).requires_grad_()
def model():
with pyro.plate_stack("plates", shape[:dim]):
with pyro.plate("particles", 10000):
pyro.sample("x", dist.Normal(loc, scale).expand(shape).to_event(-dim))
value = poutine.trace(model).get_trace().nodes["x"]["value"]
expected_probe = get_moments(value)
rep = DiscreteCosineReparam(dim=dim, smooth=smooth)
reparam_model = poutine.reparam(model, {"x": rep})
trace = poutine.trace(reparam_model).get_trace()
assert isinstance(trace.nodes["x_dct"]["fn"], dist.TransformedDistribution)
assert isinstance(trace.nodes["x"]["fn"], dist.Delta)
value = trace.nodes["x"]["value"]
actual_probe = get_moments(value)
assert_close(actual_probe, expected_probe, atol=0.1)
for actual_m, expected_m in zip(actual_probe[:10], expected_probe[:10]):
expected_grads = grad(expected_m.sum(), [loc, scale], retain_graph=True)
actual_grads = grad(actual_m.sum(), [loc, scale], retain_graph=True)
assert_close(actual_grads[0], expected_grads[0], atol=0.05)
assert_close(actual_grads[1], expected_grads[1], atol=0.05)
@pytest.mark.parametrize("smooth", [0.0, 0.5, 1.0, 2.0])
@pytest.mark.parametrize(
"shape,dim",
[
((6,), -1),
(
(
2,
5,
),
-1,
),
((4, 2), -2),
((2, 3, 1), -2),
],
ids=str,
)
def test_uniform(shape, dim, smooth):
def model():
with pyro.plate_stack("plates", shape[:dim]):
with pyro.plate("particles", 10000):
pyro.sample("x", dist.Uniform(0, 1).expand(shape).to_event(-dim))
value = poutine.trace(model).get_trace().nodes["x"]["value"]
expected_probe = get_moments(value)
reparam_model = poutine.reparam(
model, {"x": DiscreteCosineReparam(dim=dim, smooth=smooth)}
)
trace = poutine.trace(reparam_model).get_trace()
assert isinstance(trace.nodes["x_dct"]["fn"], dist.TransformedDistribution)
assert isinstance(trace.nodes["x"]["fn"], dist.Delta)
value = trace.nodes["x"]["value"]
actual_probe = get_moments(value)
assert_close(actual_probe, expected_probe, atol=0.1)
@pytest.mark.parametrize("smooth", [0.0, 0.5, 1.0, 2.0])
@pytest.mark.parametrize(
"shape,dim",
[
((6,), -1),
(
(
2,
5,
),
-1,
),
((4, 2), -2),
((2, 3, 1), -2),
],
ids=str,
)
def test_init(shape, dim, smooth):
loc = torch.empty(shape).uniform_(-1.0, 1.0).requires_grad_()
scale = torch.empty(shape).uniform_(0.5, 1.5).requires_grad_()
def model():
with pyro.plate_stack("plates", shape[:dim]):
return pyro.sample("x", dist.Normal(loc, scale).to_event(-dim))
check_init_reparam(model, DiscreteCosineReparam(dim=dim, smooth=smooth))