code
stringlengths 22
1.05M
| apis
listlengths 1
3.31k
| extract_api
stringlengths 75
3.25M
|
|---|---|---|
import time
from browserist import Browser
from config.item_page import ITEM_PAGE
from config.model import CheckOutPage, Item
def add_to_cart(browser: Browser, amount: int, item: Item):
    """Open the item's page, set the quantity and add it to the cart.

    The quantity widget starts at 1, so the increase button is pressed
    ``amount - 1`` times. When the page defines a confirmation modal,
    wait for it to show up before returning.
    """
    browser.open.url(item.url)
    extra_clicks = amount - 1  # The item count always starts at 1.
    for _ in range(extra_clicks):
        browser.click.button(ITEM_PAGE.incremental_increase_amount_button)
        time.sleep(0.1)
    browser.click.button(ITEM_PAGE.add_to_cart_button)
    if ITEM_PAGE.confirmation_modal:
        browser.wait.for_element(ITEM_PAGE.confirmation_modal)
        time.sleep(1)
def check_out(browser: Browser, check_out_page: CheckOutPage):
    """Navigate the browser to the check-out page."""
    target_url = check_out_page.url
    browser.open.url(target_url)
|
[
"time.sleep"
] |
[((374, 389), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (384, 389), False, 'import time\n'), ((553, 566), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (563, 566), False, 'import time\n')]
|
from random import randint, choice
from math import sin, cos, radians, exp, sqrt, fabs
import pygame
from pygame.sprite import Sprite
# from pygame.math import vec2d
from utils import SIM_COLORS, SCALE, SIGN
from utils import euclidean_distance, vec2d, Rotate2D
import numpy as np
class Agent(Sprite):
    """An agent sprite that bounces off walls and changes its
    direction from time to time.

    Movement is driven by a social-force pedestrian model: a desired
    force towards the next waypoint, a Moussaid-Helbing social force
    from neighbors, a repulsive obstacle force, and a look-ahead force
    that is currently a zero-vector placeholder.
    """
    # __slots__ = ('id', 'screen', 'game', 'field', 'image', \
    #     'vmax', 'position', 'velocity', 'acceleration'\
    #     'radius', 'relaxation_time', 'direction', 'neighbors'\
    #     'forces, force_factors', 'waypoints')
    def __init__(self, agent_id, screen, game, agent_image,
                 field, init_position, init_direction, max_speed, waypoints,
                 radius = 0.2, relaxation_time = 0.5, atype = 0):
        """Create a new Agent.

        screen:
            The screen on which the agent lives (must be a
            pygame Surface object, such as pygame.display)
        game:
            The game object that holds information about the
            game world.
        agent_image:
            Image representing the agent in the simulation
        field:
            A Rect specifying the 'playing field' boundaries.
            The agent will bounce off the 'walls' of this
            field.
        init_position:
            A vec2d or a pair specifying the initial position
            of the agent on the screen in metres
        init_direction:
            A vec2d or a pair specifying the initial direction
            of the agent. Must have an angle that is a
            multiple of 45 degrees.
        max_speed:
            maximum agent speed, in (m/s)
        waypoints:
            a list of waypoints for the agent to follow
        radius:
            agent body radius, in metres
        relaxation_time:
            relaxation time parameter of the force model, in seconds
        atype:
            agent type (0 or 1); only affects the drawing colour
        """
        Sprite.__init__(self)
        self._id = agent_id
        self.screen = screen
        self.game = game
        self._vmax = max_speed
        self._field = field
        self._radius = radius
        self._relaxation_time = relaxation_time
        self._type = atype
        # the current image representing the agent
        self._image = agent_image
        # A vector specifying the agent's position on the screen (metres)
        self._position = vec2d(init_position)
        self.prev_pos = vec2d(self._position)
        # The direction is a normalized vector
        self._direction = vec2d(init_direction).normalized()
        self._velocity = vec2d(init_direction)
        self._acceleration = vec2d(0.0, 0.0)
        # Waypoints are followed in order and cycled (see _compute_desired_force)
        self._waypoints = waypoints
        self._waypoint_index = 0
        self._neighbors = []
        # default: no forces act on the agent until social_move() runs
        self._social_force = vec2d(0.0, 0.0)
        self._desired_force = vec2d(0.0, 0.0)
        self._obstacle_force = vec2d(0.0, 0.0)
        self._lookahead_force = vec2d(0.0, 0.0)

    def draw(self):
        """
        Draw the agent onto the screen that is set in the constructor
        """
        # convert metre coordinates to pixels
        x, y = int(self._position.x*SCALE), int(self._position.y*SCALE)
        r = int(self._radius*SCALE)
        # poly = [(x-r/2, y), (x, y-40), (x+r/2, y), (x, y+r/2)]
        # rotated diamond outline; currently unused because the polygon
        # drawing below is commented out
        poly = np.array([[x-r/2, y], [x, y-30], [x+r/2, y], [x, y+r/2]])
        rpoly = Rotate2D(poly, (x,y), radians(self._direction.get_angle()))
        # self.draw_rect = self._image.get_rect().move(
        #     self._position.x - self._image_w / 2,
        #     self._position.y - self._image_h / 2)
        # self.screen.blit(self._image, self.draw_rect)
        # agent representation: colour encodes the agent type
        if self._type == 0:
            pygame.draw.circle(self.screen, SIM_COLORS['yellow'], (x, y), r, int(0))
            # pygame.draw.ellipse(self.screen, SIM_COLORS['yellow'], (x, y, 20, 50), int(0))
        elif self._type == 1:
            pygame.draw.circle(self.screen, SIM_COLORS['aqua'], (x, y), r, int(0))
        # pygame.draw.polygon(self.screen, SIM_COLORS['white'], rpoly, int(0))
        # pygame.draw.ellipse(self.screen, SIM_COLORS['white'], self._get_ellipse_params(x, y, r, r/2), int(0))
        # draw the forces on the agent
        self.draw_forces()

    def draw_forces(self):
        """Draw the force vectors as line segments from the agent's
        position: desired in red, social in lime, obstacle in blue.
        """
        # desired force
        pygame.draw.line(self.screen, SIM_COLORS['red'],
            ((self._position.x*SCALE), (self._position.y*SCALE)),
            ((self._position.x*SCALE) + self.desired_force[0]*SCALE, (self._position.y*SCALE) + self.desired_force[1]*SCALE), 2)
        # social force
        pygame.draw.line(self.screen, SIM_COLORS['lime'],
            ((self._position.x*SCALE), (self._position.y*SCALE)),
            ((self._position.x*SCALE) + self.social_force[0]*SCALE, (self._position.y*SCALE) + self.social_force[1]*SCALE), 2)
        # obstacle force
        pygame.draw.line(self.screen, SIM_COLORS['blue'],
            ((self._position.x*SCALE), (self._position.y*SCALE)),
            ((self._position.x*SCALE) + self.obstacle_force[0]*SCALE, (self._position.y*SCALE) + self.obstacle_force[1]*SCALE), 2)

    def reached_waypoint(self, waypoint):
        """ Check if the agent has reached the given waypoint so we
            advance to the next one. Reaching means being in the
            waypoint circle
        """
        if euclidean_distance((self._position.x, self._position.y), waypoint.position) <= waypoint.radius:
            return True
        else:
            return False

    def update(self, time_passed):
        """Bounce the agent off the playing-field walls.

        time_passed is currently unused here; position integration
        presumably happens elsewhere. The direction is refreshed from
        the velocity (y flipped for screen coordinates) before the
        wall tests.
        """
        # cim = Image.open('assets/blueagent.bmp')
        # rim = cim.rotate(self._direction.get_angle(), expand=1)
        # self._image = pygame.image.fromstring(rim.tostring(), rim.size, rim.mode)
        # When the image is rotated, its size is changed.
        # self._image_w, self._image_h = self._image.get_size()
        # bounds_rect = self.screen.get_rect().inflate(-self._image_w, -self._image_h)
        bounds_rect = self.game.field_box.get_internal_rect()
        self._direction = vec2d(self._velocity.x, -self._velocity.y)
        # clamp the position to the field and reflect direction on each wall
        if self._position.x*SCALE < bounds_rect.left:
            self._position.x = bounds_rect.left/SCALE
            self._direction.x *= -1
        elif self._position.x*SCALE > bounds_rect.right:
            self._position.x = bounds_rect.right/SCALE
            self._direction.x *= -1
        elif self._position.y*SCALE < bounds_rect.top:
            self._position.y = bounds_rect.top/SCALE
            self._direction.y *= -1
        elif self._position.y*SCALE > bounds_rect.bottom:
            self._position.y = bounds_rect.bottom/SCALE
            self._direction.y *= -1

    def social_move(self, time_passed):
        """Refresh the neighbor cache and recompute all four forces.

        time_passed is currently unused.
        """
        # force is computed over neighbors with 0.5m radius (= 0.5*100 px)
        self._neighbors = self.game.get_agent_neighbors(self, (0.5*SCALE))
        # compute the forces
        self._social_force = self._compute_social_force()
        self._desired_force = self._compute_desired_force()
        self._obstacle_force = self._compute_obstacle_force()
        self._lookahead_force = self._compute_lookahead_force()

    # =================================================================
    # Properties and how to compute them
    # =================================================================
    @property
    def social_force(self):
        """Interaction force from neighboring agents (vec2d)."""
        return self._social_force

    @property
    def obstacle_force(self):
        """Repulsive force from the nearest obstacle (vec2d)."""
        return self._obstacle_force

    @property
    def desired_force(self):
        """Force pulling the agent towards its next waypoint (vec2d)."""
        return self._desired_force

    @property
    def lookahead_force(self):
        """Look-ahead force; currently always the zero vector."""
        return self._lookahead_force

    @property
    def id(self):
        """Unique agent identifier."""
        return self._id

    @property
    def position(self):
        """Agent position in metres (vec2d)."""
        return self._position

    @position.setter
    def position(self, newpos):
        self._position = newpos

    @property
    def velocity(self):
        """Agent velocity (vec2d)."""
        return self._velocity

    @property
    def acceleration(self):
        """Agent acceleration (vec2d)."""
        return self._acceleration

    @property
    def vmax(self):
        """Maximum agent speed, in m/s."""
        return self._vmax

    @property
    def relaxation_time(self):
        """Relaxation time parameter of the force model."""
        return self._relaxation_time

    @property
    def next_waypoint(self):
        """The waypoint the agent is currently heading for."""
        return self._waypoints[self._waypoint_index]

    def _compute_social_force(self):
        """Sum the Moussaid-Helbing interaction force over the cached
        neighbor list.

        For each neighbor the force has a component along the
        interaction direction and one along its left normal, both
        decaying exponentially with distance and angle.
        """
        # variables according to Moussaid-Helbing paper
        lambda_importance = 2.0
        gamma = 0.35
        n, n_prime = 2, 3
        social_force = vec2d(0, 0)
        for neighbor in self._neighbors:
            # no social force with oneself
            if neighbor.id == self.id:
                continue
            else:
                # position difference
                diff = neighbor.position - self.position
                diff_direction = diff.normalized()
                # velocity difference
                vel_diff = self.velocity - neighbor.velocity
                # interaction direction t_ij
                interaction_vector = lambda_importance * vel_diff + diff_direction
                # degenerate case: no interaction direction can be defined
                if (interaction_vector.get_length()) == 0:
                    continue;
                interaction_direction = interaction_vector / interaction_vector.get_length()
                # theta (angle between interaction direction and position difference vector)
                theta = interaction_direction.get_angle_between(diff_direction)
                # model parameter B = gamma * ||D||
                B = gamma * interaction_vector.get_length()
                theta_rad = radians(theta)
                force_vel_amount = -exp(-diff.get_length() / B - (n_prime * B * theta_rad)**2)
                force_angle_amount = (-1 * SIGN(theta)) * exp(-diff.get_length() / B - (n * B * theta_rad)**2)
                force_vel = force_vel_amount * interaction_direction
                force_angle = force_angle_amount * interaction_direction.left_normal_vector()
                # social_force[0] += force_vel.x + force_angle.x
                # social_force[1] += force_vel.y + force_angle.y
                social_force += force_vel + force_angle
        return social_force

    def _compute_desired_force(self):
        """Force towards the next waypoint, advancing (and cycling)
        the waypoint index when the current one is reached.
        """
        if self.reached_waypoint(self.next_waypoint):
            self._waypoint_index += 1
        # if all waypoints are covered, go back to the beginning
        # NOTE - this does not take into account birth and death waypoints yet
        if self._waypoint_index == len(self._waypoints):
            self._waypoint_index = 0
        wp_force = self.next_waypoint.force_towards(self)
        desired_force = wp_force
        return desired_force

    def _compute_obstacle_force(self):
        """Repulsive force from the closest point of the closest
        obstacle; decays exponentially with the body-to-obstacle
        distance and is zero beyond five body radii.
        """
        obstacle_force = vec2d(0.0, 0.0)
        # if there are no obstacles, there is no obstacle force
        if len(self.game.obstacles) == 0:
            return obstacle_force
        # find the closest obstacle and the closest point on it
        closest_distance, closest_point = self.game.obstacles[0].agent_distance(self)
        for obstacle in self.game.obstacles:
            other_distance, other_point = obstacle.agent_distance(self)
            if other_distance < closest_distance:
                closest_distance, closest_point = other_distance, other_point
        # distance from the agent's body surface, not its centre
        distance = closest_distance - self._radius
        # obstacles further than 5 body radii away are ignored
        if closest_distance > self._radius*5:
            return obstacle_force
        force_amount = exp(-distance)
        # unit vector pointing from the obstacle towards the agent
        min_diffn = (self._position - vec2d(closest_point)).normalized()
        obstacle_force.x = (force_amount * min_diffn).x
        obstacle_force.y = (force_amount * min_diffn).y
        return obstacle_force

    def _compute_lookahead_force(self):
        """Placeholder: always returns the zero vector."""
        lookahead_force = vec2d(0, 0)
        return lookahead_force

    def _get_ellipse_params(self, x, y, w, h):
        """Return an ellipse bounding rect (left, top, w, h) centred on (x, y)."""
        return ((x-w/2), (y-h/2), w, h)
|
[
"utils.SIGN",
"math.exp",
"pygame.draw.line",
"utils.vec2d",
"math.radians",
"utils.euclidean_distance",
"numpy.array",
"pygame.sprite.Sprite.__init__"
] |
[((2015, 2036), 'pygame.sprite.Sprite.__init__', 'Sprite.__init__', (['self'], {}), '(self)\n', (2030, 2036), False, 'from pygame.sprite import Sprite\n'), ((2487, 2507), 'utils.vec2d', 'vec2d', (['init_position'], {}), '(init_position)\n', (2492, 2507), False, 'from utils import euclidean_distance, vec2d, Rotate2D\n'), ((2532, 2553), 'utils.vec2d', 'vec2d', (['self._position'], {}), '(self._position)\n', (2537, 2553), False, 'from utils import euclidean_distance, vec2d, Rotate2D\n'), ((2688, 2709), 'utils.vec2d', 'vec2d', (['init_direction'], {}), '(init_direction)\n', (2693, 2709), False, 'from utils import euclidean_distance, vec2d, Rotate2D\n'), ((2739, 2754), 'utils.vec2d', 'vec2d', (['(0.0)', '(0.0)'], {}), '(0.0, 0.0)\n', (2744, 2754), False, 'from utils import euclidean_distance, vec2d, Rotate2D\n'), ((2914, 2929), 'utils.vec2d', 'vec2d', (['(0.0)', '(0.0)'], {}), '(0.0, 0.0)\n', (2919, 2929), False, 'from utils import euclidean_distance, vec2d, Rotate2D\n'), ((2960, 2975), 'utils.vec2d', 'vec2d', (['(0.0)', '(0.0)'], {}), '(0.0, 0.0)\n', (2965, 2975), False, 'from utils import euclidean_distance, vec2d, Rotate2D\n'), ((3007, 3022), 'utils.vec2d', 'vec2d', (['(0.0)', '(0.0)'], {}), '(0.0, 0.0)\n', (3012, 3022), False, 'from utils import euclidean_distance, vec2d, Rotate2D\n'), ((3055, 3070), 'utils.vec2d', 'vec2d', (['(0.0)', '(0.0)'], {}), '(0.0, 0.0)\n', (3060, 3070), False, 'from utils import euclidean_distance, vec2d, Rotate2D\n'), ((3377, 3448), 'numpy.array', 'np.array', (['[[x - r / 2, y], [x, y - 30], [x + r / 2, y], [x, y + r / 2]]'], {}), '([[x - r / 2, y], [x, y - 30], [x + r / 2, y], [x, y + r / 2]])\n', (3385, 3448), True, 'import numpy as np\n'), ((4407, 4644), 'pygame.draw.line', 'pygame.draw.line', (['self.screen', "SIM_COLORS['red']", '(self._position.x * SCALE, self._position.y * SCALE)', '(self._position.x * SCALE + self.desired_force[0] * SCALE, self._position.y *\n SCALE + self.desired_force[1] * SCALE)', '(2)'], {}), "(self.screen, 
SIM_COLORS['red'], (self._position.x * SCALE,\n self._position.y * SCALE), (self._position.x * SCALE + self.\n desired_force[0] * SCALE, self._position.y * SCALE + self.desired_force\n [1] * SCALE), 2)\n", (4423, 4644), False, 'import pygame\n'), ((4691, 4927), 'pygame.draw.line', 'pygame.draw.line', (['self.screen', "SIM_COLORS['lime']", '(self._position.x * SCALE, self._position.y * SCALE)', '(self._position.x * SCALE + self.social_force[0] * SCALE, self._position.y *\n SCALE + self.social_force[1] * SCALE)', '(2)'], {}), "(self.screen, SIM_COLORS['lime'], (self._position.x * SCALE,\n self._position.y * SCALE), (self._position.x * SCALE + self.\n social_force[0] * SCALE, self._position.y * SCALE + self.social_force[1\n ] * SCALE), 2)\n", (4707, 4927), False, 'import pygame\n'), ((4976, 5216), 'pygame.draw.line', 'pygame.draw.line', (['self.screen', "SIM_COLORS['blue']", '(self._position.x * SCALE, self._position.y * SCALE)', '(self._position.x * SCALE + self.obstacle_force[0] * SCALE, self._position.\n y * SCALE + self.obstacle_force[1] * SCALE)', '(2)'], {}), "(self.screen, SIM_COLORS['blue'], (self._position.x * SCALE,\n self._position.y * SCALE), (self._position.x * SCALE + self.\n obstacle_force[0] * SCALE, self._position.y * SCALE + self.\n obstacle_force[1] * SCALE), 2)\n", (4992, 5216), False, 'import pygame\n'), ((6178, 6220), 'utils.vec2d', 'vec2d', (['self._velocity.x', '(-self._velocity.y)'], {}), '(self._velocity.x, -self._velocity.y)\n', (6183, 6220), False, 'from utils import euclidean_distance, vec2d, Rotate2D\n'), ((8589, 8600), 'utils.vec2d', 'vec2d', (['(0)', '(0)'], {}), '(0, 0)\n', (8594, 8600), False, 'from utils import euclidean_distance, vec2d, Rotate2D\n'), ((10843, 10858), 'utils.vec2d', 'vec2d', (['(0.0)', '(0.0)'], {}), '(0.0, 0.0)\n', (10848, 10858), False, 'from utils import euclidean_distance, vec2d, Rotate2D\n'), ((11562, 11576), 'math.exp', 'exp', (['(-distance)'], {}), '(-distance)\n', (11565, 11576), False, 'from math import sin, 
cos, radians, exp, sqrt, fabs\n'), ((11862, 11873), 'utils.vec2d', 'vec2d', (['(0)', '(0)'], {}), '(0, 0)\n', (11867, 11873), False, 'from utils import euclidean_distance, vec2d, Rotate2D\n'), ((5462, 5537), 'utils.euclidean_distance', 'euclidean_distance', (['(self._position.x, self._position.y)', 'waypoint.position'], {}), '((self._position.x, self._position.y), waypoint.position)\n', (5480, 5537), False, 'from utils import euclidean_distance, vec2d, Rotate2D\n'), ((2628, 2649), 'utils.vec2d', 'vec2d', (['init_direction'], {}), '(init_direction)\n', (2633, 2649), False, 'from utils import euclidean_distance, vec2d, Rotate2D\n'), ((9671, 9685), 'math.radians', 'radians', (['theta'], {}), '(theta)\n', (9678, 9685), False, 'from math import sin, cos, radians, exp, sqrt, fabs\n'), ((11615, 11635), 'utils.vec2d', 'vec2d', (['closest_point'], {}), '(closest_point)\n', (11620, 11635), False, 'from utils import euclidean_distance, vec2d, Rotate2D\n'), ((9824, 9835), 'utils.SIGN', 'SIGN', (['theta'], {}), '(theta)\n', (9828, 9835), False, 'from utils import SIM_COLORS, SCALE, SIGN\n')]
|
import wiringpi
import time
import sys

# GPIO pin assignments (BCM numbering via wiringPiSetupGpio below)
servo_pin = 18
motor1_pin = 23
motor2_pin = 24
# SPI bus channel and ADC input channel read by rori()
SPI_CH = 0
READ_CH = 0

# Command-line arguments: <smell code> <current degrees> <target degrees>
param = sys.argv
set_smell = param[1]
already_dgree = int(param[2])
set_dgree = int(param[3])

wiringpi.wiringPiSetupGpio()
wiringpi.pinMode( motor1_pin, 1 )  # 1 = OUTPUT (wiringPi mode constant)
wiringpi.pinMode( motor2_pin, 1 )  # 1 = OUTPUT
wiringpi.pinMode( servo_pin, 2 )   # 2 = PWM_OUTPUT (hardware PWM for the servo)
wiringpi.pwmSetMode(0)             # mark-space PWM mode
wiringpi.pwmSetRange(1024)
wiringpi.pwmSetClock(375)
wiringpi.wiringPiSPISetup( SPI_CH, 1000000 )  # 1 MHz SPI for the ADC
def motor_t(num):
    """Run the motor in one direction (pin1 low / pin2 high) for
    *num* seconds.
    """
    for pin, level in ((motor1_pin, 0), (motor2_pin, 1)):
        wiringpi.digitalWrite(pin, level)
    time.sleep(num)
def motor_f(num):
    """Run the motor in the opposite direction to motor_t (pin1 high /
    pin2 low) for *num* seconds.
    """
    for pin, level in ((motor1_pin, 1), (motor2_pin, 0)):
        wiringpi.digitalWrite(pin, level)
    time.sleep(num)
def motor_stop():
    """Stop the motor by driving both control pins high."""
    for pin in (motor1_pin, motor2_pin):
        wiringpi.digitalWrite(pin, 1)
def motor_init():
    """Reset the motor by driving both control pins low."""
    for pin in (motor1_pin, motor2_pin):
        wiringpi.digitalWrite(pin, 0)
def angle(a, b):
    """Swing the servo to the position derived from a + b, hold for a
    second, then park it back at the -90 degree rest position.

    The expressions map degrees onto this servo's PWM duty values
    (81 is the mechanical centre) -- calibration constants, TODO confirm.
    """
    target_duty = int( 81 + 41 / 90 * (a + b)*10 )
    wiringpi.pwmWrite( servo_pin, target_duty )
    time.sleep(1)  # give the servo time to travel
    rest_duty = int( 81 + 41 / 90 * (-90) )
    wiringpi.pwmWrite( servo_pin, rest_duty )
def rori(num):
    """Nudge the motor until the ADC reading reaches the target *num*.

    Repeatedly samples channel READ_CH of the SPI-attached ADC and
    pulses the motor towards the target, stopping once the reading is
    within +/-1 of *num*.
    """
    flag = 0
    while flag == 0:
        # 2-byte SPI command word selecting READ_CH
        # (start/single-ended/channel-select bits for this ADC)
        buffer = 0x6800 | ( 0x1800 * READ_CH )
        buffer = buffer.to_bytes( 2, byteorder='big' )
        wiringpi.wiringPiSPIDataRW( SPI_CH, buffer )
        # NOTE(review): assumes the transfer leaves the reply readable
        # through `buffer`; the 10-bit sample is unpacked from the two
        # reply bytes -- confirm against the wiringpi binding in use.
        ch0_value = (( buffer[0] * 256 + buffer[1] ) & 0x3ff) >> 4
        # signed error between the current reading and the target
        deg = ch0_value - num
        print (deg)
        if (deg < 1) and (deg > -1):
            # within tolerance: stop and exit the loop
            motor_stop()
            flag = 1
        elif deg > 1:
            # overshoot: short pulse one way, then pause inversely
            # proportional to the error
            motor_t(0.01)
            motor_stop()
            #time.sleep(1)
            time.sleep(abs(int(1/deg)))
        else:
            # undershoot: short pulse the other way
            motor_f(0.01)
            motor_stop()
            #time.sleep(1)
            time.sleep(abs(int(1/deg)))
def easter_egg():
    """Celebration routine: pulse the motor back and forth with growing
    pulse lengths while sweeping the servo between +90 and -90.
    """
    sequence = (
        (0.5, 90),
        (1, -90),
        (2, 90),
        (3, -90),
        (4, 90),
        (5, -90),
    )
    for pulse, sweep in sequence:
        motor_t(pulse)
        motor_f(pulse)
        angle(0, sweep)
# ADC target passed to rori() and the success message, per smell code.
SMELL_TABLE = {
    "A": (8, "できた1"),
    "B": (16, "できた2"),
    "C": (24, "できた3"),
    "D": (30, "できた4"),
    "E": (36, "できた5"),
    "F": (42, "できた6"),
    "G": (52, "できた7"),
    "H": (64, "できた8"),
}

if (set_dgree + already_dgree) < 10:
    if (set_smell == "X"):
        # hidden demo routine
        print("Happy easterEgg")
        motor_init()
        easter_egg()
        motor_stop()
        print("by easterEgg")
    elif set_smell in SMELL_TABLE:
        rori_target, done_message = SMELL_TABLE[set_smell]
        motor_init()
        rori(rori_target)
        motor_stop()
        angle(set_dgree, already_dgree)
        print(done_message)
    else:
        print("できてない")
else:
    print("その角度は無理")
|
[
"wiringpi.pinMode",
"wiringpi.pwmSetClock",
"wiringpi.digitalWrite",
"wiringpi.pwmSetMode",
"time.sleep",
"wiringpi.pwmWrite",
"wiringpi.wiringPiSPIDataRW",
"wiringpi.pwmSetRange",
"wiringpi.wiringPiSPISetup",
"wiringpi.wiringPiSetupGpio"
] |
[((207, 235), 'wiringpi.wiringPiSetupGpio', 'wiringpi.wiringPiSetupGpio', ([], {}), '()\n', (233, 235), False, 'import wiringpi\n'), ((236, 267), 'wiringpi.pinMode', 'wiringpi.pinMode', (['motor1_pin', '(1)'], {}), '(motor1_pin, 1)\n', (252, 267), False, 'import wiringpi\n'), ((270, 301), 'wiringpi.pinMode', 'wiringpi.pinMode', (['motor2_pin', '(1)'], {}), '(motor2_pin, 1)\n', (286, 301), False, 'import wiringpi\n'), ((304, 334), 'wiringpi.pinMode', 'wiringpi.pinMode', (['servo_pin', '(2)'], {}), '(servo_pin, 2)\n', (320, 334), False, 'import wiringpi\n'), ((337, 359), 'wiringpi.pwmSetMode', 'wiringpi.pwmSetMode', (['(0)'], {}), '(0)\n', (356, 359), False, 'import wiringpi\n'), ((360, 386), 'wiringpi.pwmSetRange', 'wiringpi.pwmSetRange', (['(1024)'], {}), '(1024)\n', (380, 386), False, 'import wiringpi\n'), ((387, 412), 'wiringpi.pwmSetClock', 'wiringpi.pwmSetClock', (['(375)'], {}), '(375)\n', (407, 412), False, 'import wiringpi\n'), ((413, 455), 'wiringpi.wiringPiSPISetup', 'wiringpi.wiringPiSPISetup', (['SPI_CH', '(1000000)'], {}), '(SPI_CH, 1000000)\n', (438, 455), False, 'import wiringpi\n'), ((481, 517), 'wiringpi.digitalWrite', 'wiringpi.digitalWrite', (['motor1_pin', '(0)'], {}), '(motor1_pin, 0)\n', (502, 517), False, 'import wiringpi\n'), ((524, 560), 'wiringpi.digitalWrite', 'wiringpi.digitalWrite', (['motor2_pin', '(1)'], {}), '(motor2_pin, 1)\n', (545, 560), False, 'import wiringpi\n'), ((567, 582), 'time.sleep', 'time.sleep', (['num'], {}), '(num)\n', (577, 582), False, 'import time\n'), ((606, 642), 'wiringpi.digitalWrite', 'wiringpi.digitalWrite', (['motor1_pin', '(1)'], {}), '(motor1_pin, 1)\n', (627, 642), False, 'import wiringpi\n'), ((649, 685), 'wiringpi.digitalWrite', 'wiringpi.digitalWrite', (['motor2_pin', '(0)'], {}), '(motor2_pin, 0)\n', (670, 685), False, 'import wiringpi\n'), ((692, 707), 'time.sleep', 'time.sleep', (['num'], {}), '(num)\n', (702, 707), False, 'import time\n'), ((731, 767), 'wiringpi.digitalWrite', 
'wiringpi.digitalWrite', (['motor1_pin', '(1)'], {}), '(motor1_pin, 1)\n', (752, 767), False, 'import wiringpi\n'), ((774, 810), 'wiringpi.digitalWrite', 'wiringpi.digitalWrite', (['motor2_pin', '(1)'], {}), '(motor2_pin, 1)\n', (795, 810), False, 'import wiringpi\n'), ((836, 872), 'wiringpi.digitalWrite', 'wiringpi.digitalWrite', (['motor1_pin', '(0)'], {}), '(motor1_pin, 0)\n', (857, 872), False, 'import wiringpi\n'), ((879, 915), 'wiringpi.digitalWrite', 'wiringpi.digitalWrite', (['motor2_pin', '(0)'], {}), '(motor2_pin, 0)\n', (900, 915), False, 'import wiringpi\n'), ((988, 1026), 'wiringpi.pwmWrite', 'wiringpi.pwmWrite', (['servo_pin', 'move_deg'], {}), '(servo_pin, move_deg)\n', (1005, 1026), False, 'import wiringpi\n'), ((1033, 1046), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (1043, 1046), False, 'import time\n'), ((1094, 1132), 'wiringpi.pwmWrite', 'wiringpi.pwmWrite', (['servo_pin', 'init_deg'], {}), '(servo_pin, init_deg)\n', (1111, 1132), False, 'import wiringpi\n'), ((1295, 1337), 'wiringpi.wiringPiSPIDataRW', 'wiringpi.wiringPiSPIDataRW', (['SPI_CH', 'buffer'], {}), '(SPI_CH, buffer)\n', (1321, 1337), False, 'import wiringpi\n')]
|
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from heat.common.i18n import _
from heat.engine import constraints
from heat.engine import properties
from heat.engine.resources.openstack.neutron import neutron
from heat.engine import support
class AddressScope(neutron.NeutronResource):
    """A resource for Neutron address scope.

    This resource can be associated with multiple subnet pools
    in a one-to-many relationship. The subnet pools under an
    address scope must not overlap.
    """

    required_service_extension = 'address-scope'

    support_status = support.SupportStatus(version='6.0.0')

    # Property keys exposed in the template schema below.
    PROPERTIES = (
        NAME, SHARED, TENANT_ID, IP_VERSION,
    ) = (
        'name', 'shared', 'tenant_id', 'ip_version',
    )

    properties_schema = {
        NAME: properties.Schema(
            properties.Schema.STRING,
            _('The name for the address scope.'),
            update_allowed=True
        ),
        SHARED: properties.Schema(
            properties.Schema.BOOLEAN,
            _('Whether the address scope should be shared to other '
              'tenants. Note that the default policy setting '
              'restricts usage of this attribute to administrative '
              'users only, and restricts changing of shared address scope '
              'to unshared with update.'),
            default=False,
            update_allowed=True
        ),
        TENANT_ID: properties.Schema(
            properties.Schema.STRING,
            _('The owner tenant ID of the address scope. Only '
              'administrative users can specify a tenant ID '
              'other than their own.'),
            constraints=[constraints.CustomConstraint('keystone.project')]
        ),
        IP_VERSION: properties.Schema(
            properties.Schema.INTEGER,
            _('Address family of the address scope, which is 4 or 6.'),
            default=4,
            constraints=[
                constraints.AllowedValues([4, 6]),
            ]
        ),
    }

    def handle_create(self):
        """Create the address scope in Neutron and record its id."""
        props = self.prepare_properties(
            self.properties,
            self.physical_resource_name())
        address_scope = self.client().create_address_scope(
            {'address_scope': props})['address_scope']
        self.resource_id_set(address_scope['id'])

    def handle_delete(self):
        """Delete the address scope; a missing scope is not an error."""
        if self.resource_id is None:
            return
        with self.client_plugin().ignore_not_found:
            self.client().delete_address_scope(self.resource_id)

    def handle_update(self, json_snippet, tmpl_diff, prop_diff):
        """Push only the changed properties to Neutron."""
        if prop_diff:
            self.prepare_update_properties(prop_diff)
            self.client().update_address_scope(
                self.resource_id,
                {'address_scope': prop_diff})

    def _show_resource(self):
        """Fetch the current address scope data from Neutron."""
        return self.client().show_address_scope(
            self.resource_id)['address_scope']
def resource_mapping():
    """Map Heat resource type names to their implementing classes."""
    mapping = {}
    mapping['OS::Neutron::AddressScope'] = AddressScope
    return mapping
|
[
"heat.common.i18n._",
"heat.engine.constraints.CustomConstraint",
"heat.engine.constraints.AllowedValues",
"heat.engine.support.SupportStatus"
] |
[((1103, 1141), 'heat.engine.support.SupportStatus', 'support.SupportStatus', ([], {'version': '"""6.0.0"""'}), "(version='6.0.0')\n", (1124, 1141), False, 'from heat.engine import support\n'), ((1386, 1422), 'heat.common.i18n._', '_', (['"""The name for the address scope."""'], {}), "('The name for the address scope.')\n", (1387, 1422), False, 'from heat.common.i18n import _\n'), ((1553, 1796), 'heat.common.i18n._', '_', (['"""Whether the address scope should be shared to other tenants. Note that the default policy setting restricts usage of this attribute to administrative users only, and restricts changing of shared address scope to unshared with update."""'], {}), "('Whether the address scope should be shared to other tenants. Note that the default policy setting restricts usage of this attribute to administrative users only, and restricts changing of shared address scope to unshared with update.'\n )\n", (1554, 1796), False, 'from heat.common.i18n import _\n'), ((2019, 2142), 'heat.common.i18n._', '_', (['"""The owner tenant ID of the address scope. Only administrative users can specify a tenant ID other than their own."""'], {}), "('The owner tenant ID of the address scope. Only administrative users can specify a tenant ID other than their own.'\n )\n", (2020, 2142), False, 'from heat.common.i18n import _\n'), ((2349, 2407), 'heat.common.i18n._', '_', (['"""Address family of the address scope, which is 4 or 6."""'], {}), "('Address family of the address scope, which is 4 or 6.')\n", (2350, 2407), False, 'from heat.common.i18n import _\n'), ((2198, 2246), 'heat.engine.constraints.CustomConstraint', 'constraints.CustomConstraint', (['"""keystone.project"""'], {}), "('keystone.project')\n", (2226, 2246), False, 'from heat.engine import constraints\n'), ((2474, 2507), 'heat.engine.constraints.AllowedValues', 'constraints.AllowedValues', (['[4, 6]'], {}), '([4, 6])\n', (2499, 2507), False, 'from heat.engine import constraints\n')]
|
import logging
import tempfile
from types import SimpleNamespace
from pathlib import Path
from typing import Any, Dict, Optional, Generator, Iterable, List, Tuple
import sqlalchemy as sqla
from sqlalchemy.orm import Session, sessionmaker
import pytest
from tweepy.models import User
from chainblocker import BlocklistDBBase, BlockList, BlockQueue, UnblockQueue
from chainblocker import __main__ as cli
# Root logger used by the dummy test doubles below; DEBUG level so
# their tracing output is visible in test runs.
LOGGER = logging.getLogger()
LOGGER.setLevel(logging.DEBUG)
class DummyTwitterUser():
    """Dummy twitter user

    When used in a with statement, it automatically replaces the
    cli.authenticate_interactive with this class, and then reimports the
    cli module on exit.
    """
    # Class-level counter used to hand out sequential user ids.
    user_id = 0
    # Original cli hook; restored on context-manager exit.
    original_function = cli.authenticate_interactive
    # Registries of every created dummy, keyed by name and by id.
    users: Dict[str, "DummyTwitterUser"] = {}
    users_int: Dict[int, "DummyTwitterUser"] = {}
    #TODO: implement creation of dummies with pre-defined ids

    def __init__(self, name: Optional[str] = None,
                 followers: Tuple[int, int] = 100, following: int = 100, blocked: int = 100
                 ) -> None:
        """Create a dummy user with the next sequential id and register it.

        The followers/following/blocked parameters are accepted but
        currently unused placeholders (see the FIXME below).
        """
        self.__class__.user_id += 1
        self.user_id = self.__class__.user_id
        LOGGER.debug("Creating new DummyTwitterUser with id %s", self.user_id)
        if name:
            self.screen_name = name
        else:
            self.screen_name = f"Dummy_{self.user_id}"
        self.name = self.screen_name
        #FIXME: implement creation of followers and followed accounts
        self.follower_ids: List[int] = []
        self.followed_ids: List[int] = []
        self.blocked_ids: List[int] = []
        self.followers_count = len(self.follower_ids)
        self.friends_count = len(self.followed_ids)
        # Lightweight stand-in for a tweepy User model object.
        self.user = SimpleNamespace(
            id=self.user_id,
            screen_name=self.screen_name,
            followers_count=self.followers_count,
            friends_count=self.friends_count
        )
        self.__class__.users[self.name] = self
        self.__class__.users_int[self.user_id] = self

    @property
    def id(self):
        """Numeric user id, mirroring tweepy's User.id attribute."""
        return self.user_id

    #FIXME: Implement all other methods
    @classmethod
    def get_user_by_name(cls, screen_name: str, create=True) -> "DummyTwitterUser":
        """Return dummy object, create one if name is not found"""
        if screen_name not in cls.users:
            if create:
                LOGGER.debug("User '%s' not found, creating new dummy account", screen_name)
                cls(screen_name)
            else:
                raise RuntimeError(f"User '{screen_name}' not found")
        return cls.users[screen_name]

    @classmethod
    def get_user_by_id(cls, user_id: int, create=True) -> "DummyTwitterUser":
        """Return the dummy with the given id.

        Creating a user with a pre-defined id is not implemented yet,
        so the create=True path for an unknown id always raises
        NotImplementedError.
        """
        if user_id not in cls.users_int:
            if create:
                LOGGER.debug("User id '%s' not found, creating new dummy account", user_id)
                raise NotImplementedError("Creation of users with known id unsupported")
                # NOTE(review): the call below is unreachable until the
                # raise above is removed
                cls(user_id)
            else:
                raise RuntimeError(f"User id '{user_id}' not found")
        return cls.users_int[user_id]
class DummyAuthedUser(DummyTwitterUser):
    """Dummy authenticated user exposing a minimal fake tweepy API.

    As a context manager it installs itself as
    cli.authenticate_interactive on enter and, on exit, restores the
    original hook and wipes all class-level dummy registries.
    """
    def __init__(self, name: Optional[str] = None):
        super().__init__(name)
        # Fake tweepy API surface; only create_block is wired up so far.
        self.api = SimpleNamespace(
            create_block=self._api_create_block,
            #blocks_ids=self._api_blocks_ids,
            #destroy_blocks=self._api_destroy_blocks,
            #followers_ids=self._api_followers_ids,
            #friends_ids=self._api_friends_ids,
            #get_user=self._api_get_user,
            #me=self._api_me,
            #rate_limit_status=self._api_rate_limit_status
        )

    def __enter__(self) -> "DummyTwitterUser":
        """Install this class as the cli authentication hook."""
        cli.authenticate_interactive = self.__class__
        return self

    def __exit__(self, *exc: Any) -> None:
        """Restore the original hook and reset all class-level state."""
        cli.authenticate_interactive = self.__class__.original_function
        self.__class__.users = {}
        self.__class__.users_int = {}
        self.__class__.user_id = 0

    # the following need to be overloaded
    # TODO: api.create_block(user_id=queued_block.user_id)
    # TODO: api.blocks_ids
    # TODO: api.destroy_blocks
    # TODO: api.followers_ids
    # TODO: api.friends_ids
    # TODO: api.get_user(screen_name=screen_name)
    # TODO: api.get_user(user_id=user_id)
    # TODO: api.me()
    # TODO: api.rate_limit_status()
    #
    def _api_blocks_ids(self) -> Iterable[int]:
        """Fake for api.blocks_ids: the ids this user has blocked."""
        return self.blocked_ids

    def _api_create_block(self, *args, user_id: int, **kwargs) -> User:
        """Fake for api.create_block: log and return the looked-up dummy."""
        LOGGER.debug("blocking user %s", user_id)
        return self.get_user_by_id(user_id)

    #def _api_destroy_block -> User:
    #def _api_followers_ids -> Iterable[int]:
    #def _api_friends_ids -> Iterable[int]:
    #def _api_get_user(*args, screen_name=None, user_id=None) -> User:
    #def _api_me(self) -> User:
    #    return self.user
    #def _api_rate_limit_status() -> Json

    # for best results, the tweepy cursoring method should also be overloaded
    # so that we only have to worry about overloading at one level of abstraction
    # but since cursoring is only explicitly used in the 'get_*id*' methods of TwitterUser,
    # the much easier and quicker approach is to overload those
    # tweepy.Cursor(self.api.followers_ids, user_id=user_id).pages()):
    def get_follower_ids(self) -> Generator[int, None, None]:
        """Yield follower ids one at a time."""
        for user_id in self.follower_ids:
            yield user_id

    def get_follower_id_pages(self, page_limit: int = 1000) -> Generator[Iterable[int], None, None]:
        """Yield follower ids in pages of at most page_limit ids."""
        first = 0
        last = page_limit
        while True:
            page = self.follower_ids[first:last]
            if not page:
                break
            yield page
            first += page_limit
            last += page_limit

    def get_followed_ids(self) -> Generator[int, None, None]:
        """Yield followed-account ids one at a time."""
        for user_id in self.followed_ids:
            yield user_id

    def get_followed_id_pages(self, page_limit: int = 1000) -> Generator[List[int], None, None]:
        """Yield followed-account ids in pages of at most page_limit ids."""
        first = 0
        last = page_limit
        while True:
            page = self.followed_ids[first:last]
            if not page:
                break
            yield page
            first += page_limit
            last += page_limit

    def get_blocked_id_pages(self, page_limit: int = 1000) -> Generator[List[int], None, None]:
        """Yield blocked-account ids in pages of at most page_limit ids."""
        first = 0
        last = page_limit
        while True:
            page = self.blocked_ids[first:last]
            if not page:
                break
            yield page
            first += page_limit
            last += page_limit
class DummyDBSession():
"""Use only through context manager.
"""
original_function = cli.create_db_session
current_session: Optional[Session] = None
def __init__(self, in_memory: bool, override_path: Optional[str] = None) -> None:
if not in_memory and not override_path:
raise ValueError("Path must be specified when not creating in-memory db")
self.dummy_accessed = 0
self.dummy_in_memory: bool = in_memory
self.dummy_override_path: Optional[str] = None
self.dummy_dbfile: Optional[str] = None
def __enter__(self) -> Session:
"""Create the session and overload cli"""
cli.create_db_session = self.dummy_create_session
#self.__class__.current_session = self.bound_session()
return self
def __exit__(self, *exc: Any) -> None:
if not self.__class__.current_session is None:
self.__class__.current_session.close()
self.__class__.current_session = None
cli.create_db_session = self.__class__.original_function
def __getattr__(self, name):
if name.startswith("dummy"):
raise AttributeError(f"'{self.__class__.__name__}' has no attribute '{name}'")
# all attributes of this class must be prefixed with "dummy" to avoid
# accidentally accessing attributes of the session object
#XXX: this is unnecessarily clever and should be removed if this class gets any more complex
return getattr(self.__class__.current_session, name)
def dummy_create_session(self, path: Path, name: str,
suffix: str = "_blocklist.sqlite"
) -> Session:
""""""
LOGGER.debug("Creating db session: path=%s, name=%s, suffix=%s", path, name, suffix)
LOGGER.debug("Overrides: in_memory=%s, path=%s",
self.dummy_in_memory, self.dummy_override_path
)
self.dummy_accessed += 1
if self.current_session:
return self.current_session
# raise RuntimeError("ATTEMPTED TO CREATE MULTIPLE DB SESSIONS")
#FIXME: should check if attempts are made to create a session when one already exists
# in the context of caller's scope
# i.e. if multiple sessions are being created when invoking cli functions
if self.dummy_override_path:
path = self.override_path
if self.dummy_in_memory:
dbfile = ":memory:"
else:
dbfile = path / f"{name}{suffix}"
self.dummy_dbfile = dbfile
LOGGER.debug("dbfile = %s", dbfile)
sqla_engine = sqla.create_engine(f"sqlite:///{str(dbfile)}", echo=False)
BlocklistDBBase.metadata.create_all(sqla_engine)
bound_session = sessionmaker(bind=sqla_engine)
self.__class__.current_session = bound_session()
return self.__class__.current_session
###
### ACTUAL TESTS START HERE
###
def test_clean_start() -> None:
"""Verify that it is possible to create a fresh db on disk
"""
with tempfile.TemporaryDirectory() as tempdir:
with DummyAuthedUser("dummy") as u_dummy, \
DummyDBSession(in_memory=False, override_path=tempdir) as db_dummy:
LOGGER.info("STARTING: DB init test")
cli.main(paths=cli.get_workdirs(home=Path(tempdir)), args="")
assert db_dummy.dummy_accessed == 1
LOGGER.info("COMPLETED: DB init test")
LOGGER.info("STARTING: block test")
block_target = u_dummy.get_user_by_name("test_block_target")
cli.main(paths=cli.get_workdirs(home=Path(tempdir)),
args=f"block {block_target.screen_name}".split())
assert db_dummy.dummy_accessed == 2
amnt_queued = db_dummy.query(BlockQueue).count()
assert amnt_queued == 0, \
f"Block queue was not fully processed ({amnt_queued} still in queue)"
amnt_blocked = db_dummy.query(BlockList).count()
expected_blocked = len(block_target.follower_ids) + 1 # followers + target
assert amnt_blocked == expected_blocked, \
f"expected {expected_blocked} != blocked {amnt_blocked}"
LOGGER.info("COMPLETED: block test")
LOGGER.info("STARTING: unblock test")
with pytest.raises(NotImplementedError):
cli.main(paths=cli.get_workdirs(home=Path(tempdir)), args="unblock test_block_target".split())
assert db_dummy.dummy_accessed == 3
amnt_queued = db_dummy.query(UnblockQueue).count()
assert amnt_queued == 0, \
f"Unblock queue was not fully processed ({amnt_queued} still in queue)"
amnt_blocked = db_dummy.query(BlockList).count()
assert amnt_blocked == 0, "did not unblock everyone"
LOGGER.info("COMPLETED: unblock test")
|
[
"tempfile.TemporaryDirectory",
"chainblocker.BlocklistDBBase.metadata.create_all",
"pytest.raises",
"pathlib.Path",
"sqlalchemy.orm.sessionmaker",
"types.SimpleNamespace",
"logging.getLogger"
] |
[((416, 435), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (433, 435), False, 'import logging\n'), ((1749, 1887), 'types.SimpleNamespace', 'SimpleNamespace', ([], {'id': 'self.user_id', 'screen_name': 'self.screen_name', 'followers_count': 'self.followers_count', 'friends_count': 'self.friends_count'}), '(id=self.user_id, screen_name=self.screen_name,\n followers_count=self.followers_count, friends_count=self.friends_count)\n', (1764, 1887), False, 'from types import SimpleNamespace\n'), ((3292, 3344), 'types.SimpleNamespace', 'SimpleNamespace', ([], {'create_block': 'self._api_create_block'}), '(create_block=self._api_create_block)\n', (3307, 3344), False, 'from types import SimpleNamespace\n'), ((9466, 9514), 'chainblocker.BlocklistDBBase.metadata.create_all', 'BlocklistDBBase.metadata.create_all', (['sqla_engine'], {}), '(sqla_engine)\n', (9501, 9514), False, 'from chainblocker import BlocklistDBBase, BlockList, BlockQueue, UnblockQueue\n'), ((9539, 9569), 'sqlalchemy.orm.sessionmaker', 'sessionmaker', ([], {'bind': 'sqla_engine'}), '(bind=sqla_engine)\n', (9551, 9569), False, 'from sqlalchemy.orm import Session, sessionmaker\n'), ((9825, 9854), 'tempfile.TemporaryDirectory', 'tempfile.TemporaryDirectory', ([], {}), '()\n', (9852, 9854), False, 'import tempfile\n'), ((11112, 11146), 'pytest.raises', 'pytest.raises', (['NotImplementedError'], {}), '(NotImplementedError)\n', (11125, 11146), False, 'import pytest\n'), ((10100, 10113), 'pathlib.Path', 'Path', (['tempdir'], {}), '(tempdir)\n', (10104, 10113), False, 'from pathlib import Path\n'), ((10395, 10408), 'pathlib.Path', 'Path', (['tempdir'], {}), '(tempdir)\n', (10399, 10408), False, 'from pathlib import Path\n'), ((11201, 11214), 'pathlib.Path', 'Path', (['tempdir'], {}), '(tempdir)\n', (11205, 11214), False, 'from pathlib import Path\n')]
|
from __future__ import print_function, absolute_import
import time
import torch
from .utils.meters import AverageMeter
import torch.nn.functional as F
import numpy as np
class Trainer(object):
def __init__(self, model, model_inv):
super(Trainer, self).__init__()
self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
self.model = model
self.model_inv = model_inv
def train(self, epoch, target_train_loader, optimizer, num_batch=100,
all_pseudo_label='', init_intra_id_feat=''):
self.model.train()
batch_time = AverageMeter()
data_time = AverageMeter()
losses = AverageMeter()
end = time.time()
# Target iter
target_iter = iter(target_train_loader)
# Train
#loss_print = {}
for batch_ind in range(num_batch):
data_time.update(time.time() - end)
loss_print = {}
try:
inputs = next(target_iter)
except:
target_iter = iter(target_train_loader)
inputs = next(target_iter)
### Target inputs
inputs_target = inputs[0].to(self.device)
index_target = inputs[3].to(self.device)
cam_target = inputs[4].to(self.device)
# Target loss
_, embed_feat = self.model(inputs_target)
loss = self.model_inv(embed_feat, index_target, cam_target, epoch=epoch, all_pseudo_label=all_pseudo_label,
batch_ind=batch_ind, init_intra_id_feat=init_intra_id_feat)
loss_print['memo_loss'] = loss.item()
losses.update(loss.item(), embed_feat.size(0))
optimizer.zero_grad()
loss.backward()
optimizer.step()
batch_time.update(time.time() - end)
end = time.time()
log = "Epoch: [{}][{}/{}], Time {:.3f} ({:.3f}), Data {:.3f} ({:.3f}), Loss {:.3f} ({:.3f})" \
.format(epoch, num_batch, num_batch,
batch_time.val, batch_time.avg,
data_time.val, data_time.avg,
losses.val, losses.avg)
for tag, value in loss_print.items():
log += ", {}: {:.3f}".format(tag, value)
print(log)
|
[
"torch.cuda.is_available",
"time.time"
] |
[((703, 714), 'time.time', 'time.time', ([], {}), '()\n', (712, 714), False, 'import time\n'), ((1867, 1878), 'time.time', 'time.time', ([], {}), '()\n', (1876, 1878), False, 'import time\n'), ((322, 347), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (345, 347), False, 'import torch\n'), ((900, 911), 'time.time', 'time.time', ([], {}), '()\n', (909, 911), False, 'import time\n'), ((1830, 1841), 'time.time', 'time.time', ([], {}), '()\n', (1839, 1841), False, 'import time\n')]
|
__all__ = [
'PredictionSummaryComponent',
'ImportancesComponent',
'FeatureDescriptionsComponent',
'FeatureInputComponent',
'PdpComponent',
]
from math import ceil
import numpy as np
import pandas as pd
import dash
from dash import html, dcc, Input, Output, State, dash_table
from dash.exceptions import PreventUpdate
import dash_bootstrap_components as dbc
from ..dashboard_methods import *
from .. import to_html
class PredictionSummaryComponent(ExplainerComponent):
def __init__(self, explainer, title="Prediction Summary", name=None,
hide_index=False, hide_percentile=False,
hide_title=False, hide_subtitle=False, hide_selector=False,
pos_label=None, index=None, percentile=True,
description=None, **kwargs):
"""Shows a summary for a particular prediction
Args:
explainer (Explainer): explainer object constructed with either
ClassifierExplainer() or RegressionExplainer()
title (str, optional): Title of tab or page. Defaults to
"Prediction Summary".
name (str, optional): unique name to add to Component elements.
If None then random uuid is generated to make sure
it's unique. Defaults to None.
hide_index (bool, optional): hide index selector. Defaults to False.
hide_percentile (bool, optional): hide percentile toggle. Defaults to False.
hide_title (bool, optional): hide title. Defaults to False.
hide_subtitle (bool, optional): Hide subtitle. Defaults to False.
hide_selector (bool, optional): hide pos label selectors. Defaults to False.
pos_label ({int, str}, optional): initial pos label.
Defaults to explainer.pos_label
index ({int, str}, optional): Index to display prediction summary for. Defaults to None.
percentile (bool, optional): Whether to add the prediction percentile. Defaults to True.
"""
super().__init__(explainer, title, name)
self.index_name = 'modelprediction-index-'+self.name
self.selector = PosLabelSelector(explainer, name=self.name, pos_label=pos_label)
def layout(self):
return dbc.Card([
make_hideable(
dbc.CardHeader([
html.H3(self.title),
]), hide=self.hide_title),
dbc.CardBody([
dbc.Row([
make_hideable(
dbc.Col([
dbc.Label(f"{self.explainer.index_name}:"),
dcc.Dropdown(id='modelprediction-index-'+self.name,
options = [{'label': str(idx), 'value':idx}
for idx in self.explainer.idxs],
value=self.index)
], md=6), hide=self.hide_index),
make_hideable(
dbc.Col([self.selector.layout()
], width=3), hide=self.hide_selector),
make_hideable(
dbc.Col([
dbc.Label("Show Percentile:"),
dbc.FormGroup(
[
dbc.RadioButton(
id='modelprediction-percentile-'+self.name,
className="form-check-input",
checked=self.percentile),
dbc.Label("Show percentile",
html_for='modelprediction-percentile'+self.name,
className="form-check-label"),
], check=True)
], md=3), hide=self.hide_percentile),
]),
dbc.Row([
dbc.Col([
dcc.Markdown(id='modelprediction-'+self.name),
], md=12)
])
])
])
def component_callbacks(self, app):
@app.callback(
Output('modelprediction-'+self.name, 'children'),
[Input('modelprediction-index-'+self.name, 'value'),
Input('modelprediction-percentile-'+self.name, 'checked'),
Input('pos-label-'+self.name, 'value')])
def update_output_div(index, include_percentile, pos_label):
if index is not None:
return self.explainer.prediction_result_markdown(index, include_percentile=include_percentile, pos_label=pos_label)
raise PreventUpdate
class ImportancesComponent(ExplainerComponent):
_state_props = dict(
depth=('importances-depth-', 'value'),
importance_type=('importances-permutation-or-shap-', 'value'),
pos_label=('pos-label-', 'value')
)
def __init__(self, explainer, title="Feature Importances", name=None,
subtitle="Which features had the biggest impact?",
hide_type=False, hide_depth=False, hide_popout=False,
hide_title=False, hide_subtitle=False, hide_selector=False,
pos_label=None, importance_type="shap", depth=None,
no_permutations=False,
description=None, **kwargs):
"""Display features importances component
Args:
explainer (Explainer): explainer object constructed with either
ClassifierExplainer() or RegressionExplainer()
title (str, optional): Title of tab or page. Defaults to
"Feature Importances".
name (str, optional): unique name to add to Component elements.
If None then random uuid is generated to make sure
it's unique. Defaults to None.
subtitle(str, optional): Subtitle.
hide_type (bool, optional): Hide permutation/shap selector toggle.
Defaults to False.
hide_depth (bool, optional): Hide number of features toggle.
Defaults to False.
hide_popout (bool, optional): hide popout button
hide_title (bool, optional): hide title. Defaults to False.
hide_subtitle (bool, optional): Hide subtitle. Defaults to False.
hide_selector (bool, optional): hide pos label selectors.
Defaults to False.
pos_label ({int, str}, optional): initial pos label.
Defaults to explainer.pos_label
importance_type (str, {'permutation', 'shap'} optional):
initial importance type to display. Defaults to "shap".
depth (int, optional): Initial number of top features to display.
Defaults to None (=show all).
no_permutations (bool, optional): Do not use the permutation
importances for this component. Defaults to False.
description (str, optional): Tooltip to display when hover over
component title. When None default text is shown.
"""
super().__init__(explainer, title, name)
assert importance_type in ['shap', 'permutation'], \
"importance type must be either 'shap' or 'permutation'!"
if depth is not None:
self.depth = min(depth, len(explainer.columns_ranked_by_shap()))
self.selector = PosLabelSelector(explainer, name=self.name, pos_label=pos_label)
if self.explainer.y_missing or self.no_permutations:
self.hide_type = True
self.importance_type = 'shap'
if self.description is None: self.description = f"""
Shows the features sorted from most important to least important. Can
be either sorted by absolute SHAP value (average absolute impact of
the feature on final prediction) or by permutation importance (how much
does the model get worse when you shuffle this feature, rendering it
useless?).
"""
self.popout = GraphPopout('importances-'+self.name+'popout',
'importances-graph-'+self.name, self.title, self.description)
self.register_dependencies('shap_values_df')
if not (self.hide_type and self.importance_type == 'shap'):
self.register_dependencies('permutation_importances')
def layout(self):
return dbc.Card([
make_hideable(
dbc.CardHeader([
html.Div([
html.H3(self.title, className="card-title", id='importances-title-'+self.name),
make_hideable(html.H6(self.subtitle, className='card-subtitle'), hide=self.hide_subtitle),
dbc.Tooltip(self.description, target='importances-title-'+self.name),
]),
]), hide=self.hide_title),
dbc.CardBody([
dbc.Row([
make_hideable(
dbc.Col([
dbc.FormGroup([
dbc.Label("Importances type:"),
dbc.Select(
options=[
{'label': 'Permutation Importances',
'value': 'permutation'},
{'label': 'SHAP values',
'value': 'shap'}
],
value=self.importance_type,
id='importances-permutation-or-shap-'+self.name,
#inline=True,
),
], id='importances-permutation-or-shap-form-'+self.name),
dbc.Tooltip("Select Feature importance type: \n"
"Permutation Importance: How much does performance metric decrease when shuffling this feature?\n"
"SHAP values: What is the average SHAP contribution (positive or negative) of this feature?",
target='importances-permutation-or-shap-form-'+self.name),
], md=3), self.hide_type),
make_hideable(
dbc.Col([
html.Label('Depth:', id='importances-depth-label-'+self.name),
dbc.Select(id='importances-depth-'+self.name,
options = [{'label': str(i+1), 'value':i+1}
for i in range(self.explainer.n_features)],
value=self.depth),
dbc.Tooltip("Select how many features to display", target='importances-depth-label-'+self.name)
], md=2), self.hide_depth),
make_hideable(
dbc.Col([self.selector.layout()
], width=2), hide=self.hide_selector)
], form=True),
dbc.Row([
dbc.Col([
dcc.Loading(id='importances-graph-loading-'+self.name,
children=dcc.Graph(id='importances-graph-'+self.name,
config=dict(modeBarButtons=[['toImage']], displaylogo=False))),
]),
]),
dbc.Row([
make_hideable(
dbc.Col([
self.popout.layout()
], md=2, align="start"), hide=self.hide_popout),
], justify="end"),
])
])
def to_html(self, state_dict=None, add_header=True):
args = self.get_state_args(state_dict)
args['depth'] = None if args['depth'] is None else int(args['depth'])
fig = self.explainer.plot_importances(
kind=args['importance_type'], topx=args['depth'], pos_label=args['pos_label'])
html = to_html.card(to_html.fig(fig), title=self.title)
if add_header:
return to_html.add_header(html)
return html
def component_callbacks(self, app, **kwargs):
@app.callback(
Output('importances-graph-'+self.name, 'figure'),
[Input('importances-depth-'+self.name, 'value'),
Input('importances-permutation-or-shap-'+self.name, 'value'),
Input('pos-label-'+self.name, 'value')],
)
def update_importances(depth, permutation_shap, pos_label):
depth = None if depth is None else int(depth)
plot = self.explainer.plot_importances(
kind=permutation_shap, topx=depth, pos_label=pos_label)
return plot
class FeatureDescriptionsComponent(ExplainerComponent):
_state_props = dict(sort=('feature-descriptions-table-sort-', 'value'))
def __init__(self, explainer, title="Feature Descriptions", name=None,
hide_title=False, hide_sort=False,
sort='alphabet', **kwargs):
""" Display Feature Descriptions table.
Args:
explainer (Explainer): explainer object constructed with either
ClassifierExplainer() or RegressionExplainer()
title (str, optional): Title of tab or page. Defaults to
"Feature Importances".
name (str, optional): unique name to add to Component elements.
If None then random uuid is generated to make sure
it's unique. Defaults to None.
hide_title (bool, optional): hide the title
hide_sort (bool, optional): hide the sort
sort (str, optional): how to sort the features, either 'alphabet'
or by mean abs shap ('shap')
"""
super().__init__(explainer, title, name)
if sort not in {'alphabet', 'shap'}:
raise ValueError("FeatureDesriptionsComponent parameter sort should be either"
"'alphabet' or 'shap'!")
def layout(self):
return dbc.Card([
make_hideable(
dbc.CardHeader([
html.H3(self.title),
]), hide=self.hide_title),
dbc.CardBody([
dbc.Row([
make_hideable(
dbc.Col([
dbc.FormGroup([
dbc.Label("Sort Features:"),
dbc.Select(
options=[
{'label': 'Alphabetically',
'value': 'alphabet'},
{'label': 'SHAP',
'value': 'shap'}
],
value=self.sort,
id='feature-descriptions-table-sort-'+self.name,
),
]),
dbc.Tooltip("Sort features either alphabetically or from highest "
"mean absolute SHAP value to lowest.",
target='feature-descriptions-table-sort-'+self.name),
], md=3), self.hide_sort),
], form=True),
dbc.Row([
dbc.Col([
html.Div(id='feature-descriptions-table-'+self.name)
]),
]),
])
])
def to_html(self, state_dict=None, add_header=True):
args = self.get_state_args(state_dict)
html = to_html.table_from_df(self.explainer.get_descriptions_df(sort=args['sort']))
html = to_html.card(html, title=self.title)
if add_header:
return to_html.add_header(html)
return html
def component_callbacks(self, app):
@app.callback(
Output('feature-descriptions-table-'+self.name, 'children'),
Input('feature-descriptions-table-sort-'+self.name, 'value')
)
def update_feature_descriptions_table(sort):
return dbc.Table.from_dataframe(self.explainer.get_descriptions_df(sort=sort))
class PdpComponent(ExplainerComponent):
_state_props = dict(
index=('pdp-index-', 'value'),
col=('pdp-col-', 'value'),
dropna=('pdp-dropna-', 'value'),
sample=('pdp-sample-', 'value'),
gridlines=('pdp-gridlines-', 'value'),
gridpoints=('pdp-gridpoints-', 'value'),
cats_sort=('pdp-categories-sort-', 'value'),
pos_label=('pos-label-', 'value')
)
def __init__(self, explainer, title="Partial Dependence Plot", name=None,
subtitle="How does the prediction change if you change one feature?",
hide_col=False, hide_index=False,
hide_title=False, hide_subtitle=False,
hide_footer=False, hide_selector=False, hide_popout=False,
hide_dropna=False, hide_sample=False,
hide_gridlines=False, hide_gridpoints=False, hide_cats_sort=False,
index_dropdown=True, feature_input_component=None,
pos_label=None, col=None, index=None,
dropna=True, sample=100, gridlines=50, gridpoints=10,
cats_sort='freq',
description=None, **kwargs):
"""Show Partial Dependence Plot component
Args:
explainer (Explainer): explainer object constructed with either
ClassifierExplainer() or RegressionExplainer()
title (str, optional): Title of tab or page. Defaults to
"Partial Dependence Plot".
name (str, optional): unique name to add to Component elements.
If None then random uuid is generated to make sure
it's unique. Defaults to None.
subtitle (str): subtitle
hide_col (bool, optional): Hide feature selector. Defaults to False.
hide_index (bool, optional): Hide index selector. Defaults to False.
hide_title (bool, optional): Hide title, Defaults to False.
hide_subtitle (bool, optional): Hide subtitle. Defaults to False.
hide_footer (bool, optional): hide the footer at the bottom of the component
hide_selector (bool, optional): hide pos label selectors. Defaults to False.
hide_popout (bool, optional): hide popout button
hide_dropna (bool, optional): Hide drop na's toggle Defaults to False.
hide_sample (bool, optional): Hide sample size input. Defaults to False.
hide_gridlines (bool, optional): Hide gridlines input. Defaults to False.
hide_gridpoints (bool, optional): Hide gridpounts input. Defaults to False.
hide_cats_sort (bool, optional): Hide the categorical sorting dropdown. Defaults to False.
index_dropdown (bool, optional): Use dropdown for index input instead
of free text input. Defaults to True.
feature_input_component (FeatureInputComponent): A FeatureInputComponent
that will give the input to the graph instead of the index selector.
If not None, hide_index=True. Defaults to None.
pos_label ({int, str}, optional): initial pos label.
Defaults to explainer.pos_label
col (str, optional): Feature to display PDP for. Defaults to None.
index ({int, str}, optional): Index to add ice line to plot. Defaults to None.
dropna (bool, optional): Drop rows where values equal explainer.na_fill (usually -999). Defaults to True.
sample (int, optional): Sample size to calculate average partial dependence. Defaults to 100.
gridlines (int, optional): Number of ice lines to display in plot. Defaults to 50.
gridpoints (int, optional): Number of breakpoints on horizontal axis Defaults to 10.
cats_sort (str, optional): how to sort categories: 'alphabet',
'freq' or 'shap'. Defaults to 'freq'.
description (str, optional): Tooltip to display when hover over
component title. When None default text is shown.
"""
super().__init__(explainer, title, name)
self.index_name = 'pdp-index-'+self.name
if self.col is None:
self.col = self.explainer.columns_ranked_by_shap()[0]
if self.feature_input_component is not None:
self.exclude_callbacks(self.feature_input_component)
self.hide_index = True
if self.description is None: self.description = f"""
The partial dependence plot (pdp) show how the model prediction would
change if you change one particular feature. The plot shows you a sample
of observations and how these observations would change with this
feature (gridlines). The average effect is shown in grey. The effect
of changing the feature for a single {self.explainer.index_name} is
shown in blue. You can adjust how many observations to sample for the
average, how many gridlines to show, and how many points along the
x-axis to calculate model predictions for (gridpoints).
"""
self.selector = PosLabelSelector(explainer, name=self.name, pos_label=pos_label)
self.index_selector = IndexSelector(explainer, 'pdp-index-'+self.name,
index=index, index_dropdown=index_dropdown)
self.popout = GraphPopout('pdp-'+self.name+'popout', 'pdp-graph-'+self.name, self.title, self.description)
def layout(self):
return dbc.Card([
make_hideable(
dbc.CardHeader([
html.Div([
html.H3(self.title, id='pdp-title-'+self.name),
make_hideable(html.H6(self.subtitle, className='card-subtitle'), hide=self.hide_subtitle),
dbc.Tooltip(self.description, target='pdp-title-'+self.name),
]),
]), hide=self.hide_title),
dbc.CardBody([
dbc.Row([
make_hideable(
dbc.Col([
dbc.Label("Feature:",
html_for='pdp-col'+self.name, id='pdp-col-label-'+self.name),
dbc.Tooltip("Select the feature for which you want to see the partial dependence plot",
target='pdp-col-label-'+self.name),
dbc.Select(id='pdp-col-'+self.name,
options=[{'label': col, 'value':col}
for col in self.explainer.columns_ranked_by_shap()],
value=self.col),
], md=4), hide=self.hide_col),
make_hideable(
dbc.Col([
dbc.Label(f"{self.explainer.index_name}:", id='pdp-index-label-'+self.name),
dbc.Tooltip(f"Select the {self.explainer.index_name} to display the partial dependence plot for",
target='pdp-index-label-'+self.name),
self.index_selector.layout(),
], md=4), hide=self.hide_index),
make_hideable(
dbc.Col([self.selector.layout()
], width=2), hide=self.hide_selector),
], form=True),
dbc.Row([
dbc.Col([
dcc.Loading(id='loading-pdp-graph-'+self.name,
children=[dcc.Graph(id='pdp-graph-'+self.name,
config=dict(modeBarButtons=[['toImage']], displaylogo=False))]),
])
]),
dbc.Row([
make_hideable(
dbc.Col([
self.popout.layout()
], md=2, align="start"), hide=self.hide_popout),
], justify="end"),
]),
make_hideable(
dbc.CardFooter([
dbc.Row([
make_hideable(
dbc.Col([
dbc.FormGroup([
dbc.Label("Drop fill:"),
dbc.Tooltip("Drop all observations with feature values "
f"equal to {self.explainer.na_fill} from the plot. "
"This prevents the filler values from ruining the x-axis.",
target='pdp-dropna-'+self.name),
dbc.Checklist(
options=[{"label": "Drop na_fill", "value": True}],
value=[True] if self.dropna else [],
id='pdp-dropna-'+self.name,
inline=True,
switch=True,
),
]),
]), hide=self.hide_dropna),
make_hideable(
dbc.Col([
dbc.Label("Sample:", id='pdp-sample-label-'+self.name ),
dbc.Tooltip("Number of observations to use to calculate average partial dependence",
target='pdp-sample-label-'+self.name ),
dbc.Input(id='pdp-sample-'+self.name, value=min(self.sample, len(self.explainer)),
type="number", min=0, max=len(self.explainer), step=1),
]), hide=self.hide_sample),
make_hideable(
dbc.Col([ #gridlines
dbc.Label("Gridlines:", id='pdp-gridlines-label-'+self.name ),
dbc.Tooltip("Number of individual observations' partial dependences to show in plot",
target='pdp-gridlines-label-'+self.name),
dbc.Input(id='pdp-gridlines-'+self.name, value=min(self.gridlines, len(self.explainer)),
type="number", min=0, max=len(self.explainer), step=1),
]), hide=self.hide_gridlines),
make_hideable(
dbc.Col([ #gridpoints
dbc.Label("Gridpoints:", id='pdp-gridpoints-label-'+self.name ),
dbc.Tooltip("Number of points to sample the feature axis for predictions."
" The higher, the smoother the curve, but takes longer to calculate",
target='pdp-gridpoints-label-'+self.name ),
dbc.Input(id='pdp-gridpoints-'+self.name, value=self.gridpoints,
type="number", min=0, max=100, step=1),
]), hide=self.hide_gridpoints),
make_hideable(
html.Div([
dbc.Col([
html.Label('Sort categories:', id='pdp-categories-sort-label-'+self.name),
dbc.Tooltip("How to sort the categories: Alphabetically, most common "
"first (Frequency), or highest mean absolute SHAP value first (Shap impact)",
target='pdp-categories-sort-label-'+self.name),
dbc.Select(id='pdp-categories-sort-'+self.name,
options = [{'label': 'Alphabetically', 'value': 'alphabet'},
{'label': 'Frequency', 'value': 'freq'},
{'label': 'Shap impact', 'value': 'shap'}],
value=self.cats_sort),
])],
id='pdp-categories-sort-div-'+self.name,
style={} if self.col in self.explainer.cat_cols else dict(display="none")
), hide=self.hide_cats_sort),
], form=True),
]), hide=self.hide_footer)
])
def get_state_tuples(self):
_state_tuples = super().get_state_tuples()
if self.feature_input_component is not None:
_state_tuples.extend(self.feature_input_component.get_state_tuples())
return sorted(list(set(_state_tuples)))
def to_html(self, state_dict=None, add_header=True):
args = self.get_state_args(state_dict)
if self.feature_input_component is None:
fig = self.explainer.plot_pdp(args['col'], args['index'],
drop_na=bool(args['dropna']), sample=args['sample'],
gridlines=args['gridlines'], gridpoints=args['gridpoints'],
sort=args['cats_sort'], pos_label=args['pos_label'])
html = to_html.fig(fig)
else:
inputs = {k:v for k,v in self.feature_input_component.get_state_args(state_dict).items() if k != 'index'}
inputs = list(inputs.values())
if len(inputs) == len(self.feature_input_component._input_features) and not any([i is None for i in inputs]):
X_row = self.explainer.get_row_from_input(inputs, ranked_by_shap=True)
fig = self.explainer.plot_pdp(args['col'], X_row=X_row,
drop_na=bool(args['dropna']), sample=args['sample'],
gridlines=args['gridlines'], gridpoints=args['gridpoints'],
sort=args['cats_sort'], pos_label=args['pos_label'])
html = to_html.fig(fig)
else:
html = f"<div>input data incorrect</div>"
html = to_html.card(html, title=self.title)
if add_header:
return to_html.add_header(html)
return html
def component_callbacks(self, app):
@app.callback(
Output('pdp-categories-sort-div-'+self.name, 'style'),
Input('pdp-col-'+self.name, 'value')
)
def update_pdp_sort_div(col):
return {} if col in self.explainer.cat_cols else dict(display="none")
if self.feature_input_component is None:
@app.callback(
Output('pdp-graph-'+self.name, 'figure'),
[Input('pdp-index-'+self.name, 'value'),
Input('pdp-col-'+self.name, 'value'),
Input('pdp-dropna-'+self.name, 'value'),
Input('pdp-sample-'+self.name, 'value'),
Input('pdp-gridlines-'+self.name, 'value'),
Input('pdp-gridpoints-'+self.name, 'value'),
Input('pdp-categories-sort-'+self.name, 'value'),
Input('pos-label-'+self.name, 'value')]
)
def update_pdp_graph(index, col, drop_na, sample, gridlines, gridpoints, sort, pos_label):
if index is None or not self.explainer.index_exists(index):
raise PreventUpdate
return self.explainer.plot_pdp(col, index,
drop_na=bool(drop_na), sample=sample, gridlines=gridlines,
gridpoints=gridpoints, sort=sort, pos_label=pos_label)
else:
@app.callback(
Output('pdp-graph-'+self.name, 'figure'),
[Input('pdp-col-'+self.name, 'value'),
Input('pdp-dropna-'+self.name, 'value'),
Input('pdp-sample-'+self.name, 'value'),
Input('pdp-gridlines-'+self.name, 'value'),
Input('pdp-gridpoints-'+self.name, 'value'),
Input('pdp-categories-sort-'+self.name, 'value'),
Input('pos-label-'+self.name, 'value'),
*self.feature_input_component._feature_callback_inputs]
)
def update_pdp_graph(col, drop_na, sample, gridlines, gridpoints, sort, pos_label, *inputs):
X_row = self.explainer.get_row_from_input(inputs, ranked_by_shap=True)
return self.explainer.plot_pdp(col, X_row=X_row,
drop_na=bool(drop_na), sample=sample, gridlines=gridlines,
gridpoints=gridpoints, sort=sort, pos_label=pos_label)
class FeatureInputComponent(ExplainerComponent):
    """Dashboard component with one input widget per feature so the user can
    edit feature values and feed the resulting row to connected what-if
    components."""

    def __init__(self, explainer, title="Feature Input", name=None,
                 subtitle="Adjust the feature values to change the prediction",
                 hide_title=False, hide_subtitle=False, hide_index=False,
                 hide_range=False,
                 index=None, n_input_cols=4, sort_features='shap',
                 fill_row_first=True, description=None, **kwargs):
        """Feature Input Component.

        Args:
            explainer (Explainer): explainer object constructed with either
                ClassifierExplainer() or RegressionExplainer()
            title (str, optional): Title of tab or page. Defaults to
                "Feature Input".
            name (str, optional): unique name to add to Component elements.
                If None then random uuid is generated to make sure
                it's unique. Defaults to None.
            subtitle (str): subtitle
            hide_title (bool, optional): hide the title
            hide_subtitle (bool, optional): Hide subtitle. Defaults to False.
            hide_index (bool, optional): hide the index selector
            hide_range (bool, optional): hide the range label under the inputs
            index (str, int, optional): default index
            n_input_cols (int, optional): number of columns to split features inputs in.
                Defaults to 4.
            sort_features (str, optional): how to sort the features: 'shap' to sort
                by mean absolute shap value, 'alphabet' to sort alphabetically.
            fill_row_first (bool, optional): if True most important features will
                be on top row, if False they will be in most left column.
            description (str, optional): Tooltip to display when hover over
                component title. When None default text is shown.

        Raises:
            ValueError: if sort_features is neither 'shap' nor 'alphabet'.
        """
        super().__init__(explainer, title, name)

        # Duplicate column names would produce duplicate dash element ids.
        assert len(explainer.columns) == len(set(explainer.columns)), \
            "Not all X column names are unique, so cannot launch FeatureInputComponent component/tab!"

        self.index_name = 'feature-input-index-'+self.name

        # One dash Input/Output per feature so connected components can read
        # and write the individual input widgets.
        self._feature_callback_inputs = [
            Input('feature-input-'+feature+'-input-'+self.name, 'value')
            for feature in self.explainer.columns_ranked_by_shap()]
        self._feature_callback_outputs = [
            Output('feature-input-'+feature+'-input-'+self.name, 'value')
            for feature in self.explainer.columns_ranked_by_shap()]

        if self.sort_features == 'shap':
            self._input_features = self.explainer.columns_ranked_by_shap()
        elif self.sort_features == 'alphabet':
            self._input_features = sorted(self.explainer.merged_cols.tolist())
        else:
            # BUGFIX: the second string fragment was a plain literal, so the
            # offending value was never interpolated into the message.
            raise ValueError(f"parameter sort_features should be either 'shap', "
                    f"or 'alphabet', but you passed sort_features='{self.sort_features}'")

        self._feature_inputs = [
            self._generate_dash_input(
                feature, self.explainer.onehot_cols, self.explainer.onehot_dict, self.explainer.categorical_dict)
            for feature in self._input_features]

        # (id-prefix, property) pairs used to reconstruct component state;
        # the component name is appended to the prefix elsewhere.
        self._state_props = {feature: (f'feature-input-{feature}-input-', 'value')
                             for feature in self._input_features}
        self._state_props['index'] = ('feature-input-index-', 'value')

        if self.description is None: self.description = """
        Adjust the input values to see predictions for what if scenarios."""

    def _generate_dash_input(self, col, onehot_cols, onehot_dict, cat_dict):
        """Return the appropriate dash input widget for *col*: a dropdown for
        categorical and onehot-encoded features, a numeric input otherwise."""
        if col in cat_dict:
            col_values = cat_dict[col]
            return dbc.FormGroup([
                dbc.Label(col),
                dcc.Dropdown(id='feature-input-'+col+'-input-'+self.name,
                             options=[dict(label=col_val, value=col_val) for col_val in col_values],
                             clearable=False),
                dbc.FormText(f"Select any {col}") if not self.hide_range else None,
            ])
        elif col in onehot_cols:
            col_values = [c for c in onehot_dict[col]]
            # Strip the "<col>_" prefix from encoded column names for display.
            display_values = [
                col_val[len(col)+1:] if col_val.startswith(col+"_") else col_val
                for col_val in col_values]
            # If some rows have no onehot column set at all, offer the
            # not-encoded category as an extra option.
            if any(self.explainer.X[self.explainer.onehot_dict[col]].sum(axis=1) == 0):
                col_values.append(self.explainer.onehot_notencoded[col])
                display_values.append(self.explainer.onehot_notencoded[col])
            return dbc.FormGroup([
                dbc.Label(col),
                dcc.Dropdown(id='feature-input-'+col+'-input-'+self.name,
                             options=[dict(label=display, value=col_val)
                                      for display, col_val in zip(display_values, col_values)],
                             clearable=False),
                dbc.FormText(f"Select any {col}") if not self.hide_range else None,
            ])
        else:
            # Numeric feature: show the observed range, ignoring na_fill values.
            min_range = np.round(self.explainer.X[col][lambda x: x != self.explainer.na_fill].min(), 2)
            max_range = np.round(self.explainer.X[col][lambda x: x != self.explainer.na_fill].max(), 2)
            return dbc.FormGroup([
                dbc.Label(col),
                dbc.Input(id='feature-input-'+col+'-input-'+self.name, type="number"),
                dbc.FormText(f"Range: {min_range}-{max_range}") if not self.hide_range else None
            ])

    def get_slices_cols_first(self, n_inputs, n_cols=2):
        """returns a list of slices to divide n inputs into n_cols columns,
        filling columns first"""
        if n_inputs < n_cols:
            n_cols = n_inputs
        rows_per_col = ceil(n_inputs / n_cols)
        slices = []
        for col in range(n_cols):
            # Last column absorbs the remainder when n_inputs doesn't divide evenly.
            if col == n_cols-1 and n_inputs % rows_per_col > 0:
                slices.append(slice(col*rows_per_col, col*rows_per_col+(n_inputs % rows_per_col)))
            else:
                slices.append(slice(col*rows_per_col, col*rows_per_col+rows_per_col))
        return slices

    def get_slices_rows_first(self, n_inputs, n_cols=3):
        """returns a list of slices to divide n inputs into n_cols columns,
        filling rows first"""
        if n_inputs < n_cols:
            slices = [slice(i, i+1, 1) for i in range(n_inputs)]
        else:
            # Stride through the flat feature list so consecutive features land
            # in consecutive columns; the last partial row is handled by the
            # shorter slice in the else-branch of the conditional expression.
            slices = [slice(i, 1+i+(ceil(n_inputs/n_cols)-1)*n_cols, n_cols)
                            if i+n_cols*(ceil(n_inputs/n_cols)-1) < n_inputs else
                      slice(i, 1+i+(int(n_inputs/n_cols)-1)*n_cols, n_cols)
                            for i in range(n_cols)]
        return slices

    def layout(self):
        """Build the dash layout: an optional index selector plus the grid of inputs."""
        if self.fill_row_first:
            input_row = dbc.Row([dbc.Col(self._feature_inputs[slicer])
                for slicer in self.get_slices_rows_first(len(self._feature_inputs), self.n_input_cols)])
        else:
            input_row = dbc.Row([dbc.Col(self._feature_inputs[slicer])
                for slicer in self.get_slices_cols_first(len(self._feature_inputs), self.n_input_cols)])
        return dbc.Card([
            make_hideable(
                dbc.CardHeader([
                    html.Div([
                        html.H3(self.title, id='feature-input-title-'+self.name),
                        make_hideable(html.H6(self.subtitle, className='card-subtitle'), hide=self.hide_subtitle),
                        dbc.Tooltip(self.description, target='feature-input-title-'+self.name),
                    ]),
                ]), hide=self.hide_title),
            dbc.CardBody([
                dbc.Row([
                    make_hideable(
                        dbc.Col([
                            dbc.Label(f"{self.explainer.index_name}:"),
                            dcc.Dropdown(id='feature-input-index-'+self.name,
                                options = [{'label': str(idx), 'value':idx}
                                            for idx in self.explainer.get_index_list()],
                                value=self.index)
                        ], md=4), hide=self.hide_index),
                ], form=True),
                input_row,
            ])
        ])

    def to_html(self, state_dict=None, add_header=True):
        """Render a static html version of the component."""
        args = self.get_state_args(state_dict)
        # Disabled inputs: the static page only displays the selected row.
        html_inputs = [to_html.input(feature, args.get(feature, None), disabled=True) for feature in self._input_features]
        html = to_html.hide(f"Selected: <b>{args['index']}</b>", hide=self.hide_index)
        if self.fill_row_first:
            html += to_html.row(*["".join(html_inputs[slicer]) for slicer in self.get_slices_rows_first(len(self._input_features), self.n_input_cols)])
        else:
            html += to_html.row(*["".join(html_inputs[slicer]) for slicer in self.get_slices_cols_first(len(self._input_features), self.n_input_cols)])
        html = to_html.card(html, title=self.title, subtitle=self.subtitle)
        if add_header:
            return to_html.add_header(html)
        return html

    def component_callbacks(self, app):
        """When the index dropdown changes, fill every feature input widget
        with the corresponding row's values."""
        @app.callback(
            [*self._feature_callback_outputs],
            [Input('feature-input-index-'+self.name, 'value')]
        )
        def update_whatif_inputs(index):
            if index is None or not self.explainer.index_exists(index):
                raise PreventUpdate
            X_row = self.explainer.get_X_row(index, merge=True)[self.explainer.columns_ranked_by_shap()]
            return X_row.values[0].tolist()
|
[
"dash_bootstrap_components.RadioButton",
"dash_bootstrap_components.Label",
"dash.Output",
"dash_bootstrap_components.Input",
"dash_bootstrap_components.Select",
"dash_bootstrap_components.Tooltip",
"math.ceil",
"dash.html.Div",
"dash_bootstrap_components.Col",
"dash_bootstrap_components.FormText",
"dash.Input",
"dash.dcc.Markdown",
"dash.html.H6",
"dash.html.Label",
"dash_bootstrap_components.Checklist",
"dash.html.H3"
] |
[((39603, 39626), 'math.ceil', 'ceil', (['(n_inputs / n_cols)'], {}), '(n_inputs / n_cols)\n', (39607, 39626), False, 'from math import ceil\n'), ((4323, 4373), 'dash.Output', 'Output', (["('modelprediction-' + self.name)", '"""children"""'], {}), "('modelprediction-' + self.name, 'children')\n", (4329, 4373), False, 'from dash import html, dcc, Input, Output, State, dash_table\n'), ((12742, 12792), 'dash.Output', 'Output', (["('importances-graph-' + self.name)", '"""figure"""'], {}), "('importances-graph-' + self.name, 'figure')\n", (12748, 12792), False, 'from dash import html, dcc, Input, Output, State, dash_table\n'), ((16604, 16665), 'dash.Output', 'Output', (["('feature-descriptions-table-' + self.name)", '"""children"""'], {}), "('feature-descriptions-table-' + self.name, 'children')\n", (16610, 16665), False, 'from dash import html, dcc, Input, Output, State, dash_table\n'), ((16677, 16739), 'dash.Input', 'Input', (["('feature-descriptions-table-sort-' + self.name)", '"""value"""'], {}), "('feature-descriptions-table-sort-' + self.name, 'value')\n", (16682, 16739), False, 'from dash import html, dcc, Input, Output, State, dash_table\n'), ((31264, 31319), 'dash.Output', 'Output', (["('pdp-categories-sort-div-' + self.name)", '"""style"""'], {}), "('pdp-categories-sort-div-' + self.name, 'style')\n", (31270, 31319), False, 'from dash import html, dcc, Input, Output, State, dash_table\n'), ((31331, 31369), 'dash.Input', 'Input', (["('pdp-col-' + self.name)", '"""value"""'], {}), "('pdp-col-' + self.name, 'value')\n", (31336, 31369), False, 'from dash import html, dcc, Input, Output, State, dash_table\n'), ((35918, 35984), 'dash.Input', 'Input', (["('feature-input-' + feature + '-input-' + self.name)", '"""value"""'], {}), "('feature-input-' + feature + '-input-' + self.name, 'value')\n", (35923, 35984), False, 'from dash import html, dcc, Input, Output, State, dash_table\n'), ((36119, 36186), 'dash.Output', 'Output', (["('feature-input-' + feature + '-input-' 
+ self.name)", '"""value"""'], {}), "('feature-input-' + feature + '-input-' + self.name, 'value')\n", (36125, 36186), False, 'from dash import html, dcc, Input, Output, State, dash_table\n'), ((4386, 4438), 'dash.Input', 'Input', (["('modelprediction-index-' + self.name)", '"""value"""'], {}), "('modelprediction-index-' + self.name, 'value')\n", (4391, 4438), False, 'from dash import html, dcc, Input, Output, State, dash_table\n'), ((4451, 4510), 'dash.Input', 'Input', (["('modelprediction-percentile-' + self.name)", '"""checked"""'], {}), "('modelprediction-percentile-' + self.name, 'checked')\n", (4456, 4510), False, 'from dash import html, dcc, Input, Output, State, dash_table\n'), ((4523, 4563), 'dash.Input', 'Input', (["('pos-label-' + self.name)", '"""value"""'], {}), "('pos-label-' + self.name, 'value')\n", (4528, 4563), False, 'from dash import html, dcc, Input, Output, State, dash_table\n'), ((12805, 12853), 'dash.Input', 'Input', (["('importances-depth-' + self.name)", '"""value"""'], {}), "('importances-depth-' + self.name, 'value')\n", (12810, 12853), False, 'from dash import html, dcc, Input, Output, State, dash_table\n'), ((12866, 12928), 'dash.Input', 'Input', (["('importances-permutation-or-shap-' + self.name)", '"""value"""'], {}), "('importances-permutation-or-shap-' + self.name, 'value')\n", (12871, 12928), False, 'from dash import html, dcc, Input, Output, State, dash_table\n'), ((12941, 12981), 'dash.Input', 'Input', (["('pos-label-' + self.name)", '"""value"""'], {}), "('pos-label-' + self.name, 'value')\n", (12946, 12981), False, 'from dash import html, dcc, Input, Output, State, dash_table\n'), ((31599, 31641), 'dash.Output', 'Output', (["('pdp-graph-' + self.name)", '"""figure"""'], {}), "('pdp-graph-' + self.name, 'figure')\n", (31605, 31641), False, 'from dash import html, dcc, Input, Output, State, dash_table\n'), ((32621, 32663), 'dash.Output', 'Output', (["('pdp-graph-' + self.name)", '"""figure"""'], {}), "('pdp-graph-' + self.name, 
'figure')\n", (32627, 32663), False, 'from dash import html, dcc, Input, Output, State, dash_table\n'), ((43137, 43187), 'dash.Input', 'Input', (["('feature-input-index-' + self.name)", '"""value"""'], {}), "('feature-input-index-' + self.name, 'value')\n", (43142, 43187), False, 'from dash import html, dcc, Input, Output, State, dash_table\n'), ((31658, 31698), 'dash.Input', 'Input', (["('pdp-index-' + self.name)", '"""value"""'], {}), "('pdp-index-' + self.name, 'value')\n", (31663, 31698), False, 'from dash import html, dcc, Input, Output, State, dash_table\n'), ((31715, 31753), 'dash.Input', 'Input', (["('pdp-col-' + self.name)", '"""value"""'], {}), "('pdp-col-' + self.name, 'value')\n", (31720, 31753), False, 'from dash import html, dcc, Input, Output, State, dash_table\n'), ((31770, 31811), 'dash.Input', 'Input', (["('pdp-dropna-' + self.name)", '"""value"""'], {}), "('pdp-dropna-' + self.name, 'value')\n", (31775, 31811), False, 'from dash import html, dcc, Input, Output, State, dash_table\n'), ((31828, 31869), 'dash.Input', 'Input', (["('pdp-sample-' + self.name)", '"""value"""'], {}), "('pdp-sample-' + self.name, 'value')\n", (31833, 31869), False, 'from dash import html, dcc, Input, Output, State, dash_table\n'), ((31886, 31930), 'dash.Input', 'Input', (["('pdp-gridlines-' + self.name)", '"""value"""'], {}), "('pdp-gridlines-' + self.name, 'value')\n", (31891, 31930), False, 'from dash import html, dcc, Input, Output, State, dash_table\n'), ((31947, 31992), 'dash.Input', 'Input', (["('pdp-gridpoints-' + self.name)", '"""value"""'], {}), "('pdp-gridpoints-' + self.name, 'value')\n", (31952, 31992), False, 'from dash import html, dcc, Input, Output, State, dash_table\n'), ((32009, 32059), 'dash.Input', 'Input', (["('pdp-categories-sort-' + self.name)", '"""value"""'], {}), "('pdp-categories-sort-' + self.name, 'value')\n", (32014, 32059), False, 'from dash import html, dcc, Input, Output, State, dash_table\n'), ((32076, 32116), 'dash.Input', 'Input', 
(["('pos-label-' + self.name)", '"""value"""'], {}), "('pos-label-' + self.name, 'value')\n", (32081, 32116), False, 'from dash import html, dcc, Input, Output, State, dash_table\n'), ((32680, 32718), 'dash.Input', 'Input', (["('pdp-col-' + self.name)", '"""value"""'], {}), "('pdp-col-' + self.name, 'value')\n", (32685, 32718), False, 'from dash import html, dcc, Input, Output, State, dash_table\n'), ((32735, 32776), 'dash.Input', 'Input', (["('pdp-dropna-' + self.name)", '"""value"""'], {}), "('pdp-dropna-' + self.name, 'value')\n", (32740, 32776), False, 'from dash import html, dcc, Input, Output, State, dash_table\n'), ((32793, 32834), 'dash.Input', 'Input', (["('pdp-sample-' + self.name)", '"""value"""'], {}), "('pdp-sample-' + self.name, 'value')\n", (32798, 32834), False, 'from dash import html, dcc, Input, Output, State, dash_table\n'), ((32851, 32895), 'dash.Input', 'Input', (["('pdp-gridlines-' + self.name)", '"""value"""'], {}), "('pdp-gridlines-' + self.name, 'value')\n", (32856, 32895), False, 'from dash import html, dcc, Input, Output, State, dash_table\n'), ((32912, 32957), 'dash.Input', 'Input', (["('pdp-gridpoints-' + self.name)", '"""value"""'], {}), "('pdp-gridpoints-' + self.name, 'value')\n", (32917, 32957), False, 'from dash import html, dcc, Input, Output, State, dash_table\n'), ((32974, 33024), 'dash.Input', 'Input', (["('pdp-categories-sort-' + self.name)", '"""value"""'], {}), "('pdp-categories-sort-' + self.name, 'value')\n", (32979, 33024), False, 'from dash import html, dcc, Input, Output, State, dash_table\n'), ((33041, 33081), 'dash.Input', 'Input', (["('pos-label-' + self.name)", '"""value"""'], {}), "('pos-label-' + self.name, 'value')\n", (33046, 33081), False, 'from dash import html, dcc, Input, Output, State, dash_table\n'), ((37520, 37534), 'dash_bootstrap_components.Label', 'dbc.Label', (['col'], {}), '(col)\n', (37529, 37534), True, 'import dash_bootstrap_components as dbc\n'), ((40652, 40689), 'dash_bootstrap_components.Col', 
'dbc.Col', (['self._feature_inputs[slicer]'], {}), '(self._feature_inputs[slicer])\n', (40659, 40689), True, 'import dash_bootstrap_components as dbc\n'), ((40855, 40892), 'dash_bootstrap_components.Col', 'dbc.Col', (['self._feature_inputs[slicer]'], {}), '(self._feature_inputs[slicer])\n', (40862, 40892), True, 'import dash_bootstrap_components as dbc\n'), ((37783, 37816), 'dash_bootstrap_components.FormText', 'dbc.FormText', (['f"""Select any {col}"""'], {}), "(f'Select any {col}')\n", (37795, 37816), True, 'import dash_bootstrap_components as dbc\n'), ((38413, 38427), 'dash_bootstrap_components.Label', 'dbc.Label', (['col'], {}), '(col)\n', (38422, 38427), True, 'import dash_bootstrap_components as dbc\n'), ((39114, 39128), 'dash_bootstrap_components.Label', 'dbc.Label', (['col'], {}), '(col)\n', (39123, 39128), True, 'import dash_bootstrap_components as dbc\n'), ((39150, 39225), 'dash_bootstrap_components.Input', 'dbc.Input', ([], {'id': "('feature-input-' + col + '-input-' + self.name)", 'type': '"""number"""'}), "(id='feature-input-' + col + '-input-' + self.name, type='number')\n", (39159, 39225), True, 'import dash_bootstrap_components as dbc\n'), ((2438, 2457), 'dash.html.H3', 'html.H3', (['self.title'], {}), '(self.title)\n', (2445, 2457), False, 'from dash import html, dcc, Input, Output, State, dash_table\n'), ((14761, 14780), 'dash.html.H3', 'html.H3', (['self.title'], {}), '(self.title)\n', (14768, 14780), False, 'from dash import html, dcc, Input, Output, State, dash_table\n'), ((38747, 38780), 'dash_bootstrap_components.FormText', 'dbc.FormText', (['f"""Select any {col}"""'], {}), "(f'Select any {col}')\n", (38759, 38780), True, 'import dash_bootstrap_components as dbc\n'), ((39241, 39288), 'dash_bootstrap_components.FormText', 'dbc.FormText', (['f"""Range: {min_range}-{max_range}"""'], {}), "(f'Range: {min_range}-{max_range}')\n", (39253, 39288), True, 'import dash_bootstrap_components as dbc\n'), ((8844, 8929), 'dash.html.H3', 'html.H3', 
(['self.title'], {'className': '"""card-title"""', 'id': "('importances-title-' + self.name)"}), "(self.title, className='card-title', id='importances-title-' + self.name\n )\n", (8851, 8929), False, 'from dash import html, dcc, Input, Output, State, dash_table\n'), ((9063, 9133), 'dash_bootstrap_components.Tooltip', 'dbc.Tooltip', (['self.description'], {'target': "('importances-title-' + self.name)"}), "(self.description, target='importances-title-' + self.name)\n", (9074, 9133), True, 'import dash_bootstrap_components as dbc\n'), ((22640, 22688), 'dash.html.H3', 'html.H3', (['self.title'], {'id': "('pdp-title-' + self.name)"}), "(self.title, id='pdp-title-' + self.name)\n", (22647, 22688), False, 'from dash import html, dcc, Input, Output, State, dash_table\n'), ((22835, 22897), 'dash_bootstrap_components.Tooltip', 'dbc.Tooltip', (['self.description'], {'target': "('pdp-title-' + self.name)"}), "(self.description, target='pdp-title-' + self.name)\n", (22846, 22897), True, 'import dash_bootstrap_components as dbc\n'), ((40366, 40389), 'math.ceil', 'ceil', (['(n_inputs / n_cols)'], {}), '(n_inputs / n_cols)\n', (40370, 40389), False, 'from math import ceil\n'), ((40287, 40310), 'math.ceil', 'ceil', (['(n_inputs / n_cols)'], {}), '(n_inputs / n_cols)\n', (40291, 40310), False, 'from math import ceil\n'), ((41160, 41218), 'dash.html.H3', 'html.H3', (['self.title'], {'id': "('feature-input-title-' + self.name)"}), "(self.title, id='feature-input-title-' + self.name)\n", (41167, 41218), False, 'from dash import html, dcc, Input, Output, State, dash_table\n'), ((41365, 41437), 'dash_bootstrap_components.Tooltip', 'dbc.Tooltip', (['self.description'], {'target': "('feature-input-title-' + self.name)"}), "(self.description, target='feature-input-title-' + self.name)\n", (41376, 41437), True, 'import dash_bootstrap_components as dbc\n'), ((4121, 4168), 'dash.dcc.Markdown', 'dcc.Markdown', ([], {'id': "('modelprediction-' + self.name)"}), "(id='modelprediction-' + 
self.name)\n", (4133, 4168), False, 'from dash import html, dcc, Input, Output, State, dash_table\n'), ((8962, 9011), 'dash.html.H6', 'html.H6', (['self.subtitle'], {'className': '"""card-subtitle"""'}), "(self.subtitle, className='card-subtitle')\n", (8969, 9011), False, 'from dash import html, dcc, Input, Output, State, dash_table\n'), ((16065, 16119), 'dash.html.Div', 'html.Div', ([], {'id': "('feature-descriptions-table-' + self.name)"}), "(id='feature-descriptions-table-' + self.name)\n", (16073, 16119), False, 'from dash import html, dcc, Input, Output, State, dash_table\n'), ((22730, 22779), 'dash.html.H6', 'html.H6', (['self.subtitle'], {'className': '"""card-subtitle"""'}), "(self.subtitle, className='card-subtitle')\n", (22737, 22779), False, 'from dash import html, dcc, Input, Output, State, dash_table\n'), ((41260, 41309), 'dash.html.H6', 'html.H6', (['self.subtitle'], {'className': '"""card-subtitle"""'}), "(self.subtitle, className='card-subtitle')\n", (41267, 41309), False, 'from dash import html, dcc, Input, Output, State, dash_table\n'), ((2654, 2696), 'dash_bootstrap_components.Label', 'dbc.Label', (['f"""{self.explainer.index_name}:"""'], {}), "(f'{self.explainer.index_name}:')\n", (2663, 2696), True, 'import dash_bootstrap_components as dbc\n'), ((3311, 3340), 'dash_bootstrap_components.Label', 'dbc.Label', (['"""Show Percentile:"""'], {}), "('Show Percentile:')\n", (3320, 3340), True, 'import dash_bootstrap_components as dbc\n'), ((10202, 10509), 'dash_bootstrap_components.Tooltip', 'dbc.Tooltip', (['"""Select Feature importance type: \nPermutation Importance: How much does performance metric decrease when shuffling this feature?\nSHAP values: What is the average SHAP contribution (positive or negative) of this feature?"""'], {'target': "('importances-permutation-or-shap-form-' + self.name)"}), '(\n """Select Feature importance type: \nPermutation Importance: How much does performance metric decrease when shuffling this feature?\nSHAP values: 
What is the average SHAP contribution (positive or negative) of this feature?"""\n , target=\'importances-permutation-or-shap-form-\' + self.name)\n', (10213, 10509), True, 'import dash_bootstrap_components as dbc\n'), ((10759, 10822), 'dash.html.Label', 'html.Label', (['"""Depth:"""'], {'id': "('importances-depth-label-' + self.name)"}), "('Depth:', id='importances-depth-label-' + self.name)\n", (10769, 10822), False, 'from dash import html, dcc, Input, Output, State, dash_table\n'), ((11164, 11266), 'dash_bootstrap_components.Tooltip', 'dbc.Tooltip', (['"""Select how many features to display"""'], {'target': "('importances-depth-label-' + self.name)"}), "('Select how many features to display', target=\n 'importances-depth-label-' + self.name)\n", (11175, 11266), True, 'import dash_bootstrap_components as dbc\n'), ((15664, 15831), 'dash_bootstrap_components.Tooltip', 'dbc.Tooltip', (['"""Sort features either alphabetically or from highest mean absolute SHAP value to lowest."""'], {'target': "('feature-descriptions-table-sort-' + self.name)"}), "(\n 'Sort features either alphabetically or from highest mean absolute SHAP value to lowest.'\n , target='feature-descriptions-table-sort-' + self.name)\n", (15675, 15831), True, 'import dash_bootstrap_components as dbc\n'), ((23119, 23209), 'dash_bootstrap_components.Label', 'dbc.Label', (['"""Feature:"""'], {'html_for': "('pdp-col' + self.name)", 'id': "('pdp-col-label-' + self.name)"}), "('Feature:', html_for='pdp-col' + self.name, id='pdp-col-label-' +\n self.name)\n", (23128, 23209), True, 'import dash_bootstrap_components as dbc\n'), ((23268, 23401), 'dash_bootstrap_components.Tooltip', 'dbc.Tooltip', (['"""Select the feature for which you want to see the partial dependence plot"""'], {'target': "('pdp-col-label-' + self.name)"}), "(\n 'Select the feature for which you want to see the partial dependence plot',\n target='pdp-col-label-' + self.name)\n", (23279, 23401), True, 'import dash_bootstrap_components as 
dbc\n'), ((23872, 23949), 'dash_bootstrap_components.Label', 'dbc.Label', (['f"""{self.explainer.index_name}:"""'], {'id': "('pdp-index-label-' + self.name)"}), "(f'{self.explainer.index_name}:', id='pdp-index-label-' + self.name)\n", (23881, 23949), True, 'import dash_bootstrap_components as dbc\n'), ((23977, 24123), 'dash_bootstrap_components.Tooltip', 'dbc.Tooltip', (['f"""Select the {self.explainer.index_name} to display the partial dependence plot for"""'], {'target': "('pdp-index-label-' + self.name)"}), "(\n f'Select the {self.explainer.index_name} to display the partial dependence plot for'\n , target='pdp-index-label-' + self.name)\n", (23988, 24123), True, 'import dash_bootstrap_components as dbc\n'), ((41667, 41709), 'dash_bootstrap_components.Label', 'dbc.Label', (['f"""{self.explainer.index_name}:"""'], {}), "(f'{self.explainer.index_name}:')\n", (41676, 41709), True, 'import dash_bootstrap_components as dbc\n'), ((26273, 26329), 'dash_bootstrap_components.Label', 'dbc.Label', (['"""Sample:"""'], {'id': "('pdp-sample-label-' + self.name)"}), "('Sample:', id='pdp-sample-label-' + self.name)\n", (26282, 26329), True, 'import dash_bootstrap_components as dbc\n'), ((26358, 26491), 'dash_bootstrap_components.Tooltip', 'dbc.Tooltip', (['"""Number of observations to use to calculate average partial dependence"""'], {'target': "('pdp-sample-label-' + self.name)"}), "(\n 'Number of observations to use to calculate average partial dependence',\n target='pdp-sample-label-' + self.name)\n", (26369, 26491), True, 'import dash_bootstrap_components as dbc\n'), ((26888, 26950), 'dash_bootstrap_components.Label', 'dbc.Label', (['"""Gridlines:"""'], {'id': "('pdp-gridlines-label-' + self.name)"}), "('Gridlines:', id='pdp-gridlines-label-' + self.name)\n", (26897, 26950), True, 'import dash_bootstrap_components as dbc\n'), ((26979, 27116), 'dash_bootstrap_components.Tooltip', 'dbc.Tooltip', (['"""Number of individual observations\' partial dependences to show in 
plot"""'], {'target': "('pdp-gridlines-label-' + self.name)"}), '(\n "Number of individual observations\' partial dependences to show in plot",\n target=\'pdp-gridlines-label-\' + self.name)\n', (26990, 27116), True, 'import dash_bootstrap_components as dbc\n'), ((27521, 27585), 'dash_bootstrap_components.Label', 'dbc.Label', (['"""Gridpoints:"""'], {'id': "('pdp-gridpoints-label-' + self.name)"}), "('Gridpoints:', id='pdp-gridpoints-label-' + self.name)\n", (27530, 27585), True, 'import dash_bootstrap_components as dbc\n'), ((27614, 27809), 'dash_bootstrap_components.Tooltip', 'dbc.Tooltip', (['"""Number of points to sample the feature axis for predictions. The higher, the smoother the curve, but takes longer to calculate"""'], {'target': "('pdp-gridpoints-label-' + self.name)"}), "(\n 'Number of points to sample the feature axis for predictions. The higher, the smoother the curve, but takes longer to calculate'\n , target='pdp-gridpoints-label-' + self.name)\n", (27625, 27809), True, 'import dash_bootstrap_components as dbc\n'), ((27912, 28022), 'dash_bootstrap_components.Input', 'dbc.Input', ([], {'id': "('pdp-gridpoints-' + self.name)", 'value': 'self.gridpoints', 'type': '"""number"""', 'min': '(0)', 'max': '(100)', 'step': '(1)'}), "(id='pdp-gridpoints-' + self.name, value=self.gridpoints, type=\n 'number', min=0, max=100, step=1)\n", (27921, 28022), True, 'import dash_bootstrap_components as dbc\n'), ((3447, 3568), 'dash_bootstrap_components.RadioButton', 'dbc.RadioButton', ([], {'id': "('modelprediction-percentile-' + self.name)", 'className': '"""form-check-input"""', 'checked': 'self.percentile'}), "(id='modelprediction-percentile-' + self.name, className=\n 'form-check-input', checked=self.percentile)\n", (3462, 3568), True, 'import dash_bootstrap_components as dbc\n'), ((3705, 3819), 'dash_bootstrap_components.Label', 'dbc.Label', (['"""Show percentile"""'], {'html_for': "('modelprediction-percentile' + self.name)", 'className': 
'"""form-check-label"""'}), "('Show percentile', html_for='modelprediction-percentile' + self.\n name, className='form-check-label')\n", (3714, 3819), True, 'import dash_bootstrap_components as dbc\n'), ((9394, 9424), 'dash_bootstrap_components.Label', 'dbc.Label', (['"""Importances type:"""'], {}), "('Importances type:')\n", (9403, 9424), True, 'import dash_bootstrap_components as dbc\n'), ((9458, 9671), 'dash_bootstrap_components.Select', 'dbc.Select', ([], {'options': "[{'label': 'Permutation Importances', 'value': 'permutation'}, {'label':\n 'SHAP values', 'value': 'shap'}]", 'value': 'self.importance_type', 'id': "('importances-permutation-or-shap-' + self.name)"}), "(options=[{'label': 'Permutation Importances', 'value':\n 'permutation'}, {'label': 'SHAP values', 'value': 'shap'}], value=self.\n importance_type, id='importances-permutation-or-shap-' + self.name)\n", (9468, 9671), True, 'import dash_bootstrap_components as dbc\n'), ((15025, 15052), 'dash_bootstrap_components.Label', 'dbc.Label', (['"""Sort Features:"""'], {}), "('Sort Features:')\n", (15034, 15052), True, 'import dash_bootstrap_components as dbc\n'), ((15086, 15270), 'dash_bootstrap_components.Select', 'dbc.Select', ([], {'options': "[{'label': 'Alphabetically', 'value': 'alphabet'}, {'label': 'SHAP',\n 'value': 'shap'}]", 'value': 'self.sort', 'id': "('feature-descriptions-table-sort-' + self.name)"}), "(options=[{'label': 'Alphabetically', 'value': 'alphabet'}, {\n 'label': 'SHAP', 'value': 'shap'}], value=self.sort, id=\n 'feature-descriptions-table-sort-' + self.name)\n", (15096, 15270), True, 'import dash_bootstrap_components as dbc\n'), ((25301, 25324), 'dash_bootstrap_components.Label', 'dbc.Label', (['"""Drop fill:"""'], {}), "('Drop fill:')\n", (25310, 25324), True, 'import dash_bootstrap_components as dbc\n'), ((25358, 25565), 'dash_bootstrap_components.Tooltip', 'dbc.Tooltip', (['f"""Drop all observations with feature values equal to {self.explainer.na_fill} from the plot. 
This prevents the filler values from ruining the x-axis."""'], {'target': "('pdp-dropna-' + self.name)"}), "(\n f'Drop all observations with feature values equal to {self.explainer.na_fill} from the plot. This prevents the filler values from ruining the x-axis.'\n , target='pdp-dropna-' + self.name)\n", (25369, 25565), True, 'import dash_bootstrap_components as dbc\n'), ((25718, 25885), 'dash_bootstrap_components.Checklist', 'dbc.Checklist', ([], {'options': "[{'label': 'Drop na_fill', 'value': True}]", 'value': '([True] if self.dropna else [])', 'id': "('pdp-dropna-' + self.name)", 'inline': '(True)', 'switch': '(True)'}), "(options=[{'label': 'Drop na_fill', 'value': True}], value=[\n True] if self.dropna else [], id='pdp-dropna-' + self.name, inline=True,\n switch=True)\n", (25731, 25885), True, 'import dash_bootstrap_components as dbc\n'), ((28249, 28324), 'dash.html.Label', 'html.Label', (['"""Sort categories:"""'], {'id': "('pdp-categories-sort-label-' + self.name)"}), "('Sort categories:', id='pdp-categories-sort-label-' + self.name)\n", (28259, 28324), False, 'from dash import html, dcc, Input, Output, State, dash_table\n'), ((28360, 28564), 'dash_bootstrap_components.Tooltip', 'dbc.Tooltip', (['"""How to sort the categories: Alphabetically, most common first (Frequency), or highest mean absolute SHAP value first (Shap impact)"""'], {'target': "('pdp-categories-sort-label-' + self.name)"}), "(\n 'How to sort the categories: Alphabetically, most common first (Frequency), or highest mean absolute SHAP value first (Shap impact)'\n , target='pdp-categories-sort-label-' + self.name)\n", (28371, 28564), True, 'import dash_bootstrap_components as dbc\n'), ((28690, 28913), 'dash_bootstrap_components.Select', 'dbc.Select', ([], {'id': "('pdp-categories-sort-' + self.name)", 'options': "[{'label': 'Alphabetically', 'value': 'alphabet'}, {'label': 'Frequency',\n 'value': 'freq'}, {'label': 'Shap impact', 'value': 'shap'}]", 'value': 'self.cats_sort'}), 
"(id='pdp-categories-sort-' + self.name, options=[{'label':\n 'Alphabetically', 'value': 'alphabet'}, {'label': 'Frequency', 'value':\n 'freq'}, {'label': 'Shap impact', 'value': 'shap'}], value=self.cats_sort)\n", (28700, 28913), True, 'import dash_bootstrap_components as dbc\n')]
|
# Line notify
import requests
class LineMain:
    """Minimal helper for pushing text messages through the LINE Notify API."""

    def lineNotifyMessage(self, token, msg):
        """Send *msg* via LINE Notify using the given access *token*.

        Returns the HTTP status code of the notify request.
        """
        response = requests.post(
            "https://notify-api.line.me/api/notify",
            headers={
                "Authorization": "Bearer " + token,
                "Content-Type": "application/x-www-form-urlencoded",
            },
            params={"message": msg},
        )
        return response.status_code
if __name__ == '__main__':
# token = 'iput token'
# message = '基本功能測試'
line = LineMain()
line.lineNotifyMessage(token, message)
|
[
"requests.post"
] |
[((300, 391), 'requests.post', 'requests.post', (['"""https://notify-api.line.me/api/notify"""'], {'headers': 'headers', 'params': 'payload'}), "('https://notify-api.line.me/api/notify', headers=headers,\n params=payload)\n", (313, 391), False, 'import requests\n')]
|
from pathlib import Path
PROJECT_ROOT = Path(__file__).parent.parent
DATA_PATH = PROJECT_ROOT / "intro_to_pytorch/data"
|
[
"pathlib.Path"
] |
[((41, 55), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (45, 55), False, 'from pathlib import Path\n')]
|
import hmac
import hashlib
import json
import requests
BASE_URL = 'https://api.mailgun.net/v3'
class Mailgun(object):
ACCESS_LEVELS = ['readonly', 'members', 'everyone']
def __init__(self, domain, private_key, public_key):
self.private_key = private_key
self.public_key = public_key
self.auth = ('api', private_key)
self.base_url = '{0}/{1}'.format(BASE_URL, domain)
def post(self, path, data, files=None, include_domain=True, auth=None):
url = self.base_url if include_domain else BASE_URL
if not auth:
auth = self.auth
return requests.post(
url + path,
auth=auth,
data=data,
files=files
)
def get(self, path, params=None, auth=None, include_domain=True):
url = self.base_url if include_domain else BASE_URL
if not auth:
auth = self.auth
return requests.get(url + path, auth=auth, params=params)
def send_message(self, from_email, to, cc=None, bcc=None,
subject=None, text=None, html=None, user_variables=None,
reply_to=None, headers=None, inlines=None,
attachments=None, campaign_id=None, tags=None):
# sanity checks
assert (text or html)
data = {
'from': from_email,
'to': to,
'cc': cc or [],
'bcc': bcc or [],
'subject': subject or '',
'text': text or '',
'html': html or '',
}
if reply_to:
data['h:Reply-To'] = reply_to
if headers:
for k, v in headers.items():
data["h:%s" % k] = v
if campaign_id:
data['o:campaign'] = campaign_id
if tags:
data['o:tag'] = tags
if user_variables:
for k, v in user_variables.items():
data['v:%s' % k] = v
files = []
if inlines:
for filename in inlines:
files.append(('inline', open(filename)))
if attachments:
for filename, content_type, content in attachments:
files.append(('attachment', (filename, content, content_type)))
return self.post('/messages', data, files=files)
def get_events(self, begin=None, end=None, ascending=None, limit=None,
filters=None):
params = {}
if begin:
params['begin'] = begin
if end:
params['end'] = end
if ascending:
params['ascending'] = ascending
if limit:
params['limit'] = limit
if filters is None:
filters = {}
params.update(filters)
return self.get('/events', params=params)
def create_list(self, address, name=None, description=None,
access_level=None):
data = {'address': address}
if name:
data['name'] = name
if description:
data['description'] = description
if access_level and access_level in Mailgun.ACCESS_LEVELS:
data['access_level'] = access_level
return self.post('/lists', data, include_domain=False)
def add_list_member(self, list_name, address, name=None, params=None,
subscribed=True, upsert=False):
data = {'address': address}
if name:
data['name'] = name
if params:
data['vars'] = json.dumps(params) if isinstance(
params, dict) else params
if not subscribed:
data['subscribed'] = 'no'
if upsert:
data['upsert'] = 'yes'
url = '/lists/%s/members' % list_name
return self.post(url, data, include_domain=False)
def verify_authenticity(self, token, timestamp, signature):
return signature == hmac.new(
key=self.private_key, msg='{}{}'.format(timestamp, token),
digestmod=hashlib.sha256).hexdigest()
def validate(self, address):
params = {'address': address}
auth = ('api', self.public_key)
return self.get(
'/address/validate',
params=params,
auth=auth,
include_domain=False,
)
|
[
"requests.post",
"requests.get",
"json.dumps"
] |
[((614, 674), 'requests.post', 'requests.post', (['(url + path)'], {'auth': 'auth', 'data': 'data', 'files': 'files'}), '(url + path, auth=auth, data=data, files=files)\n', (627, 674), False, 'import requests\n'), ((929, 979), 'requests.get', 'requests.get', (['(url + path)'], {'auth': 'auth', 'params': 'params'}), '(url + path, auth=auth, params=params)\n', (941, 979), False, 'import requests\n'), ((3506, 3524), 'json.dumps', 'json.dumps', (['params'], {}), '(params)\n', (3516, 3524), False, 'import json\n')]
|
from django.test import SimpleTestCase
from pattern_library.utils import get_template_ancestors
class TestGetTemplateAncestors(SimpleTestCase):
def test_page(self):
self.assertEqual(
get_template_ancestors('patterns/pages/test_page/test_page.html'),
[
'patterns/pages/test_page/test_page.html',
'patterns/base_page.html',
'patterns/base.html',
],
)
def test_fragment(self):
self.assertEqual(
get_template_ancestors('patterns/atoms/test_atom/test_atom.html'),
[
'patterns/atoms/test_atom/test_atom.html',
],
)
def test_parent_template_from_variable(self):
self.assertEqual(
get_template_ancestors(
'patterns/atoms/test_extends/extended.html',
context={'parent_template_name': 'patterns/base.html'},
),
[
'patterns/atoms/test_extends/extended.html',
'patterns/base.html',
],
)
|
[
"pattern_library.utils.get_template_ancestors"
] |
[((210, 275), 'pattern_library.utils.get_template_ancestors', 'get_template_ancestors', (['"""patterns/pages/test_page/test_page.html"""'], {}), "('patterns/pages/test_page/test_page.html')\n", (232, 275), False, 'from pattern_library.utils import get_template_ancestors\n'), ((524, 589), 'pattern_library.utils.get_template_ancestors', 'get_template_ancestors', (['"""patterns/atoms/test_atom/test_atom.html"""'], {}), "('patterns/atoms/test_atom/test_atom.html')\n", (546, 589), False, 'from pattern_library.utils import get_template_ancestors\n'), ((778, 906), 'pattern_library.utils.get_template_ancestors', 'get_template_ancestors', (['"""patterns/atoms/test_extends/extended.html"""'], {'context': "{'parent_template_name': 'patterns/base.html'}"}), "('patterns/atoms/test_extends/extended.html', context\n ={'parent_template_name': 'patterns/base.html'})\n", (800, 906), False, 'from pattern_library.utils import get_template_ancestors\n')]
|
'''
Builds a networkx graph from the coexistance data and attributes calculated by input.py
'''
import sys
import math
import json
import re
import pandas as pd
import networkx as nx
from tqdm import tqdm
def prob_calc():
print('Calculating probability normalised weights...')
# calculate expected probability and weights
def file_len(fname):
with open(fname) as f:
for i, l in enumerate(f):
pass
return i + 1
# read in data
coexist_df = pd.read_csv('data/coexistences.csv', index_col = 0, dtype={'attribute1': str, 'attribute2': str, 'totals': int})
attributes_df = pd.read_csv('data/attributes.csv', index_col = 0, dtype={'attribute': str, 'frequency': int})
sample_no = file_len('data/samples.csv')
#NB totals is the observed frequency
# probability calculations and mapping
attributes_df['prob'] = attributes_df['frequency'] / sample_no
merge1_df = pd.merge(left = coexist_df, right = attributes_df, left_on = 'attribute1', right_on = 'attribute')
merge1_df.rename(columns={'prob':'Attribute1_prob'}, inplace=True)
merge2_df = merge1_df[['attribute1', 'attribute2', 'totals', 'Attribute1_prob']]
merge3_df = pd.merge(left = merge2_df, right = attributes_df, left_on = 'attribute2', right_on = 'attribute')
merge3_df.rename(columns={'prob':'Attribute2_prob'}, inplace=True)
merge4_df = merge3_df[['attribute1', 'attribute2', 'totals', 'Attribute1_prob', 'Attribute2_prob']]
merge4_df['exp'] = ((merge4_df['Attribute1_prob'] * merge4_df['Attribute2_prob']) * sample_no) # looked into warning it throws can't see an issue
merge4_df['diff'] = merge4_df['totals'] - merge4_df['exp']
merge4_df['weight'] = merge4_df['diff']/merge4_df['diff'].sum() # as per stats deffinition 'weights' must add up to 1
# output
merge5_df = merge4_df.sort_values(['diff'])
coexistProb_df = merge5_df[['attribute1', 'attribute2', 'totals', 'exp', 'diff', 'weight']]
# during merge only rows with the corresponding columns are merged!
no_missing = coexist_df.shape[0] - coexistProb_df.shape[0]
if no_missing > 0:
print('WARNING: '+str(no_missing)+' pairs missing due to input file discrepancy.')
else:
print('Full house. No pairs were thrown out.')
return coexistProb_df
if __name__ == "__main__":
# def run():
df = prob_calc() # calculates the probability weighted coexistance weights
df.to_csv('data/coexistencesProb.csv', header = True, mode = 'w')
print('Building NetworkX...')
G=nx.Graph()
G = nx.from_pandas_dataframe(df,source='attribute1', target='attribute2', edge_attr='weight')
# # for cytoscape save
# nx.write_gml(G,'coexistences.gml')
# for gephi save
nx.write_gexf(G,'data/coexistences.gexf') # this file is used by coexistence.py
|
[
"pandas.read_csv",
"pandas.merge",
"networkx.Graph",
"networkx.write_gexf",
"networkx.from_pandas_dataframe"
] |
[((460, 574), 'pandas.read_csv', 'pd.read_csv', (['"""data/coexistences.csv"""'], {'index_col': '(0)', 'dtype': "{'attribute1': str, 'attribute2': str, 'totals': int}"}), "('data/coexistences.csv', index_col=0, dtype={'attribute1': str,\n 'attribute2': str, 'totals': int})\n", (471, 574), True, 'import pandas as pd\n'), ((590, 685), 'pandas.read_csv', 'pd.read_csv', (['"""data/attributes.csv"""'], {'index_col': '(0)', 'dtype': "{'attribute': str, 'frequency': int}"}), "('data/attributes.csv', index_col=0, dtype={'attribute': str,\n 'frequency': int})\n", (601, 685), True, 'import pandas as pd\n'), ((883, 977), 'pandas.merge', 'pd.merge', ([], {'left': 'coexist_df', 'right': 'attributes_df', 'left_on': '"""attribute1"""', 'right_on': '"""attribute"""'}), "(left=coexist_df, right=attributes_df, left_on='attribute1',\n right_on='attribute')\n", (891, 977), True, 'import pandas as pd\n'), ((1145, 1238), 'pandas.merge', 'pd.merge', ([], {'left': 'merge2_df', 'right': 'attributes_df', 'left_on': '"""attribute2"""', 'right_on': '"""attribute"""'}), "(left=merge2_df, right=attributes_df, left_on='attribute2',\n right_on='attribute')\n", (1153, 1238), True, 'import pandas as pd\n'), ((2426, 2436), 'networkx.Graph', 'nx.Graph', ([], {}), '()\n', (2434, 2436), True, 'import networkx as nx\n'), ((2442, 2536), 'networkx.from_pandas_dataframe', 'nx.from_pandas_dataframe', (['df'], {'source': '"""attribute1"""', 'target': '"""attribute2"""', 'edge_attr': '"""weight"""'}), "(df, source='attribute1', target='attribute2',\n edge_attr='weight')\n", (2466, 2536), True, 'import networkx as nx\n'), ((2615, 2657), 'networkx.write_gexf', 'nx.write_gexf', (['G', '"""data/coexistences.gexf"""'], {}), "(G, 'data/coexistences.gexf')\n", (2628, 2657), True, 'import networkx as nx\n')]
|
"""Script that merges configurations for debug or simplification."""
from __future__ import print_function
import argparse
import yaml
from opennmt.config import load_config
def main():
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("config", nargs="+", help="Configuration files.")
args = parser.parse_args()
config = load_config(args.config)
print(yaml.dump(config, default_flow_style=False))
if __name__ == "__main__":
main()
|
[
"opennmt.config.load_config",
"yaml.dump",
"argparse.ArgumentParser"
] |
[((202, 281), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'formatter_class': 'argparse.ArgumentDefaultsHelpFormatter'}), '(formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n', (225, 281), False, 'import argparse\n'), ((394, 418), 'opennmt.config.load_config', 'load_config', (['args.config'], {}), '(args.config)\n', (405, 418), False, 'from opennmt.config import load_config\n'), ((427, 470), 'yaml.dump', 'yaml.dump', (['config'], {'default_flow_style': '(False)'}), '(config, default_flow_style=False)\n', (436, 470), False, 'import yaml\n')]
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Confusion matrix related metrics."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import confusion_matrix as cm
def confusion_matrix(labels, predictions, num_classes=None, dtype=dtypes.int32,
name=None, weights=None):
"""Deprecated. Use tf.confusion_matrix instead."""
return cm.confusion_matrix(labels=labels, predictions=predictions,
num_classes=num_classes, dtype=dtype, name=name,
weights=weights)
|
[
"tensorflow.python.ops.confusion_matrix.confusion_matrix"
] |
[((1135, 1265), 'tensorflow.python.ops.confusion_matrix.confusion_matrix', 'cm.confusion_matrix', ([], {'labels': 'labels', 'predictions': 'predictions', 'num_classes': 'num_classes', 'dtype': 'dtype', 'name': 'name', 'weights': 'weights'}), '(labels=labels, predictions=predictions, num_classes=\n num_classes, dtype=dtype, name=name, weights=weights)\n', (1154, 1265), True, 'from tensorflow.python.ops import confusion_matrix as cm\n')]
|
"""
Module with tests for debug
"""
#-----------------------------------------------------------------------------
# Copyright (c) 2013, the IPython Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
import sys
from ...tests.base import TestsBase
from ..debug import DebugWriter
from IPython.utils.py3compat import PY3
if PY3:
from io import StringIO
else:
from StringIO import StringIO
#-----------------------------------------------------------------------------
# Class
#-----------------------------------------------------------------------------
class TestDebug(TestsBase):
"""Contains test functions for debug.py"""
def test_output(self):
"""Test debug writer output."""
# Capture the stdout. Remember original.
stdout = sys.stdout
stream = StringIO()
sys.stdout = stream
# Create stdout writer, get output
writer = DebugWriter()
writer.write('aaa', {'outputs': {'bbb': 'ccc'}})
output = stream.getvalue()
# Check output. Make sure resources dictionary is dumped, but nothing
# else.
assert 'aaa' not in output
assert 'bbb' in output
assert 'ccc' in output
# Revert stdout
sys.stdout = stdout
|
[
"StringIO.StringIO"
] |
[((1172, 1182), 'StringIO.StringIO', 'StringIO', ([], {}), '()\n', (1180, 1182), False, 'from StringIO import StringIO\n')]
|
__author__ = "Altertech Group, https://www.altertech.com/"
__copyright__ = "Copyright (C) 2012-2018 Altertech Group"
__license__ = "Apache License 2.0"
__version__ = "1.1.1"
__description__ = "BME280 temperature/humidity/pressure sensors (I2C/SMBus)"
__api__ = 5
__required__ = ['aao_get', 'value']
__mods_required__ = ['smbus2']
__lpi_default__ = 'sensor'
__equipment__ = 'BME280'
__features__ = []
__config_help__ = [{
'name': 'bus',
'help': 'I2C bus to use',
'type': 'int',
'required': True
}, {
'name': 'addr',
'help': 'Device address on bus, hexdecimal (default: 0x76)',
'type': 'hex',
'required': False
}]
__get_help__ = []
__set_help__ = []
bus_delay = 0.5
__help__ = """
PHI for BME280 sensors (and compatible) connected via local I2C bus. Returns
port 'h' for humidity, 't' for temperature, 'p' for pressure.
"""
from eva.uc.drivers.phi.generic_phi import PHI as GenericPHI
from eva.uc.driverapi import log_traceback
from eva.uc.driverapi import get_timeout
import eva.uc.i2cbus
import os
import importlib
import time
from ctypes import c_short
from ctypes import c_byte
from ctypes import c_ubyte
from eva.uc.driverapi import phi_constructor
class PHI(GenericPHI):
@phi_constructor
def __init__(self, **kwargs):
try:
self.smbus2 = importlib.import_module('smbus2')
except:
self.log_error('unable to load smbus2 python module')
self.ready = False
return
try:
self.bus = int(self.phi_cfg.get('bus'))
except:
self.log_error('I2C bus not specified')
self.ready = False
return
try:
self.addr = int(self.phi_cfg.get('addr', '0x76'), 16)
except:
self.log_error('Invalid address: %s ' % self.phi_cfg.get('addr'))
self.ready = False
def get(self, port=None, cfg=None, timeout=0):
try:
if not eva.uc.i2cbus.lock(self.bus):
raise Exception('Unable to lock I2C bus %s ' % self.bus)
b = self.smbus2.SMBus(self.bus)
time_start = time.time()
rd = False
while time_start + timeout >= time.time():
try:
t, p, h = self.readBME280All(bus=b, addr=self.addr)
if t is not None and p is not None and h is not None:
rd = True
break
except:
pass
time.sleep(bus_delay)
eva.uc.i2cbus.release(self.bus)
if not rd: raise Exception('data read error')
return {
't': int(t * 100) / 100.0,
'p': int(p * 100) / 100.0,
'h': int(h * 100) / 100.0
}
except:
log_traceback()
return None
def test(self, cmd=None):
if cmd == 'self' or cmd == 'info':
try:
if not eva.uc.i2cbus.lock(self.bus):
raise Exception('Unable to acquire I2C bus %s ' % self.bus)
b = self.smbus2.SMBus(self.bus)
try:
i, v = self.readBME280ID(bus=b, addr=self.addr)
except:
eva.uc.i2cbus.release(self.bus)
raise
eva.uc.i2cbus.release(self.bus)
if i is None or v is None: raise Exception('data read error')
except:
log_traceback()
return 'FAILED'
return {'id': i, 'version': v} if cmd != 'self' else 'OK'
elif cmd == 'get':
return self.get(timeout=get_timeout())
else:
return {
'info': 'Get chip ID and version',
'get': 'Get data from chip'
}
# the code below is based on bme280.py tool
# Author : <NAME>
# Date : 25/07/2016
#
# http://www.raspberrypi-spy.co.uk/
#
#--------------------------------------
def readBME280ID(self, bus, addr):
# Chip ID Register Address
REG_ID = 0xD0
(chip_id, chip_version) = bus.read_i2c_block_data(addr, REG_ID, 2)
return (chip_id, chip_version)
def readBME280All(self, bus, addr):
# Register Addresses
REG_DATA = 0xF7
REG_CONTROL = 0xF4
REG_CONFIG = 0xF5
REG_CONTROL_HUM = 0xF2
REG_HUM_MSB = 0xFD
REG_HUM_LSB = 0xFE
# Oversample setting - page 27
OVERSAMPLE_TEMP = 2
OVERSAMPLE_PRES = 2
MODE = 1
# Oversample setting for humidity register - page 26
OVERSAMPLE_HUM = 2
bus.write_byte_data(addr, REG_CONTROL_HUM, OVERSAMPLE_HUM)
control = OVERSAMPLE_TEMP << 5 | OVERSAMPLE_PRES << 2 | MODE
bus.write_byte_data(addr, REG_CONTROL, control)
# Read blocks of calibration data from EEPROM
# See Page 22 data sheet
cal1 = bus.read_i2c_block_data(addr, 0x88, 24)
cal2 = bus.read_i2c_block_data(addr, 0xA1, 1)
cal3 = bus.read_i2c_block_data(addr, 0xE1, 7)
# Convert byte data to word values
dig_T1 = getUShort(cal1, 0)
dig_T2 = getShort(cal1, 2)
dig_T3 = getShort(cal1, 4)
dig_P1 = getUShort(cal1, 6)
dig_P2 = getShort(cal1, 8)
dig_P3 = getShort(cal1, 10)
dig_P4 = getShort(cal1, 12)
dig_P5 = getShort(cal1, 14)
dig_P6 = getShort(cal1, 16)
dig_P7 = getShort(cal1, 18)
dig_P8 = getShort(cal1, 20)
dig_P9 = getShort(cal1, 22)
dig_H1 = getUChar(cal2, 0)
dig_H2 = getShort(cal3, 0)
dig_H3 = getUChar(cal3, 2)
dig_H4 = getChar(cal3, 3)
dig_H4 = (dig_H4 << 24) >> 20
dig_H4 = dig_H4 | (getChar(cal3, 4) & 0x0F)
dig_H5 = getChar(cal3, 5)
dig_H5 = (dig_H5 << 24) >> 20
dig_H5 = dig_H5 | (getUChar(cal3, 4) >> 4 & 0x0F)
dig_H6 = getChar(cal3, 6)
# Wait in ms (Datasheet Appendix B: Measurement time and current
# calculation)
wait_time = 1.25 + (2.3 * OVERSAMPLE_TEMP) + (
(2.3 * OVERSAMPLE_PRES) + 0.575) + ((2.3 * OVERSAMPLE_HUM) + 0.575)
time.sleep(wait_time / 1000) # Wait the required time
# Read temperature/pressure/humidity
data = bus.read_i2c_block_data(addr, REG_DATA, 8)
pres_raw = (data[0] << 12) | (data[1] << 4) | (data[2] >> 4)
temp_raw = (data[3] << 12) | (data[4] << 4) | (data[5] >> 4)
hum_raw = (data[6] << 8) | data[7]
#Refine temperature
var1 = ((((temp_raw >> 3) - (dig_T1 << 1))) * (dig_T2)) >> 11
var2 = (((((temp_raw >> 4) - (dig_T1)) * (
(temp_raw >> 4) - (dig_T1))) >> 12) * (dig_T3)) >> 14
t_fine = var1 + var2
temperature = float(((t_fine * 5) + 128) >> 8)
# Refine pressure and adjust for temperature
var1 = t_fine / 2.0 - 64000.0
var2 = var1 * var1 * dig_P6 / 32768.0
var2 = var2 + var1 * dig_P5 * 2.0
var2 = var2 / 4.0 + dig_P4 * 65536.0
var1 = (dig_P3 * var1 * var1 / 524288.0 + dig_P2 * var1) / 524288.0
var1 = (1.0 + var1 / 32768.0) * dig_P1
if var1 == 0:
pressure = 0
else:
pressure = 1048576.0 - pres_raw
pressure = ((pressure - var2 / 4096.0) * 6250.0) / var1
var1 = dig_P9 * pressure * pressure / 2147483648.0
var2 = pressure * dig_P8 / 32768.0
pressure = pressure + (var1 + var2 + dig_P7) / 16.0
# Refine humidity
humidity = t_fine - 76800.0
humidity = (hum_raw - (dig_H4 * 64.0 + dig_H5 / 16384.0 * humidity)) * (
dig_H2 / 65536.0 * (1.0 + dig_H6 / 67108864.0 * humidity *
(1.0 + dig_H3 / 67108864.0 * humidity)))
humidity = humidity * (1.0 - dig_H1 * humidity / 524288.0)
if humidity > 100:
humidity = 100
elif humidity < 0:
humidity = 0
return temperature / 100.0, pressure / 100.0, humidity
def getShort(data, index):
# return two bytes from data as a signed 16-bit value
return c_short((data[index + 1] << 8) + data[index]).value
def getUShort(data, index):
# return two bytes from data as an unsigned 16-bit value
return (data[index + 1] << 8) + data[index]
def getChar(data, index):
# return one byte from data as a signed char
result = data[index]
if result > 127:
result -= 256
return result
def getUChar(data, index):
# return one byte from data as an unsigned char
result = data[index] & 0xFF
return result
|
[
"importlib.import_module",
"eva.uc.driverapi.log_traceback",
"time.time",
"time.sleep",
"eva.uc.driverapi.get_timeout",
"ctypes.c_short"
] |
[((6219, 6247), 'time.sleep', 'time.sleep', (['(wait_time / 1000)'], {}), '(wait_time / 1000)\n', (6229, 6247), False, 'import time\n'), ((8177, 8222), 'ctypes.c_short', 'c_short', (['((data[index + 1] << 8) + data[index])'], {}), '((data[index + 1] << 8) + data[index])\n', (8184, 8222), False, 'from ctypes import c_short\n'), ((1311, 1344), 'importlib.import_module', 'importlib.import_module', (['"""smbus2"""'], {}), "('smbus2')\n", (1334, 1344), False, 'import importlib\n'), ((2120, 2131), 'time.time', 'time.time', ([], {}), '()\n', (2129, 2131), False, 'import time\n'), ((2197, 2208), 'time.time', 'time.time', ([], {}), '()\n', (2206, 2208), False, 'import time\n'), ((2506, 2527), 'time.sleep', 'time.sleep', (['bus_delay'], {}), '(bus_delay)\n', (2516, 2527), False, 'import time\n'), ((2821, 2836), 'eva.uc.driverapi.log_traceback', 'log_traceback', ([], {}), '()\n', (2834, 2836), False, 'from eva.uc.driverapi import log_traceback\n'), ((3486, 3501), 'eva.uc.driverapi.log_traceback', 'log_traceback', ([], {}), '()\n', (3499, 3501), False, 'from eva.uc.driverapi import log_traceback\n'), ((3667, 3680), 'eva.uc.driverapi.get_timeout', 'get_timeout', ([], {}), '()\n', (3678, 3680), False, 'from eva.uc.driverapi import get_timeout\n')]
|
#!/usr/bin/python
"""
linuxrouter.py: Example network with Linux IP router
This example converts a Node into a router using IP forwarding
already built into Linux.
The example topology creates a router and three IP subnets:
- 192.168.1.0/24 (r0-eth1, IP: 192.168.1.1)
- 172.16.0.0/12 (r0-eth2, IP: 172.16.0.1)
- 10.0.0.0/8 (r0-eth3, IP: 10.0.0.1)
Each subnet consists of a single host connected to
a single switch:
r0-eth1 - s1-eth1 - h1-eth0 (IP: 192.168.1.100)
r0-eth2 - s2-eth1 - h2-eth0 (IP: 172.16.0.100)
r0-eth3 - s3-eth1 - h3-eth0 (IP: 10.0.0.100)
The example relies on default routing entries that are
automatically created for each router interface, as well
as 'defaultRoute' parameters for the host interfaces.
Additional routes may be added to the router or hosts by
executing 'ip route' or 'route' commands on the router or hosts.
"""
from mininet.topo import Topo
from mininet.net import Mininet
from mininet.node import Node
from mininet.log import setLogLevel, info
from mininet.cli import CLI
class LinuxRouter( Node ):
"A Node with IP forwarding enabled."
def config( self, **params ):
super( LinuxRouter, self).config( **params )
# Enable forwarding on the router
self.cmd( 'sysctl net.ipv4.ip_forward=1' )
def terminate( self ):
self.cmd( 'sysctl net.ipv4.ip_forward=0' )
super( LinuxRouter, self ).terminate()
class NetworkTopo( Topo ):
"A LinuxRouter connecting three IP subnets"
def build( self, **_opts ):
r0 = self.addNode( 'r0', cls=LinuxRouter, ip='10.0.0.1/24' )
r1 = self.addNode( 'r1', cls=LinuxRouter, ip='10.0.1.1/24' )
s1, s2, s3, s4 = [ self.addSwitch( s ) for s in ( 's1', 's2', 's3', 's4' ) ]
self.addLink( s1, r0, intfName2='r0-eth0', params2={ 'ip' : '10.0.0.1/24' } )
self.addLink( s2, r0, intfName2='r0-eth1', params2={ 'ip' : '10.0.2.1/24' } )
self.addLink( s3, r1, intfName2='r1-eth0', params2={ 'ip' : '10.0.1.1/24' } )
self.addLink( s4, r1, intfName2='r1-eth1', params2={ 'ip' : '10.0.3.1/24' } )
h1 = self.addHost( 'h1', ip='10.0.0.100/24', defaultRoute='via 10.0.0.1' )
h2 = self.addHost( 'h2', ip='10.0.2.100/24', defaultRoute='via 10.0.2.1' )
for h, s in [ ( h1, s1 ), ( h1, s3 ), ( h2, s2 ), ( h2, s4 ) ]:
self.addLink( h, s )
def run():
"Test linux router"
topo = NetworkTopo()
net = Mininet( topo=topo )
net.start()
info( '*** Routing Table on Router:\n' )
info( net[ 'r0' ].cmd( 'route' ) )
info( net[ 'r1' ].cmd( 'route' ) )
net[ 'h1' ].cmd('tc qdisc add dev h1-eth0 root tbf rate 50Mbit burst 1mbit latency 1ms')
net[ 'h1' ].cmd('tc qdisc add dev h1-eth1 root tbf rate 50Mbit burst 1mbit latency 1ms')
net[ 'h2' ].cmd('tc qdisc add dev h2-eth0 root tbf rate 50Mbit burst 1mbit latency 1ms')
net[ 'h2' ].cmd('tc qdisc add dev h2-eth1 root tbf rate 50Mbit burst 1mbit latency 1ms')
net[ 'r0' ].cmd('tc qdisc add dev r0-eth0 root tbf rate 50Mbit burst 1mbit latency 1ms')
net[ 'r0' ].cmd('tc qdisc add dev r0-eth1 root tbf rate 50Mbit burst 1mbit latency 1ms')
net[ 'r1' ].cmd('tc qdisc add dev r1-eth0 root tbf rate 50Mbit burst 1mbit latency 1ms')
net[ 'r1' ].cmd('tc qdisc add dev r1-eth1 root tbf rate 50Mbit burst 1mbit latency 1ms')
net[ 'h1' ].setIP('10.0.0.100/24', intf='h1-eth0')
net[ 'h1' ].setIP('10.0.1.100/24', intf='h1-eth1')
net[ 'h2' ].setIP('10.0.2.100/24', intf='h2-eth0')
net[ 'h2' ].setIP('10.0.3.100/24', intf='h2-eth1')
net[ 'h1' ].cmd('ip rule add from 10.0.0.100 table 1')
net[ 'h1' ].cmd('ip rule add from 10.0.1.100 table 2')
net[ 'h1' ].cmd('ip route add 10.0.0.0/24 dev h1-eth0 scope link table 1')
net[ 'h1' ].cmd('ip route add default via 10.0.0.1 dev h1-eth0 table 1')
net[ 'h1' ].cmd('ip route add 10.0.1.0/24 dev h1-eth1 scope link table 2')
net[ 'h1' ].cmd('ip route add default via 10.0.1.1 dev h1-eth1 table 2')
net[ 'h1' ].cmd('ip route add default scope global nexthop via 10.0.0.1 dev h1-eth0')
net[ 'h2' ].cmd('ip rule add from 10.0.2.100 table 1')
net[ 'h2' ].cmd('ip rule add from 10.0.3.100 table 2')
net[ 'h2' ].cmd('ip route add 10.0.2.0/24 dev h2-eth0 scope link table 1')
net[ 'h2' ].cmd('ip route add default via 10.0.2.1 dev h2-eth0 table 1')
net[ 'h2' ].cmd('ip route add 10.0.3.0/24 dev h2-eth1 scope link table 2')
net[ 'h2' ].cmd('ip route add default via 10.0.3.1 dev h2-eth1 table 2')
net[ 'h2' ].cmd('ip route add default scope global nexthop via 10.0.2.1 dev h2-eth0')
CLI( net )
net.stop()
if __name__ == '__main__':
setLogLevel( 'info' )
run()
|
[
"mininet.log.info",
"mininet.log.setLogLevel",
"mininet.net.Mininet",
"mininet.cli.CLI"
] |
[((2445, 2463), 'mininet.net.Mininet', 'Mininet', ([], {'topo': 'topo'}), '(topo=topo)\n', (2452, 2463), False, 'from mininet.net import Mininet\n'), ((2487, 2525), 'mininet.log.info', 'info', (['"""*** Routing Table on Router:\n"""'], {}), "('*** Routing Table on Router:\\n')\n", (2491, 2525), False, 'from mininet.log import setLogLevel, info\n'), ((4619, 4627), 'mininet.cli.CLI', 'CLI', (['net'], {}), '(net)\n', (4622, 4627), False, 'from mininet.cli import CLI\n'), ((4677, 4696), 'mininet.log.setLogLevel', 'setLogLevel', (['"""info"""'], {}), "('info')\n", (4688, 4696), False, 'from mininet.log import setLogLevel, info\n')]
|
import base64
from io import BytesIO
import PIL
def any_image_to_base64(any_image, format=None):
"""Convert an image to base64-encoded string
:param any_image: a PIL.Image.Image instance, file-like object
or path to an image file
:type any_image: PIL.Image.Image/file/str
:param format: The format which the PIL image to be converted to,
can be 'JPEG' or 'PNG, defaults to None
:param format: str, optional
:return: base64-encoded string
:rtype: str
"""
if isinstance(any_image, str):
# If this object is an instance of str, assme it as the path
# to an image
with open(any_image, 'rb') as image_file:
return base64.b64encode(image_file.read())
elif isinstance(any_image, PIL.Image.Image):
buffered = BytesIO()
any_image.save(buffered, format=format or any_image.format)
img_str = base64.b64encode(buffered.getvalue())
return img_str
else:
# Assue this object is a file-like one, which can be read()
return base64.b64encode(any_image.read())
|
[
"io.BytesIO"
] |
[((833, 842), 'io.BytesIO', 'BytesIO', ([], {}), '()\n', (840, 842), False, 'from io import BytesIO\n')]
|
import click
import requests
def validate_token(ctx, param, value):
if value is None:
raise click.BadParameter('Pass a GitHub OAuth token with "%s" or set the environment variable %s'
% ('" / "'.join(param.opts), param.envvar))
return value
def validate_repository(ctx, param, value):
try:
owner, name = value.split('/')
r = requests.get('https://github.com/%s' % value)
r.raise_for_status()
return (owner, name)
except ValueError:
raise click.BadParameter('"%s" must be in format owner/name (example: https://github.com/%s)'
% (param.name, click.style('dmarcoux/grcheck', bold=True)))
except requests.exceptions.HTTPError as error:
if r.status_code == 404:
raise click.BadParameter('"https://github.com/%s" is not a repository' % value)
else:
raise click.ClickException(error)
@click.command()
@click.option('-t', '--token', envvar='GITHUB_OAUTH_TOKEN', callback=validate_token)
@click.argument('repository', callback=validate_repository)
def cli(token, repository):
headers = {"Authorization": "token %s" % token}
query = {'query': '{ repository(owner: "%s", name: "%s") { releases(last: 1) { nodes { tag { name } } } } }'
% repository}
r = requests.post(url='https://api.github.com/graphql', json=query, headers=headers)
nodes = r.json()['data']['repository']['releases']['nodes']
if nodes:
click.echo(nodes[0]['tag']['name'])
else:
click.echo('No release found')
|
[
"click.BadParameter",
"click.argument",
"click.option",
"click.echo",
"click.command",
"click.ClickException",
"requests.get",
"requests.post",
"click.style"
] |
[((961, 976), 'click.command', 'click.command', ([], {}), '()\n', (974, 976), False, 'import click\n'), ((978, 1066), 'click.option', 'click.option', (['"""-t"""', '"""--token"""'], {'envvar': '"""GITHUB_OAUTH_TOKEN"""', 'callback': 'validate_token'}), "('-t', '--token', envvar='GITHUB_OAUTH_TOKEN', callback=\n validate_token)\n", (990, 1066), False, 'import click\n'), ((1063, 1121), 'click.argument', 'click.argument', (['"""repository"""'], {'callback': 'validate_repository'}), "('repository', callback=validate_repository)\n", (1077, 1121), False, 'import click\n'), ((1352, 1437), 'requests.post', 'requests.post', ([], {'url': '"""https://api.github.com/graphql"""', 'json': 'query', 'headers': 'headers'}), "(url='https://api.github.com/graphql', json=query, headers=headers\n )\n", (1365, 1437), False, 'import requests\n'), ((399, 444), 'requests.get', 'requests.get', (["('https://github.com/%s' % value)"], {}), "('https://github.com/%s' % value)\n", (411, 444), False, 'import requests\n'), ((1520, 1555), 'click.echo', 'click.echo', (["nodes[0]['tag']['name']"], {}), "(nodes[0]['tag']['name'])\n", (1530, 1555), False, 'import click\n'), ((1574, 1604), 'click.echo', 'click.echo', (['"""No release found"""'], {}), "('No release found')\n", (1584, 1604), False, 'import click\n'), ((824, 897), 'click.BadParameter', 'click.BadParameter', (['(\'"https://github.com/%s" is not a repository\' % value)'], {}), '(\'"https://github.com/%s" is not a repository\' % value)\n', (842, 897), False, 'import click\n'), ((930, 957), 'click.ClickException', 'click.ClickException', (['error'], {}), '(error)\n', (950, 957), False, 'import click\n'), ((677, 719), 'click.style', 'click.style', (['"""dmarcoux/grcheck"""'], {'bold': '(True)'}), "('dmarcoux/grcheck', bold=True)\n", (688, 719), False, 'import click\n')]
|
import utility
import math
# The actual usable screen size is (in pixel):
# WIDTH = 320
# HEIGHT = 222
# The number of line and columns visible at the same time in the interpreter is (based on 'M'):
# MAX_LINE = 16 # Python font size = small
# MAX_COLUMN = 42 # Python font size = small
# MAX_LINE = 12 # Python font size = large
# MAX_COLUMN = 29 # Python font size = large
def lambdaDim(x, y, z=0):
    """Print and return the unit (lambda) vector of the vector (x, y, z).

    Used in statics to express a force along a member as F * lambda.

    Args:
        x, y, z: Cartesian components of the vector (z defaults to 0 for 2-D).

    Returns:
        tuple: (norm, [lambda_x, lambda_y, lambda_z]).

    Raises:
        ValueError: if the vector is the zero vector (direction undefined).
    """
    norm = math.sqrt(x**2 + y**2 + z**2)
    if norm == 0:
        # A zero vector has no direction; fail with a clear message
        # instead of an obscure ZeroDivisionError.
        raise ValueError("lambdaDim: zero vector has no direction")
    components = [x/norm, y/norm, z/norm]
    print("norm = {:,.6g}".format(norm))
    print("\u03BBx = {:,.6g}".format(components[0]))
    print("\u03BBy = {:,.6g}".format(components[1]))
    print("\u03BBz = {:,.6g}".format(components[2]))
    # Returning the values (previously None) lets callers use the result
    # programmatically; existing callers that ignore it are unaffected.
    return norm, components
# Menu describing the callable routines in this module; each entry is
# "title: function(signature)" in the format utility.printMenu expects.
menu = [
    # "line title: function(arguments)",
    "lambda vector (dimensions): lambdaDim(x, y, (z))",
]
# Display the menu on screen, then run a demonstration computation.
utility.printMenu("Statics", menu)
lambdaDim(-0.78, 1.6)
|
[
"utility.printMenu",
"math.sqrt"
] |
[((806, 840), 'utility.printMenu', 'utility.printMenu', (['"""Statics"""', 'menu'], {}), "('Statics', menu)\n", (823, 840), False, 'import utility\n'), ((420, 455), 'math.sqrt', 'math.sqrt', (['(x ** 2 + y ** 2 + z ** 2)'], {}), '(x ** 2 + y ** 2 + z ** 2)\n', (429, 455), False, 'import math\n')]
|
# Copyright 2021 Sony Corporation.
# Copyright 2021 Sony Group Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
DLA primitives and full network models.
"""
import numpy as np
import nnabla as nn
import nnabla.parametric_functions as PF
import nnabla.functions as F
from nnabla.initializer import UniformInitializer, ConstantInitializer, NormalInitializer, calc_normal_std_he_forward, calc_normal_std_he_backward
from nnabla.logger import logger
from nnabla.utils.save import save
from nnabla.utils.nnp_graph import NnpNetworkPass
from models.networks.initializers import he_initializer, bilinear_depthwise_initializer, bilinear_initializer
# Module-wide RNG with a fixed seed so weight initialization is reproducible.
RNG = np.random.RandomState(214)
def pf_depthwise_deconvolution(x, kernel, stride=(1, 1), pad=(1, 1), dilation=(2, 2), with_bias=False, w_init=None, b_init=None, channel_last=False):
    """Depthwise transposed convolution: group count equals the channel count,
    so every channel is upsampled independently."""
    channels = x.shape[3] if channel_last else x.shape[1]
    if channel_last:
        # Assumes w_init is an OIHW ndarray; reorder to OHWI for NHWC mode.
        # TODO(review): confirm w_init is always a numpy array here.
        w_init = np.transpose(w_init, (0, 2, 3, 1))
    return PF.deconvolution(
        x, channels, kernel,
        pad=pad, stride=stride, dilation=dilation,
        w_init=w_init, with_bias=with_bias, b_init=b_init,
        group=channels, channel_last=channel_last)
def pf_affine(r, num_classes=1000, channel_last=False):
    """Classifier head: a 1x1 convolution named 'fc', flattened to
    (batch, num_classes)."""
    w_init = NormalInitializer(sigma=0.01, rng=RNG)
    logits = PF.convolution(r, num_classes, (1, 1), channel_last=channel_last,
                            w_init=w_init, name='fc')
    return F.reshape(logits, (logits.shape[0], -1), inplace=False)
def pf_convolution(x, ochannels, kernel, pad=None, stride=(1, 1), dilation=None, with_bias=False, w_init=None, b_init=None, channel_last=False):
    """Thin wrapper over PF.convolution carrying this project's defaults
    (no bias, optional dilation/initializer, NHWC switch)."""
    return PF.convolution(
        x, ochannels, kernel,
        stride=stride, pad=pad, dilation=dilation,
        with_bias=with_bias, w_init=w_init, b_init=b_init,
        channel_last=channel_last)
def shortcut(x, ochannels, stride, shortcut_type, test, channel_last=False):
    """Residual shortcut branch.

    shortcut_type: 'c' always uses a 1x1 conv; 'b' uses a conv only when the
    channel count changes; anything else downsamples with max-pooling and
    matches channels with a 1x1 conv only if needed.
    Channel growth is restricted to x2 or x4 of the input channels.
    """
    axes = 3 if channel_last else 1
    ichannels = x.shape[axes]
    use_conv = shortcut_type.lower() == 'c'
    if ichannels != ochannels:
        assert (ichannels * 2 == ochannels) or (ichannels * 4 == ochannels)
        if shortcut_type.lower() == 'b':
            use_conv = True
    if use_conv:
        # Convolution does everything.
        # Matching channels, striding.
        with nn.parameter_scope("shortcut_conv"):
            x = PF.convolution(x, ochannels, (1, 1),
                               stride=(stride, stride), with_bias=False, channel_last=channel_last)
            x = PF.batch_normalization(x, axes=[axes], batch_stat=not test)
    else:
        # shortcut block is slightly different for dla
        if stride != 1:
            # Stride
            x = F.max_pooling(
                x, kernel=(
                    stride, stride), stride=(
                    stride, stride), channel_last=channel_last)
        if ichannels != ochannels:
            x = PF.convolution(
                x, ochannels, (1, 1), stride=(
                    1, 1), with_bias=False, channel_last=channel_last)
            x = PF.batch_normalization(x, axes=[axes], batch_stat=not test)
    return x
def basicblock(x, residual, ochannels, stride, test, channel_last=False):
    """Two 3x3 conv-BN layers with a residual connection and a final ReLU.
    If `residual` is None the input itself is used as the shortcut."""
    bn_axes = [3 if channel_last else 1]

    def bn(h):
        # Batch statistics are frozen at inference time.
        return PF.batch_normalization(h, axes=bn_axes, batch_stat=not test)

    skip = x if residual is None else residual
    with nn.parameter_scope("basicblock1"):
        conv1 = PF.convolution(
            x, ochannels, (3, 3), stride=(stride, stride), pad=(1, 1),
            with_bias=False, channel_last=channel_last)
        h = F.relu(bn(conv1))
    with nn.parameter_scope("basicblock2"):
        conv2 = PF.convolution(
            h, ochannels, (3, 3), pad=(1, 1),
            with_bias=False, channel_last=channel_last)
        h = bn(conv2)
    return F.relu(F.add2(h, skip))
def bottleneck(x, ochannels, shortcut_type, stride, test, channel_last=False):
    """1x1 -> 3x3 -> 1x1 bottleneck residual block (ResNet style).

    Args:
        x: input Variable.
        ochannels (int): output channels; must be divisible by 4.
        shortcut_type (str): 'b'/'c' conv-shortcut policy (see `shortcut`).
        stride: stride of the middle 3x3 convolution.
        test (bool): inference mode for batch normalization.
        channel_last (bool): NHWC layout when True.
    """
    bn_axes = [3 if channel_last else 1]

    def bn(h):
        return PF.batch_normalization(h, axes=bn_axes, batch_stat=not test)

    assert ochannels % 4 == 0
    # BUG FIX: '/' yields a float under Python 3, which is not a valid
    # channel count for PF.convolution; use integer division.
    hchannels = ochannels // 4
    with nn.parameter_scope("bottleneck1"):
        h = F.relu(
            bn(PF.convolution(x, hchannels, (1, 1),
                              with_bias=False, channel_last=channel_last))
        )
    with nn.parameter_scope("bottleneck2"):
        h = F.relu(
            bn(PF.convolution(h, hchannels, (3, 3), pad=(1, 1),
                              stride=stride, with_bias=False, channel_last=channel_last)))
    with nn.parameter_scope("bottleneck3"):
        h = bn(PF.convolution(h, ochannels, (1, 1),
                          with_bias=False, channel_last=channel_last))
    with nn.parameter_scope("bottleneck_s"):
        s = shortcut(x, ochannels, stride, shortcut_type, test, channel_last)
    return F.relu(F.add2(h, s))
def layer(x, block, ochannels, count, stride, shortcut_type, test, channel_last=False):
    """Stack `count` residual blocks under scopes layer1..layerN; only the
    first block applies `stride`, the rest keep the resolution."""
    for idx in range(count):
        block_stride = stride if idx == 0 else (1, 1)
        with nn.parameter_scope("layer{}".format(idx + 1)):
            x = block(x, ochannels, block_stride, shortcut_type, test,
                      channel_last=channel_last)
    return x
def _make_conv_level(x, ochannels, convs, test, stride=1, dilation=1, channel_last=False):
    """Stack `convs` 3x3 conv-BN-ReLU units; only the first one may
    downsample, all use the same dilation."""
    bn_axes = [3 if channel_last else 1]
    for idx in range(convs):
        with nn.parameter_scope("conv{}".format(idx + 1)):
            conv_stride = (stride, stride) if idx == 0 else (1, 1)
            x = pf_convolution(
                x, ochannels, (3, 3), stride=conv_stride,
                pad=(dilation, dilation), dilation=(dilation, dilation),
                with_bias=False, channel_last=channel_last)
            x = F.relu(PF.batch_normalization(x, axes=bn_axes, batch_stat=not test))
    return x
def root(x, children, ochannels, test, concat_axis=1, kernel_size=1, channel_last=False):
    """Aggregation node: concatenate inputs along the channel axis, then
    apply conv-BN-ReLU.

    NOTE(review): `concat_axis` is unused; the channel axis is derived
    from `channel_last` instead — confirm before removing.
    """
    axis = 3 if channel_last else 1
    with nn.parameter_scope("root"):
        rng = np.random.RandomState(313)
        merged = F.concatenate(x, *children, axis=axis)
        half_k = (kernel_size - 1) // 2
        merged = pf_convolution(
            merged, ochannels, (kernel_size, kernel_size),
            pad=(half_k, half_k), stride=(1, 1), with_bias=False,
            w_init=he_initializer(ochannels, kernel_size, rng),
            channel_last=channel_last)
        merged = PF.batch_normalization(merged, axes=[axis], batch_stat=not test)
        return F.relu(merged)
def upsample(x, ochannels, test, kernel_size=4, channel_last=False):
    """1x1 conv-BN-ReLU to adjust channels, then a x2 depthwise
    deconvolution initialized with bilinear weights."""
    rng = np.random.RandomState(313)
    axis = 3 if channel_last else 1
    with nn.parameter_scope("up"):
        x = pf_convolution(
            x, ochannels, (1, 1), stride=(1, 1), with_bias=False,
            w_init=he_initializer(ochannels, kernel_size, rng),
            channel_last=channel_last)
        x = F.relu(PF.batch_normalization(x, axes=[axis], batch_stat=not test))
        channels = x.shape[axis]
        x = pf_depthwise_deconvolution(
            x, (kernel_size, kernel_size), pad=(1, 1), stride=(2, 2),
            dilation=(1, 1), with_bias=False,
            w_init=bilinear_depthwise_initializer(channels, kernel_size),
            channel_last=channel_last)
    return x
def _make_tree_level1(
        x,
        children,
        block,
        ochannels,
        level,
        test,
        level_root=False,
        stride=1,
        channel_last=False
):
    """Depth-1 HDA tree: two residual blocks aggregated by a root node.

    Returns (aggregated_output, bottom), where `bottom` is the (possibly
    max-pooled) input, also fed to the root when `level_root` is True.
    NOTE(review): `level` is unused in this function.
    """
    axes = 3 if channel_last else 1
    ichannels = x.shape[axes]
    # Downsample the shortcut path with pooling when striding.
    bottom = F.max_pooling(
        x,
        kernel=(stride, stride),
        stride=(stride, stride),
        channel_last=channel_last
    ) if stride > 1 else x
    if ichannels != ochannels:
        # Project the shortcut to the new channel count.
        residual = pf_convolution(
            bottom, ochannels, (1, 1), stride=(1, 1), pad=None, with_bias=False, channel_last=channel_last)
        residual = PF.batch_normalization(
            residual, axes=[axes], batch_stat=not test)
    else:
        residual = bottom
    with nn.parameter_scope('block1'):
        b1 = block(x, residual, ochannels, stride,
                   test, channel_last=channel_last)
    with nn.parameter_scope('block2'):
        b2 = block(b1, b1, ochannels, 1, test, channel_last=channel_last)
    _children = [bottom, b2] if level_root else [b2]
    if children:
        _children += children
    x = root(b1, _children, ochannels, test,
             kernel_size=1, channel_last=channel_last)
    return x, bottom
def _make_tree_level2(
        x,
        children,
        block,
        ochannels,
        level,
        test,
        level_root=False,
        stride=1,
        channel_last=False):
    """Depth-2 HDA tree: two level-1 trees, the second aggregating the
    first tree's output together with its pooled input."""
    with nn.parameter_scope('node1'):
        agg, pooled = _make_tree_level1(
            x, None, block, ochannels, level, test, False, stride,
            channel_last=channel_last)
    with nn.parameter_scope('node2'):
        out, _ = _make_tree_level1(
            agg, [pooled], block, ochannels, level, test, level_root, 1,
            channel_last=channel_last)
    return out
def dla_imagenet(
        x,
        num_classes,
        num_layers,
        test,
        residual_root=False,
        tiny=False,
        channel_last=False):
    """
    Build a DLA classification network (currently only DLA-34).

    Args:
        x : Variable
        num_classes : Number of classes of outputs
        num_layers : Number of layers of DLA chosen from (34).
        test : Construct net for testing.
        tiny (bool): Tiny imagenet mode. Input image must be (3, 56, 56).
            NOTE(review): `tiny` and `residual_root` are unused here.

    Returns:
        (logits, hidden): classifier output and a dict of intermediate
        feature maps keyed 'conv0' and 'level0'..'level5'.
    """
    # Per-depth config: (blocks per level, level-root flags, block builder).
    layers = {
        # 18: ((2, 2, 2, 2), basicblock, 1),
        34: ((1, 1, 1, 2, 2, 1), (False, False, False, True, True, True), basicblock)
        # 50: ((3, 4, 6, 3), bottleneck, 4),
        # 101: ((3, 4, 23, 3), bottleneck, 4),
        # 152: ((3, 8, 36, 3), bottleneck, 4)
    }
    ochannels = [16, 32, 64, 128, 256, 512]
    levels, levels_root, block = layers[num_layers]
    strides = [1, 2, 2, 2, 2, 2]
    logger.debug(x.shape)
    axes = 3 if channel_last else 1
    # Stem: 7x7 conv + BN + ReLU, resolution preserved.
    with nn.parameter_scope("conv1"):
        stride = (1, 1)
        r = pf_convolution(x, 16, (7, 7),
                           pad=(3, 3), stride=stride, with_bias=False, channel_last=channel_last)
        r = F.relu(PF.batch_normalization(
            r, axes=[axes], batch_stat=not test))
    hidden = {}
    hidden['conv0'] = r
    logger.debug(r.shape)
    # Levels 0-1 are plain conv stacks; levels 2-5 are aggregation trees.
    with nn.parameter_scope("level0"):
        r = _make_conv_level(
            r,
            ochannels[0],
            levels[0],
            test=test,
            stride=strides[0],
            channel_last=channel_last)
        hidden['level0'] = r
        logger.debug(r.shape)
    with nn.parameter_scope("level1"):
        r = _make_conv_level(
            r,
            ochannels[1],
            levels[1],
            test=test,
            stride=strides[1],
            channel_last=channel_last)
        hidden['level1'] = r
        logger.debug(r.shape)
    with nn.parameter_scope("level2"):
        r, _ = _make_tree_level1(
            r, None, block, ochannels[2], levels[2], test, levels_root[2], stride=strides[2], channel_last=channel_last)
        hidden['level2'] = r
        logger.debug(r.shape)
    with nn.parameter_scope("level3"):
        r = _make_tree_level2(
            r,
            None,
            block,
            ochannels[3],
            levels[3],
            test,
            levels_root[3],
            stride=strides[3],
            channel_last=channel_last)
        hidden['level3'] = r
        logger.debug(r.shape)
    with nn.parameter_scope("level4"):
        r = _make_tree_level2(
            r,
            None,
            block,
            ochannels[4],
            levels[4],
            test,
            levels_root[4],
            stride=strides[4],
            channel_last=channel_last)
        hidden['level4'] = r
        logger.debug(r.shape)
    with nn.parameter_scope("level5"):
        r, _ = _make_tree_level1(
            r, None, block, ochannels[5], levels[5], test, levels_root[5], stride=strides[5], channel_last=channel_last)
        hidden['level5'] = r
        logger.debug(r.shape)
    # Global average pooling over the full spatial extent, then the head.
    pool_shape = r.shape[-2:]
    if channel_last:
        pool_shape = r.shape[1:3]
    r = F.average_pooling(r, pool_shape, channel_last=channel_last)
    with nn.parameter_scope("fc"):
        r = pf_affine(r, num_classes, channel_last=channel_last)
    logger.debug(r.shape)
    return r, hidden
# Upsampling portion of DLA
def DLAUp(x, test, residual_root=False, channel_last=False):
    """Iterative deep aggregation head: upsample and fuse DLA-34 feature
    levels back toward stride 4, returning the fused feature map.

    NOTE(review): `callback` is constructed but never applied here — the
    'fc' head built by dla_imagenet is simply ignored; confirm whether the
    NnpNetworkPass is dead code. `residual_root` is unused.
    """
    r, hidden = dla_imagenet(
        x, num_classes=1000, num_layers=34, test=test, channel_last=channel_last)
    callback = NnpNetworkPass(True)
    callback.remove_and_rewire('fc')
    ochannels = [256, 128, 64, 32]
    # Fuse level5 into level4 resolution.
    with nn.parameter_scope("up16"):
        x = upsample(hidden['level5'], ochannels[0], test,
                     kernel_size=4, channel_last=channel_last)
        hidden['up16'] = x
    # Fuse with level4, then up to level3 resolution.
    with nn.parameter_scope("up8"):
        x = root(x, [hidden['level4']], ochannels[0], test,
                 kernel_size=3, channel_last=channel_last)
        x = upsample(x, ochannels[1], test,
                     kernel_size=4, channel_last=channel_last)
        hidden['up8'] = x
    with nn.parameter_scope("up4"):
        with nn.parameter_scope("residual_level3"):
            level4up = upsample(
                hidden['level4'], ochannels[1], test, kernel_size=4, channel_last=channel_last)
            with nn.parameter_scope("level3up_root"):
                level3up = root(
                    level4up, [hidden['level3']], ochannels[1], test, kernel_size=3, channel_last=channel_last)
        with nn.parameter_scope("x_root"):
            x = root(x, [level3up], ochannels[1], test,
                     kernel_size=1, channel_last=channel_last)
        x = upsample(x, ochannels[2], test,
                     kernel_size=4, channel_last=channel_last)
        hidden['up4'] = x
    # Secondary branch fusing level3/level2 information into the output.
    with nn.parameter_scope("up2_b"):
        level3up_b = upsample(
            level3up, ochannels[2], test, kernel_size=4, channel_last=channel_last)
    with nn.parameter_scope("up2_c"):
        level3up_c = upsample(
            hidden['level3'], ochannels[2], test, kernel_size=4, channel_last=channel_last)
        with nn.parameter_scope("level3up_c_root"):
            level3up_c = root(hidden['level2'], [
                level3up_c], ochannels[2], test, kernel_size=3, channel_last=channel_last)
        with nn.parameter_scope("level2up_root"):
            level2up = root(level3up_b, [level3up_c],
                            ochannels[2], test, kernel_size=3, channel_last=channel_last)
        with nn.parameter_scope("x_root"):
            x = root(x, [level2up], ochannels[2], test,
                     kernel_size=3, channel_last=channel_last)
    return x
|
[
"nnabla.functions.max_pooling",
"nnabla.functions.concatenate",
"nnabla.parametric_functions.deconvolution",
"models.networks.initializers.bilinear_depthwise_initializer",
"numpy.transpose",
"nnabla.functions.relu",
"numpy.random.RandomState",
"nnabla.logger.logger.debug",
"nnabla.functions.reshape",
"nnabla.parametric_functions.batch_normalization",
"nnabla.parameter_scope",
"nnabla.functions.add2",
"nnabla.parametric_functions.convolution",
"nnabla.initializer.NormalInitializer",
"models.networks.initializers.he_initializer",
"nnabla.functions.average_pooling",
"nnabla.utils.nnp_graph.NnpNetworkPass"
] |
[((1166, 1192), 'numpy.random.RandomState', 'np.random.RandomState', (['(214)'], {}), '(214)\n', (1187, 1192), True, 'import numpy as np\n'), ((1483, 1665), 'nnabla.parametric_functions.deconvolution', 'PF.deconvolution', (['x', 'out_map', 'kernel'], {'pad': 'pad', 'stride': 'stride', 'dilation': 'dilation', 'w_init': 'w_init', 'with_bias': 'with_bias', 'b_init': 'b_init', 'group': 'out_map', 'channel_last': 'channel_last'}), '(x, out_map, kernel, pad=pad, stride=stride, dilation=\n dilation, w_init=w_init, with_bias=with_bias, b_init=b_init, group=\n out_map, channel_last=channel_last)\n', (1499, 1665), True, 'import nnabla.parametric_functions as PF\n'), ((1991, 2036), 'nnabla.functions.reshape', 'F.reshape', (['r', '(r.shape[0], -1)'], {'inplace': '(False)'}), '(r, (r.shape[0], -1), inplace=False)\n', (2000, 2036), True, 'import nnabla.functions as F\n'), ((2195, 2361), 'nnabla.parametric_functions.convolution', 'PF.convolution', (['x', 'ochannels', 'kernel'], {'stride': 'stride', 'pad': 'pad', 'dilation': 'dilation', 'with_bias': 'with_bias', 'w_init': 'w_init', 'b_init': 'b_init', 'channel_last': 'channel_last'}), '(x, ochannels, kernel, stride=stride, pad=pad, dilation=\n dilation, with_bias=with_bias, w_init=w_init, b_init=b_init,\n channel_last=channel_last)\n', (2209, 2361), True, 'import nnabla.parametric_functions as PF\n'), ((7084, 7110), 'numpy.random.RandomState', 'np.random.RandomState', (['(313)'], {}), '(313)\n', (7105, 7110), True, 'import numpy as np\n'), ((10581, 10602), 'nnabla.logger.logger.debug', 'logger.debug', (['x.shape'], {}), '(x.shape)\n', (10593, 10602), False, 'from nnabla.logger import logger\n'), ((10979, 11000), 'nnabla.logger.logger.debug', 'logger.debug', (['r.shape'], {}), '(r.shape)\n', (10991, 11000), False, 'from nnabla.logger import logger\n'), ((12862, 12921), 'nnabla.functions.average_pooling', 'F.average_pooling', (['r', 'pool_shape'], {'channel_last': 'channel_last'}), '(r, pool_shape, channel_last=channel_last)\n', 
(12879, 12921), True, 'import nnabla.functions as F\n'), ((13026, 13047), 'nnabla.logger.logger.debug', 'logger.debug', (['r.shape'], {}), '(r.shape)\n', (13038, 13047), False, 'from nnabla.logger import logger\n'), ((13289, 13309), 'nnabla.utils.nnp_graph.NnpNetworkPass', 'NnpNetworkPass', (['(True)'], {}), '(True)\n', (13303, 13309), False, 'from nnabla.utils.nnp_graph import NnpNetworkPass\n'), ((1440, 1474), 'numpy.transpose', 'np.transpose', (['w_init', '(0, 2, 3, 1)'], {}), '(w_init, (0, 2, 3, 1))\n', (1452, 1474), True, 'import numpy as np\n'), ((3820, 3877), 'nnabla.parametric_functions.batch_normalization', 'PF.batch_normalization', (['h'], {'axes': 'axes', 'batch_stat': '(not test)'}), '(h, axes=axes, batch_stat=not test)\n', (3842, 3877), True, 'import nnabla.parametric_functions as PF\n'), ((3933, 3966), 'nnabla.parameter_scope', 'nn.parameter_scope', (['"""basicblock1"""'], {}), "('basicblock1')\n", (3951, 3966), True, 'import nnabla as nn\n'), ((4132, 4165), 'nnabla.parameter_scope', 'nn.parameter_scope', (['"""basicblock2"""'], {}), "('basicblock2')\n", (4150, 4165), True, 'import nnabla as nn\n'), ((4345, 4364), 'nnabla.functions.add2', 'F.add2', (['h', 'residual'], {}), '(h, residual)\n', (4351, 4364), True, 'import nnabla.functions as F\n'), ((4519, 4576), 'nnabla.parametric_functions.batch_normalization', 'PF.batch_normalization', (['h'], {'axes': 'axes', 'batch_stat': '(not test)'}), '(h, axes=axes, batch_stat=not test)\n', (4541, 4576), True, 'import nnabla.parametric_functions as PF\n'), ((4646, 4679), 'nnabla.parameter_scope', 'nn.parameter_scope', (['"""bottleneck1"""'], {}), "('bottleneck1')\n", (4664, 4679), True, 'import nnabla as nn\n'), ((4851, 4884), 'nnabla.parameter_scope', 'nn.parameter_scope', (['"""bottleneck2"""'], {}), "('bottleneck2')\n", (4869, 4884), True, 'import nnabla as nn\n'), ((5070, 5103), 'nnabla.parameter_scope', 'nn.parameter_scope', (['"""bottleneck3"""'], {}), "('bottleneck3')\n", (5088, 5103), True, 'import 
nnabla as nn\n'), ((5241, 5275), 'nnabla.parameter_scope', 'nn.parameter_scope', (['"""bottleneck_s"""'], {}), "('bottleneck_s')\n", (5259, 5275), True, 'import nnabla as nn\n'), ((5373, 5385), 'nnabla.functions.add2', 'F.add2', (['h', 's'], {}), '(h, s)\n', (5379, 5385), True, 'import nnabla.functions as F\n'), ((6475, 6501), 'nnabla.parameter_scope', 'nn.parameter_scope', (['"""root"""'], {}), "('root')\n", (6493, 6501), True, 'import nnabla as nn\n'), ((6517, 6543), 'numpy.random.RandomState', 'np.random.RandomState', (['(313)'], {}), '(313)\n', (6538, 6543), True, 'import numpy as np\n'), ((6556, 6594), 'nnabla.functions.concatenate', 'F.concatenate', (['x', '*children'], {'axis': 'axes'}), '(x, *children, axis=axes)\n', (6569, 6594), True, 'import nnabla.functions as F\n'), ((6908, 6967), 'nnabla.parametric_functions.batch_normalization', 'PF.batch_normalization', (['x'], {'axes': '[axes]', 'batch_stat': '(not test)'}), '(x, axes=[axes], batch_stat=not test)\n', (6930, 6967), True, 'import nnabla.parametric_functions as PF\n'), ((6980, 6989), 'nnabla.functions.relu', 'F.relu', (['x'], {}), '(x)\n', (6986, 6989), True, 'import nnabla.functions as F\n'), ((7156, 7180), 'nnabla.parameter_scope', 'nn.parameter_scope', (['"""up"""'], {}), "('up')\n", (7174, 7180), True, 'import nnabla as nn\n'), ((8224, 8321), 'nnabla.functions.max_pooling', 'F.max_pooling', (['x'], {'kernel': '(stride, stride)', 'stride': '(stride, stride)', 'channel_last': 'channel_last'}), '(x, kernel=(stride, stride), stride=(stride, stride),\n channel_last=channel_last)\n', (8237, 8321), True, 'import nnabla.functions as F\n'), ((8574, 8640), 'nnabla.parametric_functions.batch_normalization', 'PF.batch_normalization', (['residual'], {'axes': '[axes]', 'batch_stat': '(not test)'}), '(residual, axes=[axes], batch_stat=not test)\n', (8596, 8640), True, 'import nnabla.parametric_functions as PF\n'), ((8699, 8727), 'nnabla.parameter_scope', 'nn.parameter_scope', (['"""block1"""'], {}), 
"('block1')\n", (8717, 8727), True, 'import nnabla as nn\n'), ((8841, 8869), 'nnabla.parameter_scope', 'nn.parameter_scope', (['"""block2"""'], {}), "('block2')\n", (8859, 8869), True, 'import nnabla as nn\n'), ((9365, 9392), 'nnabla.parameter_scope', 'nn.parameter_scope', (['"""node1"""'], {}), "('node1')\n", (9383, 9392), True, 'import nnabla as nn\n'), ((9539, 9566), 'nnabla.parameter_scope', 'nn.parameter_scope', (['"""node2"""'], {}), "('node2')\n", (9557, 9566), True, 'import nnabla as nn\n'), ((10649, 10676), 'nnabla.parameter_scope', 'nn.parameter_scope', (['"""conv1"""'], {}), "('conv1')\n", (10667, 10676), True, 'import nnabla as nn\n'), ((11010, 11038), 'nnabla.parameter_scope', 'nn.parameter_scope', (['"""level0"""'], {}), "('level0')\n", (11028, 11038), True, 'import nnabla as nn\n'), ((11264, 11285), 'nnabla.logger.logger.debug', 'logger.debug', (['r.shape'], {}), '(r.shape)\n', (11276, 11285), False, 'from nnabla.logger import logger\n'), ((11295, 11323), 'nnabla.parameter_scope', 'nn.parameter_scope', (['"""level1"""'], {}), "('level1')\n", (11313, 11323), True, 'import nnabla as nn\n'), ((11549, 11570), 'nnabla.logger.logger.debug', 'logger.debug', (['r.shape'], {}), '(r.shape)\n', (11561, 11570), False, 'from nnabla.logger import logger\n'), ((11580, 11608), 'nnabla.parameter_scope', 'nn.parameter_scope', (['"""level2"""'], {}), "('level2')\n", (11598, 11608), True, 'import nnabla as nn\n'), ((11802, 11823), 'nnabla.logger.logger.debug', 'logger.debug', (['r.shape'], {}), '(r.shape)\n', (11814, 11823), False, 'from nnabla.logger import logger\n'), ((11833, 11861), 'nnabla.parameter_scope', 'nn.parameter_scope', (['"""level3"""'], {}), "('level3')\n", (11851, 11861), True, 'import nnabla as nn\n'), ((12148, 12169), 'nnabla.logger.logger.debug', 'logger.debug', (['r.shape'], {}), '(r.shape)\n', (12160, 12169), False, 'from nnabla.logger import logger\n'), ((12179, 12207), 'nnabla.parameter_scope', 'nn.parameter_scope', (['"""level4"""'], {}), 
"('level4')\n", (12197, 12207), True, 'import nnabla as nn\n'), ((12494, 12515), 'nnabla.logger.logger.debug', 'logger.debug', (['r.shape'], {}), '(r.shape)\n', (12506, 12515), False, 'from nnabla.logger import logger\n'), ((12525, 12553), 'nnabla.parameter_scope', 'nn.parameter_scope', (['"""level5"""'], {}), "('level5')\n", (12543, 12553), True, 'import nnabla as nn\n'), ((12747, 12768), 'nnabla.logger.logger.debug', 'logger.debug', (['r.shape'], {}), '(r.shape)\n', (12759, 12768), False, 'from nnabla.logger import logger\n'), ((12931, 12955), 'nnabla.parameter_scope', 'nn.parameter_scope', (['"""fc"""'], {}), "('fc')\n", (12949, 12955), True, 'import nnabla as nn\n'), ((13391, 13417), 'nnabla.parameter_scope', 'nn.parameter_scope', (['"""up16"""'], {}), "('up16')\n", (13409, 13417), True, 'import nnabla as nn\n'), ((13577, 13602), 'nnabla.parameter_scope', 'nn.parameter_scope', (['"""up8"""'], {}), "('up8')\n", (13595, 13602), True, 'import nnabla as nn\n'), ((13865, 13890), 'nnabla.parameter_scope', 'nn.parameter_scope', (['"""up4"""'], {}), "('up4')\n", (13883, 13890), True, 'import nnabla as nn\n'), ((14588, 14615), 'nnabla.parameter_scope', 'nn.parameter_scope', (['"""up2_b"""'], {}), "('up2_b')\n", (14606, 14615), True, 'import nnabla as nn\n'), ((14741, 14768), 'nnabla.parameter_scope', 'nn.parameter_scope', (['"""up2_c"""'], {}), "('up2_c')\n", (14759, 14768), True, 'import nnabla as nn\n'), ((1929, 1967), 'nnabla.initializer.NormalInitializer', 'NormalInitializer', ([], {'sigma': '(0.01)', 'rng': 'RNG'}), '(sigma=0.01, rng=RNG)\n', (1946, 1967), False, 'from nnabla.initializer import UniformInitializer, ConstantInitializer, NormalInitializer, calc_normal_std_he_forward, calc_normal_std_he_backward\n'), ((2852, 2887), 'nnabla.parameter_scope', 'nn.parameter_scope', (['"""shortcut_conv"""'], {}), "('shortcut_conv')\n", (2870, 2887), True, 'import nnabla as nn\n'), ((2905, 3015), 'nnabla.parametric_functions.convolution', 'PF.convolution', (['x', 
'ochannels', '(1, 1)'], {'stride': '(stride, stride)', 'with_bias': '(False)', 'channel_last': 'channel_last'}), '(x, ochannels, (1, 1), stride=(stride, stride), with_bias=\n False, channel_last=channel_last)\n', (2919, 3015), True, 'import nnabla.parametric_functions as PF\n'), ((3058, 3117), 'nnabla.parametric_functions.batch_normalization', 'PF.batch_normalization', (['x'], {'axes': '[axes]', 'batch_stat': '(not test)'}), '(x, axes=[axes], batch_stat=not test)\n', (3080, 3117), True, 'import nnabla.parametric_functions as PF\n'), ((3244, 3341), 'nnabla.functions.max_pooling', 'F.max_pooling', (['x'], {'kernel': '(stride, stride)', 'stride': '(stride, stride)', 'channel_last': 'channel_last'}), '(x, kernel=(stride, stride), stride=(stride, stride),\n channel_last=channel_last)\n', (3257, 3341), True, 'import nnabla.functions as F\n'), ((3448, 3547), 'nnabla.parametric_functions.convolution', 'PF.convolution', (['x', 'ochannels', '(1, 1)'], {'stride': '(1, 1)', 'with_bias': '(False)', 'channel_last': 'channel_last'}), '(x, ochannels, (1, 1), stride=(1, 1), with_bias=False,\n channel_last=channel_last)\n', (3462, 3547), True, 'import nnabla.parametric_functions as PF\n'), ((3598, 3657), 'nnabla.parametric_functions.batch_normalization', 'PF.batch_normalization', (['x'], {'axes': '[axes]', 'batch_stat': '(not test)'}), '(x, axes=[axes], batch_stat=not test)\n', (3620, 3657), True, 'import nnabla.parametric_functions as PF\n'), ((4195, 4291), 'nnabla.parametric_functions.convolution', 'PF.convolution', (['h', 'ochannels', '(3, 3)'], {'pad': '(1, 1)', 'with_bias': '(False)', 'channel_last': 'channel_last'}), '(h, ochannels, (3, 3), pad=(1, 1), with_bias=False,\n channel_last=channel_last)\n', (4209, 4291), True, 'import nnabla.parametric_functions as PF\n'), ((5120, 5205), 'nnabla.parametric_functions.convolution', 'PF.convolution', (['h', 'ochannels', '(1, 1)'], {'with_bias': '(False)', 'channel_last': 'channel_last'}), '(h, ochannels, (1, 1), with_bias=False, 
channel_last=channel_last\n )\n', (5134, 5205), True, 'import nnabla.parametric_functions as PF\n'), ((7449, 7508), 'nnabla.parametric_functions.batch_normalization', 'PF.batch_normalization', (['x'], {'axes': '[axes]', 'batch_stat': '(not test)'}), '(x, axes=[axes], batch_stat=not test)\n', (7471, 7508), True, 'import nnabla.parametric_functions as PF\n'), ((10861, 10920), 'nnabla.parametric_functions.batch_normalization', 'PF.batch_normalization', (['r'], {'axes': '[axes]', 'batch_stat': '(not test)'}), '(r, axes=[axes], batch_stat=not test)\n', (10883, 10920), True, 'import nnabla.parametric_functions as PF\n'), ((13905, 13942), 'nnabla.parameter_scope', 'nn.parameter_scope', (['"""residual_level3"""'], {}), "('residual_level3')\n", (13923, 13942), True, 'import nnabla as nn\n'), ((14906, 14943), 'nnabla.parameter_scope', 'nn.parameter_scope', (['"""level3up_c_root"""'], {}), "('level3up_c_root')\n", (14924, 14943), True, 'import nnabla as nn\n'), ((15113, 15148), 'nnabla.parameter_scope', 'nn.parameter_scope', (['"""level2up_root"""'], {}), "('level2up_root')\n", (15131, 15148), True, 'import nnabla as nn\n'), ((15307, 15335), 'nnabla.parameter_scope', 'nn.parameter_scope', (['"""x_root"""'], {}), "('x_root')\n", (15325, 15335), True, 'import nnabla as nn\n'), ((3990, 4111), 'nnabla.parametric_functions.convolution', 'PF.convolution', (['x', 'ochannels', '(3, 3)'], {'stride': '(stride, stride)', 'pad': '(1, 1)', 'with_bias': '(False)', 'channel_last': 'channel_last'}), '(x, ochannels, (3, 3), stride=(stride, stride), pad=(1, 1),\n with_bias=False, channel_last=channel_last)\n', (4004, 4111), True, 'import nnabla.parametric_functions as PF\n'), ((4716, 4801), 'nnabla.parametric_functions.convolution', 'PF.convolution', (['x', 'hchannels', '(1, 1)'], {'with_bias': '(False)', 'channel_last': 'channel_last'}), '(x, hchannels, (1, 1), with_bias=False, channel_last=channel_last\n )\n', (4730, 4801), True, 'import nnabla.parametric_functions as PF\n'), ((4921, 5033), 
'nnabla.parametric_functions.convolution', 'PF.convolution', (['h', 'hchannels', '(3, 3)'], {'pad': '(1, 1)', 'stride': 'stride', 'with_bias': '(False)', 'channel_last': 'channel_last'}), '(h, hchannels, (3, 3), pad=(1, 1), stride=stride, with_bias=\n False, channel_last=channel_last)\n', (4935, 5033), True, 'import nnabla.parametric_functions as PF\n'), ((6249, 6306), 'nnabla.parametric_functions.batch_normalization', 'PF.batch_normalization', (['x'], {'axes': 'axes', 'batch_stat': '(not test)'}), '(x, axes=axes, batch_stat=not test)\n', (6271, 6306), True, 'import nnabla.parametric_functions as PF\n'), ((6803, 6846), 'models.networks.initializers.he_initializer', 'he_initializer', (['ochannels', 'kernel_size', 'rng'], {}), '(ochannels, kernel_size, rng)\n', (6817, 6846), False, 'from models.networks.initializers import he_initializer, bilinear_depthwise_initializer, bilinear_initializer\n'), ((7324, 7367), 'models.networks.initializers.he_initializer', 'he_initializer', (['ochannels', 'kernel_size', 'rng'], {}), '(ochannels, kernel_size, rng)\n', (7338, 7367), False, 'from models.networks.initializers import he_initializer, bilinear_depthwise_initializer, bilinear_initializer\n'), ((7829, 7883), 'models.networks.initializers.bilinear_depthwise_initializer', 'bilinear_depthwise_initializer', (['ichannels', 'kernel_size'], {}), '(ichannels, kernel_size)\n', (7859, 7883), False, 'from models.networks.initializers import he_initializer, bilinear_depthwise_initializer, bilinear_initializer\n'), ((14090, 14125), 'nnabla.parameter_scope', 'nn.parameter_scope', (['"""level3up_root"""'], {}), "('level3up_root')\n", (14108, 14125), True, 'import nnabla as nn\n'), ((14289, 14317), 'nnabla.parameter_scope', 'nn.parameter_scope', (['"""x_root"""'], {}), "('x_root')\n", (14307, 14317), True, 'import nnabla as nn\n')]
|
import click
import spacy
from tika import parser
import re
import math
import string
import sys
import nltk
from nltk.corpus import stopwords
# Fetch the NLTK stopword corpus up front; get_words_from_line_list needs it.
nltk.download('stopwords')
import os
def preprocess(doc):
    """Normalise a parsed resume: collapse newlines/tabs to spaces, drop
    bullet/dash characters, and trim surrounding whitespace."""
    replacements = (("\n", " "), ("•", ""), ("–", ""), ("\t", " "))
    for old, new in replacements:
        doc = doc.replace(old, new)
    return doc.strip()
"""
Document distance routine using cosine similarity
"""
# splitting the text lines into words
# translation table is a global variable
# mapping upper case to lower case and
# punctuation to spaces
translation_table = str.maketrans(string.punctuation+string.ascii_uppercase,
" "*len(string.punctuation)+string.ascii_lowercase)
def get_words_from_line_list(text):
    """
    Applies Translations and returns the list of words from the text
    document, with English stopwords removed.
    """
    text = text.translate(translation_table)
    # Build the stopword set once. The original rebuilt
    # set(stopwords.words('english')) inside the comprehension condition,
    # i.e. re-read the NLTK corpus for every single word.
    stop_words = set(stopwords.words('english'))
    word_list = [x for x in text.split() if x not in stop_words]
    return word_list
def count_frequency(word_list):
    """Map each word in *word_list* to the number of times it occurs."""
    freq = {}
    for word in word_list:
        freq[word] = freq.get(word, 0) + 1
    return freq
def word_frequencies_for_text(text):
    """Return a {word: frequency} dictionary for *text* (stopwords removed,
    case/punctuation normalised by get_words_from_line_list)."""
    return count_frequency(get_words_from_line_list(text))
def dotProduct(D1, D2):
    """
    Returns the dot product of two documents
    (frequency dictionaries), as a float.
    """
    # Only keys present in both dictionaries contribute.
    return float(sum(D1[key] * D2[key] for key in D1.keys() & D2.keys()))
def vector_angle(D1, D2):
    """
    Returns the angle in radians between the two frequency vectors.
    """
    numerator = dotProduct(D1, D2)
    denominator = math.sqrt(dotProduct(D1, D1) * dotProduct(D2, D2))
    # Floating-point rounding can push the cosine fractionally outside
    # [-1, 1] (e.g. for identical documents), which would make math.acos
    # raise ValueError; clamp before taking the arc-cosine.
    cosine = max(-1.0, min(1.0, numerator / denominator))
    return math.acos(cosine)
def documentSimilarity(text_1, text_2):
    """Angle in degrees between the word-frequency vectors of the two
    documents; the lower the value, the more similar they are."""
    freqs_a = word_frequencies_for_text(text_1)
    freqs_b = word_frequencies_for_text(text_2)
    return math.degrees(vector_angle(freqs_a, freqs_b))
def deg_score(resume):
"""
Returns the score of resume based on Degree
"""
deg_score = 0
for word in resume.split(" "):
if word.strip() in ["PhD","PHD","Research Associate"]:
deg_score=3
elif word.strip() in ["MS","MT","M.Tech","Masters"]:
if deg_score<2:
deg_score=2
elif word.strip() in ["BS","BE","B.S","B.E","B.Tech","Bachelors"]:
if deg_score<1:
deg_score=1
return deg_score
def des_score(resume):
"""
Returns resume score based on Designation
"""
des_score = 0
for word in resume.split(" "):
if word.strip() in ["Sr.","Senior"]:
if des_score<3:
des_score=3
elif word.strip() in ["Associate", "Scientist", "Engineer"]:
if des_score<2:
des_score=2
elif word.strip() in ["Analyst", "Junior"]:
if des_score<1:
des_score=1
return des_score
def exp_score(resume):
"""
Returns resume score based on Experience
"""
exp_score = 0
a = re.findall(r'[0-9]+\+*[ ]?[Yy]ear',resume)
a.sort()
if len(a)>0:
exp = a[len(a)-1].lower().split("y")[0].strip()
if "+" in exp :
exp = exp[:-1]
exp = int(exp)
if exp>=4:
exp_score=3
elif exp>=2:
exp_score=2
elif exp==1:
exp_score=1
return exp_score
def skill_score(doc_resume,jd):
"""
Returns resume score based on skills
"""
skill_list = [tok.text for tok in doc_resume if tok.ent_type_=="Skills"]
skill_text = " ".join(skill_list)
skill_score = 0
if len(skill_list)>0:
skill_match = 90.0-documentSimilarity(jd,skill_text)
## Skills are matched on a scale of 0-10
skill_score = min(10,skill_match)
return skill_score
def resume_score(resume,jd):
"""
Returns score based on overall similarity between resume and the Job Description
"""
resume_match = 90-documentSimilarity(jd, resume)
resume_score = min(20,resume_match)
return resume_score
def overall_resume_score(doc_resume,resume,jd):
"""
Collates and the returns the single combined score due to various factors
"""
score = round(10/7*(deg_score(resume)*0.20+des_score(resume)*0.20
+exp_score(resume)*0.20+skill_score(doc_resume,jd)*0.30+resume_score(resume,jd)*0.10),1)
return score
@click.command()
@click.option("--res","-r","in_file",default=os.path.join("./Resume",os.listdir("./Resume")[0]),
help = "Path to the resume.",
)
@click.option("--jd","-j","jd_file",default=os.path.join("./JD",os.listdir("./JD")[0]),
help="Path to Job Description")
@click.option("--out-file","-o",default="./Summary/output.txt",
help = "Path to the resume summary")
def process(in_file,jd_file,out_file):
"""
The CLI function
"""
ner_model = spacy.load(os.path.join(os.path.dirname(os.getcwd())
,'Training_NER/saved-NER.model'))
parser_resume = parser.from_file(in_file)
resume = parser_resume['content']
resume = preprocess(resume)
doc_resume = ner_model(resume)
file = open(out_file,"w")
info = ["Name","Email Address","Location","College Name","Degree","Designation",
"Years of Experience","Skills"]
for i in info :
file.write(i+":\n")
text_list = [tok.text for tok in doc_resume if tok.ent_type_==i]
text = " ".join(text_list)
file.write(text)
file.write("\n\n")
parser_jd = parser.from_file(jd_file)
jd = parser_jd["content"]
jd = preprocess(jd)
score = overall_resume_score(doc_resume,resume,jd)
file.write("Resume Score(1-10):\n")
file.write(str(score)+"\n")
file.close()
if __name__=="__main__":
process()
|
[
"os.getcwd",
"click.option",
"click.command",
"math.acos",
"re.findall",
"tika.parser.from_file",
"nltk.corpus.stopwords.words",
"nltk.download",
"math.degrees",
"os.listdir"
] |
[((143, 169), 'nltk.download', 'nltk.download', (['"""stopwords"""'], {}), "('stopwords')\n", (156, 169), False, 'import nltk\n'), ((5016, 5031), 'click.command', 'click.command', ([], {}), '()\n', (5029, 5031), False, 'import click\n'), ((5282, 5386), 'click.option', 'click.option', (['"""--out-file"""', '"""-o"""'], {'default': '"""./Summary/output.txt"""', 'help': '"""Path to the resume summary"""'}), "('--out-file', '-o', default='./Summary/output.txt', help=\n 'Path to the resume summary')\n", (5294, 5386), False, 'import click\n'), ((2074, 2108), 'math.acos', 'math.acos', (['(numerator / denominator)'], {}), '(numerator / denominator)\n', (2083, 2108), False, 'import math\n'), ((2487, 2509), 'math.degrees', 'math.degrees', (['distance'], {}), '(distance)\n', (2499, 2509), False, 'import math\n'), ((3633, 3676), 're.findall', 're.findall', (['"""[0-9]+\\\\+*[ ]?[Yy]ear"""', 'resume'], {}), "('[0-9]+\\\\+*[ ]?[Yy]ear', resume)\n", (3643, 3676), False, 'import re\n'), ((5590, 5615), 'tika.parser.from_file', 'parser.from_file', (['in_file'], {}), '(in_file)\n', (5606, 5615), False, 'from tika import parser\n'), ((6104, 6129), 'tika.parser.from_file', 'parser.from_file', (['jd_file'], {}), '(jd_file)\n', (6120, 6129), False, 'from tika import parser\n'), ((5519, 5530), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (5528, 5530), False, 'import os\n'), ((5101, 5123), 'os.listdir', 'os.listdir', (['"""./Resume"""'], {}), "('./Resume')\n", (5111, 5123), False, 'import os\n'), ((5225, 5243), 'os.listdir', 'os.listdir', (['"""./JD"""'], {}), "('./JD')\n", (5235, 5243), False, 'import os\n'), ((1001, 1027), 'nltk.corpus.stopwords.words', 'stopwords.words', (['"""english"""'], {}), "('english')\n", (1016, 1027), False, 'from nltk.corpus import stopwords\n')]
|
"""
This module implements plotting functions useful to report analysis results.
Author: <NAME>, <NAME>, 2017
"""
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
import pandas as pd
from nilearn.glm.first_level import check_design_matrix
from nilearn.glm.contrasts import expression_to_contrast_vector
def plot_design_matrix(design_matrix, rescale=True, ax=None, output_file=None):
"""Plot a design matrix provided as a DataFrame
Parameters
----------
design matrix : pandas DataFrame,
Describes a design matrix.
rescale : bool, optional
Rescale columns magnitude for visualization or not.
ax : axis handle, optional
Handle to axis onto which we will draw design matrix.
output_file : string or None, optional,
The name of an image file to export the plot to. Valid extensions
are .png, .pdf, .svg. If output_file is not None, the plot
is saved to a file, and the display is closed.
Returns
-------
ax: axis handle
The axis used for plotting.
"""
# We import _set_mpl_backend because just the fact that we are
# importing it sets the backend
# normalize the values per column for better visualization
_, X, names = check_design_matrix(design_matrix)
if rescale:
X = X / np.maximum(1.e-12, np.sqrt(
np.sum(X ** 2, 0))) # pylint: disable=no-member
if ax is None:
max_len = np.max([len(str(name)) for name in names])
fig_height = 1 + .1 * X.shape[0] + .04 * max_len
if fig_height < 3:
fig_height = 3
elif fig_height > 10:
fig_height = 10
plt.figure(figsize=(1 + .23 * len(names), fig_height))
ax = plt.subplot(1, 1, 1)
ax.imshow(X, interpolation='nearest', aspect='auto')
ax.set_label('conditions')
ax.set_ylabel('scan number')
ax.set_xticks(range(len(names)))
ax.set_xticklabels(names, rotation=60, ha='left')
# Set ticks above, to have a display more similar to the display of a
# corresponding dataframe
ax.xaxis.tick_top()
plt.tight_layout()
if output_file is not None:
plt.savefig(output_file)
plt.close()
ax = None
return ax
def plot_event(model_event, cmap=None, output_file=None, **fig_kwargs):
"""Creates plot for event visualization.
Parameters
----------
model_event : pandas DataFrame or list of pandas DataFrame
the `pandas.DataFrame` must have three columns
``event_type`` with event name, ``onset`` and ``duration``.
The `pandas.DataFrame` can also be obtained from
:func:`nilearn.glm.first_level.first_level_from_bids`.
cmap : str or matplotlib.cmap, optional
the colormap used to label different events
output_file : string or None, optional,
The name of an image file to export the plot to. Valid extensions
are .png, .pdf, .svg. If output_file is not None, the plot
is saved to a file, and the display is closed.
**fig_kwargs : extra keyword arguments, optional
Extra arguments passed to matplotlib.pyplot.subplots
Returns
-------
Plot Figure object
"""
if isinstance(model_event, pd.DataFrame):
model_event = [model_event]
n_runs = len(model_event)
figure, ax = plt.subplots(1, 1, **fig_kwargs)
# input validation
if cmap is None:
cmap = plt.cm.tab20
elif isinstance(cmap, str):
cmap = plt.get_cmap(cmap)
else:
cmap = cmap
event_labels = pd.concat(event['trial_type'] for event in model_event)
event_labels = np.unique(event_labels)
cmap_dictionary = {label:idx for idx, label in enumerate(event_labels)}
if len(event_labels) > cmap.N:
plt.close()
raise ValueError("The number of event types is greater than "+ \
" colors in colormap (%d > %d). Use a different colormap." \
% (len(event_labels), cmap.N))
for idx_run, event_df in enumerate(model_event):
for _, event in event_df.iterrows():
event_onset = event['onset']
event_end = event['onset'] + event['duration']
color = cmap.colors[cmap_dictionary[event['trial_type']]]
ax.axvspan(event_onset,
event_end,
ymin=(idx_run + .25) / n_runs,
ymax=(idx_run + .75) / n_runs,
facecolor=color)
handles = []
for label, idx in cmap_dictionary.items():
patch = mpatches.Patch(color=cmap.colors[idx], label=label)
handles.append(patch)
_ = ax.legend(handles=handles, ncol=4)
ax.set_xlabel("Time (sec.)")
ax.set_ylabel("Runs")
ax.set_ylim(0, n_runs)
ax.set_yticks(np.arange(n_runs) + .5)
ax.set_yticklabels(np.arange(n_runs) + 1)
plt.tight_layout()
if output_file is not None:
plt.savefig(output_file)
plt.close()
figure = None
return figure
def plot_contrast_matrix(contrast_def, design_matrix, colorbar=False, ax=None,
output_file=None):
"""Creates plot for contrast definition.
Parameters
----------
contrast_def : str or array of shape (n_col) or list of (string or
array of shape (n_col))
where ``n_col`` is the number of columns of the design matrix, (one
array per run). If only one array is provided when there are several
runs, it will be assumed that the same contrast is desired for all
runs. The string can be a formula compatible with
`pandas.DataFrame.eval`. Basically one can use the name of the
conditions as they appear in the design matrix of the fitted model
combined with operators +- and combined with numbers with operators
+-`*`/.
design_matrix : pandas DataFrame
colorbar : Boolean, optional (default False)
Include a colorbar in the contrast matrix plot.
ax : matplotlib Axes object, optional (default None)
Directory where plotted figures will be stored.
output_file : string or None, optional,
The name of an image file to export the plot to. Valid extensions
are .png, .pdf, .svg. If output_file is not None, the plot
is saved to a file, and the display is closed.
Returns
-------
Plot Axes object
"""
design_column_names = design_matrix.columns.tolist()
if isinstance(contrast_def, str):
contrast_def = expression_to_contrast_vector(
contrast_def, design_column_names)
maxval = np.max(np.abs(contrast_def))
con_matrix = np.asmatrix(contrast_def)
max_len = np.max([len(str(name)) for name in design_column_names])
if ax is None:
plt.figure(figsize=(.4 * len(design_column_names),
1 + .5 * con_matrix.shape[0] + .04 * max_len))
ax = plt.gca()
mat = ax.matshow(con_matrix, aspect='equal',
cmap='gray', vmin=-maxval, vmax=maxval)
ax.set_label('conditions')
ax.set_ylabel('')
ax.set_yticks(())
ax.xaxis.set(ticks=np.arange(len(design_column_names)))
ax.set_xticklabels(design_column_names, rotation=50, ha='left')
if colorbar:
plt.colorbar(mat, fraction=0.025, pad=0.04)
plt.tight_layout()
plt.subplots_adjust(top=np.min([.3 + .05 * con_matrix.shape[0], .55]))
if output_file is not None:
plt.savefig(output_file)
plt.close()
ax = None
return ax
|
[
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.savefig",
"numpy.abs",
"matplotlib.pyplot.get_cmap",
"numpy.sum",
"matplotlib.pyplot.close",
"nilearn.glm.first_level.check_design_matrix",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.colorbar",
"nilearn.glm.contrasts.expression_to_contrast_vector",
"numpy.min",
"numpy.asmatrix",
"numpy.arange",
"matplotlib.pyplot.gca",
"matplotlib.patches.Patch",
"matplotlib.pyplot.tight_layout",
"pandas.concat",
"numpy.unique"
] |
[((1283, 1317), 'nilearn.glm.first_level.check_design_matrix', 'check_design_matrix', (['design_matrix'], {}), '(design_matrix)\n', (1302, 1317), False, 'from nilearn.glm.first_level import check_design_matrix\n'), ((2132, 2150), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (2148, 2150), True, 'import matplotlib.pyplot as plt\n'), ((3368, 3400), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {}), '(1, 1, **fig_kwargs)\n', (3380, 3400), True, 'import matplotlib.pyplot as plt\n'), ((3590, 3645), 'pandas.concat', 'pd.concat', (["(event['trial_type'] for event in model_event)"], {}), "(event['trial_type'] for event in model_event)\n", (3599, 3645), True, 'import pandas as pd\n'), ((3665, 3688), 'numpy.unique', 'np.unique', (['event_labels'], {}), '(event_labels)\n', (3674, 3688), True, 'import numpy as np\n'), ((4912, 4930), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (4928, 4930), True, 'import matplotlib.pyplot as plt\n'), ((6708, 6733), 'numpy.asmatrix', 'np.asmatrix', (['contrast_def'], {}), '(contrast_def)\n', (6719, 6733), True, 'import numpy as np\n'), ((7381, 7399), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (7397, 7399), True, 'import matplotlib.pyplot as plt\n'), ((1764, 1784), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(1)', '(1)'], {}), '(1, 1, 1)\n', (1775, 1784), True, 'import matplotlib.pyplot as plt\n'), ((2191, 2215), 'matplotlib.pyplot.savefig', 'plt.savefig', (['output_file'], {}), '(output_file)\n', (2202, 2215), True, 'import matplotlib.pyplot as plt\n'), ((2224, 2235), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (2233, 2235), True, 'import matplotlib.pyplot as plt\n'), ((3810, 3821), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (3819, 3821), True, 'import matplotlib.pyplot as plt\n'), ((4602, 4653), 'matplotlib.patches.Patch', 'mpatches.Patch', ([], {'color': 'cmap.colors[idx]', 'label': 'label'}), 
'(color=cmap.colors[idx], label=label)\n', (4616, 4653), True, 'import matplotlib.patches as mpatches\n'), ((4971, 4995), 'matplotlib.pyplot.savefig', 'plt.savefig', (['output_file'], {}), '(output_file)\n', (4982, 4995), True, 'import matplotlib.pyplot as plt\n'), ((5004, 5015), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (5013, 5015), True, 'import matplotlib.pyplot as plt\n'), ((6571, 6635), 'nilearn.glm.contrasts.expression_to_contrast_vector', 'expression_to_contrast_vector', (['contrast_def', 'design_column_names'], {}), '(contrast_def, design_column_names)\n', (6600, 6635), False, 'from nilearn.glm.contrasts import expression_to_contrast_vector\n'), ((6669, 6689), 'numpy.abs', 'np.abs', (['contrast_def'], {}), '(contrast_def)\n', (6675, 6689), True, 'import numpy as np\n'), ((6976, 6985), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (6983, 6985), True, 'import matplotlib.pyplot as plt\n'), ((7332, 7375), 'matplotlib.pyplot.colorbar', 'plt.colorbar', (['mat'], {'fraction': '(0.025)', 'pad': '(0.04)'}), '(mat, fraction=0.025, pad=0.04)\n', (7344, 7375), True, 'import matplotlib.pyplot as plt\n'), ((7516, 7540), 'matplotlib.pyplot.savefig', 'plt.savefig', (['output_file'], {}), '(output_file)\n', (7527, 7540), True, 'import matplotlib.pyplot as plt\n'), ((7549, 7560), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (7558, 7560), True, 'import matplotlib.pyplot as plt\n'), ((3521, 3539), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['cmap'], {}), '(cmap)\n', (3533, 3539), True, 'import matplotlib.pyplot as plt\n'), ((4833, 4850), 'numpy.arange', 'np.arange', (['n_runs'], {}), '(n_runs)\n', (4842, 4850), True, 'import numpy as np\n'), ((4880, 4897), 'numpy.arange', 'np.arange', (['n_runs'], {}), '(n_runs)\n', (4889, 4897), True, 'import numpy as np\n'), ((7428, 7476), 'numpy.min', 'np.min', (['[0.3 + 0.05 * con_matrix.shape[0], 0.55]'], {}), '([0.3 + 0.05 * con_matrix.shape[0], 0.55])\n', (7434, 7476), True, 'import numpy as 
np\n'), ((1390, 1407), 'numpy.sum', 'np.sum', (['(X ** 2)', '(0)'], {}), '(X ** 2, 0)\n', (1396, 1407), True, 'import numpy as np\n')]
|
"""Date and time functions
Refactored from Cufflinks' 'date_tools.py' module.
Credits to @jorgesantos.
"""
import datetime as dt
def get_date_from_today(delta, strfmt='%Y%m%d'):
""" Returns a string that represents a date n numbers of days from today.
Parameters
----------
delta : int
number of days
strfmt : string
format in which the date will be represented
"""
return (dt.date.today() + dt.timedelta(delta)).strftime(strfmt)
def string_to_date(string_date, strfmt='%Y%m%d'):
""" Converts a string format date into datetime.
Parameters
----------
string_date : string
date in string format
strfmt : string
format in which the input date is represented
"""
return dt.datetime.strptime(string_date, strfmt).date()
def int_to_date(int_date):
""" Converts an int format date into datetime.
Parameters
----------
int_date : int
date in int format
Example
-------
int_date(20151023)
"""
return string_to_date(str(int_date))
def date_to_int(date, strfmt='%Y%m%d'):
""" Converts a datetime date into int.
Parameters
----------
date : datetime
date in datetime format
strfmt : string
format in which the int date will be generated
Example
-------
date_to_int(dt.date(2015,10,23),'%Y')
"""
return int(date.strftime(strfmt))
|
[
"datetime.datetime.strptime",
"datetime.timedelta",
"datetime.date.today"
] |
[((797, 838), 'datetime.datetime.strptime', 'dt.datetime.strptime', (['string_date', 'strfmt'], {}), '(string_date, strfmt)\n', (817, 838), True, 'import datetime as dt\n'), ((440, 455), 'datetime.date.today', 'dt.date.today', ([], {}), '()\n', (453, 455), True, 'import datetime as dt\n'), ((458, 477), 'datetime.timedelta', 'dt.timedelta', (['delta'], {}), '(delta)\n', (470, 477), True, 'import datetime as dt\n')]
|
import pandas as pd
import numpy as np
import pickle
import json
from sklearn.metrics import mean_squared_error, mean_absolute_error
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder
from keras.models import Model
from keras.optimizers import RMSprop, Adam
from keras.preprocessing.text import Tokenizer
from keras.preprocessing import sequence
from gensim.models import Word2Vec, KeyedVectors
from deepmm.models import DeepMultimodalModel
# Read some data that contains a mix of categorical and text-based features
# - e.g. Mercari Price Suggestion Challenge https://www.kaggle.com/c/mercari-price-suggestion-challenge/data
df = pd.read_csv('data.csv')
# Load pretrained embeddings
w2v = KeyedVectors.load_word2vec_format('embeddings_w2v.txt')
# Hyperparameters for text tokenization
EMBEDDING_DIM = 100
NUM_MAX_WORDS = 500
MAX_LEN = 150
X_nlp = df['TEXT']
# Tokenize text documents via keras tokenizer
tok = Tokenizer(num_words=NUM_MAX_WORDS)
tok.fit_on_texts(X_nlp)
sequences = tok.texts_to_sequences(X_nlp)
sequences_matrix = sequence.pad_sequences(sequences, maxlen=MAX_LEN)
word_index = tok.word_index
print('Found %s unique tokens.' % len(word_index))
vocabulary_size = min(len(word_index)+1, NUM_MAX_WORDS)
embedding_matrix = np.zeros((vocabulary_size, EMBEDDING_DIM))
# Preparing the embedding matrix:
# We only take the embeddings that are neccessarry for the given vocabulary
num_none = 0
for word, i in word_index.items():
if i>=NUM_MAX_WORDS:
continue
try:
embedding_vector = w2v[word]
embedding_matrix[i] = embedding_vector
except KeyError:
embedding_matrix[i]=np.random.normal(0,np.sqrt(0.25),EMBEDDING_DIM)
num_none = num_none+1
# Define categorical features and target
cat_features = ['C1', 'C2', 'C3', 'C4']
target = ['TARGET']
# Label encode the categories for each categorical feature (numeric value is needed for feeding into keras model)
X_categorical = []
label_encoder = []
for feature in cat_features:
le = LabelEncoder()
X_categorical.append(pd.DataFrame(le.fit_transform(df[feature]), columns=[feature]))
label_encoder.append(le)
Y = df[target]
# Split all data into training and test chunks
# IMPORTANT: Make sure that textual and categorical data is properly aligned (e.g. here choose same random_state)!
X_nlp_train_all, X_nlp_test_all, y_train_all, y_test_all = train_test_split(sequences_matrix, Y, random_state=42)
# Split sparse part into train and test
X_categorical_train = []
X_categorical_test = []
for X_category in X_categorical:
tr, te, y_train_catembeddings, y_test_catembeddings = train_test_split(X_category, Y, random_state=42)
X_categorical_train.append(tr)
X_categorical_test.append(te)
X_train_catembeddings = X_categorical_train
X_train_all = X_categorical_train
X_train_all.append(X_nlp_train_all)
X_test_all = X_categorical_test
X_test_catembeddings = X_categorical_test
X_test_all.append(X_nlp_test_all)
# Get cardinality of each categorical variable
num_unique_categories = [df[cat].nunique() for cat in cat_features]
# Setup model object
model = DeepMultimodalModel(task='regression', num_unique_categories=num_unique_categories, cat_embedding_dim=16,
txt_vocab_size=vocabulary_size, txt_embedding_dim=EMBEDDING_DIM, txt_max_len=MAX_LEN,
txt_weights=embedding_matrix,
cat_hidden_neurons=[100,50,10], cat_dropout=[0.1, 0.2, 0.2], cat_bi_interaction=True,
txt_lstm_neurons=32, txt_dropout=0.2, final_hidden_neurons=[64, 32], final_dropout=[0.3, 0.3])
model.compile("adam", "mse", metrics=['mse', 'mae'], )
# Fit model
hist = model.fit(X_train_all, y_train_all, epochs=100, batch_size=256, validation_split=0.2)
|
[
"pandas.read_csv",
"keras.preprocessing.sequence.pad_sequences",
"sklearn.model_selection.train_test_split",
"numpy.zeros",
"sklearn.preprocessing.LabelEncoder",
"keras.preprocessing.text.Tokenizer",
"gensim.models.KeyedVectors.load_word2vec_format",
"deepmm.models.DeepMultimodalModel",
"numpy.sqrt"
] |
[((687, 710), 'pandas.read_csv', 'pd.read_csv', (['"""data.csv"""'], {}), "('data.csv')\n", (698, 710), True, 'import pandas as pd\n'), ((748, 803), 'gensim.models.KeyedVectors.load_word2vec_format', 'KeyedVectors.load_word2vec_format', (['"""embeddings_w2v.txt"""'], {}), "('embeddings_w2v.txt')\n", (781, 803), False, 'from gensim.models import Word2Vec, KeyedVectors\n'), ((972, 1006), 'keras.preprocessing.text.Tokenizer', 'Tokenizer', ([], {'num_words': 'NUM_MAX_WORDS'}), '(num_words=NUM_MAX_WORDS)\n', (981, 1006), False, 'from keras.preprocessing.text import Tokenizer\n'), ((1092, 1141), 'keras.preprocessing.sequence.pad_sequences', 'sequence.pad_sequences', (['sequences'], {'maxlen': 'MAX_LEN'}), '(sequences, maxlen=MAX_LEN)\n', (1114, 1141), False, 'from keras.preprocessing import sequence\n'), ((1297, 1339), 'numpy.zeros', 'np.zeros', (['(vocabulary_size, EMBEDDING_DIM)'], {}), '((vocabulary_size, EMBEDDING_DIM))\n', (1305, 1339), True, 'import numpy as np\n'), ((2439, 2493), 'sklearn.model_selection.train_test_split', 'train_test_split', (['sequences_matrix', 'Y'], {'random_state': '(42)'}), '(sequences_matrix, Y, random_state=42)\n', (2455, 2493), False, 'from sklearn.model_selection import train_test_split\n'), ((3164, 3595), 'deepmm.models.DeepMultimodalModel', 'DeepMultimodalModel', ([], {'task': '"""regression"""', 'num_unique_categories': 'num_unique_categories', 'cat_embedding_dim': '(16)', 'txt_vocab_size': 'vocabulary_size', 'txt_embedding_dim': 'EMBEDDING_DIM', 'txt_max_len': 'MAX_LEN', 'txt_weights': 'embedding_matrix', 'cat_hidden_neurons': '[100, 50, 10]', 'cat_dropout': '[0.1, 0.2, 0.2]', 'cat_bi_interaction': '(True)', 'txt_lstm_neurons': '(32)', 'txt_dropout': '(0.2)', 'final_hidden_neurons': '[64, 32]', 'final_dropout': '[0.3, 0.3]'}), "(task='regression', num_unique_categories=\n num_unique_categories, cat_embedding_dim=16, txt_vocab_size=\n vocabulary_size, txt_embedding_dim=EMBEDDING_DIM, txt_max_len=MAX_LEN,\n 
txt_weights=embedding_matrix, cat_hidden_neurons=[100, 50, 10],\n cat_dropout=[0.1, 0.2, 0.2], cat_bi_interaction=True, txt_lstm_neurons=\n 32, txt_dropout=0.2, final_hidden_neurons=[64, 32], final_dropout=[0.3,\n 0.3])\n", (3183, 3595), False, 'from deepmm.models import DeepMultimodalModel\n'), ((2066, 2080), 'sklearn.preprocessing.LabelEncoder', 'LabelEncoder', ([], {}), '()\n', (2078, 2080), False, 'from sklearn.preprocessing import LabelEncoder\n'), ((2676, 2724), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X_category', 'Y'], {'random_state': '(42)'}), '(X_category, Y, random_state=42)\n', (2692, 2724), False, 'from sklearn.model_selection import train_test_split\n'), ((1704, 1717), 'numpy.sqrt', 'np.sqrt', (['(0.25)'], {}), '(0.25)\n', (1711, 1717), True, 'import numpy as np\n')]
|
"""Tests for scandir.scandir()."""
from __future__ import unicode_literals
import os
import shutil
import sys
import time
import unittest
try:
import scandir
has_scandir = True
except ImportError:
has_scandir = False
FILE_ATTRIBUTE_DIRECTORY = 16
TEST_PATH = os.path.abspath(os.path.join(os.path.dirname(__file__), 'testdir'))
IS_PY3 = sys.version_info >= (3, 0)
if IS_PY3:
int_types = int
else:
int_types = (int, long)
str = unicode
if hasattr(os, 'symlink'):
try:
link_name = os.path.join(os.path.dirname(__file__), '_testlink')
os.symlink(__file__, link_name)
os.remove(link_name)
symlinks_supported = True
except NotImplementedError:
# Windows versions before Vista don't support symbolic links
symlinks_supported = False
else:
symlinks_supported = False
def create_file(path, contents='1234'):
with open(path, 'w') as f:
f.write(contents)
def setup_main():
join = os.path.join
os.mkdir(TEST_PATH)
os.mkdir(join(TEST_PATH, 'subdir'))
create_file(join(TEST_PATH, 'file1.txt'))
create_file(join(TEST_PATH, 'file2.txt'), contents='12345678')
os.mkdir(join(TEST_PATH, 'subdir', 'unidir\u018F'))
create_file(join(TEST_PATH, 'subdir', 'file1.txt'))
create_file(join(TEST_PATH, 'subdir', 'unicod\u018F.txt'))
create_file(join(TEST_PATH, 'subdir', 'unidir\u018F', 'file1.txt'))
os.mkdir(join(TEST_PATH, 'linkdir'))
def setup_symlinks():
join = os.path.join
os.mkdir(join(TEST_PATH, 'linkdir', 'linksubdir'))
create_file(join(TEST_PATH, 'linkdir', 'file1.txt'))
os.symlink(os.path.abspath(join(TEST_PATH, 'linkdir', 'file1.txt')),
join(TEST_PATH, 'linkdir', 'link_to_file'))
dir_name = os.path.abspath(join(TEST_PATH, 'linkdir', 'linksubdir'))
dir_link = join(TEST_PATH, 'linkdir', 'link_to_dir')
if sys.version_info >= (3, 3):
# "target_is_directory" was only added in Python 3.3
os.symlink(dir_name, dir_link, target_is_directory=True)
else:
os.symlink(dir_name, dir_link)
def teardown():
try:
shutil.rmtree(TEST_PATH)
except OSError:
# why does the above fail sometimes?
time.sleep(0.1)
shutil.rmtree(TEST_PATH)
class TestMixin(object):
def setUp(self):
if not os.path.exists(TEST_PATH):
setup_main()
if symlinks_supported and not os.path.exists(
os.path.join(TEST_PATH, 'linkdir', 'linksubdir')):
setup_symlinks()
if not hasattr(unittest.TestCase, 'skipTest'):
def skipTest(self, reason):
sys.stdout.write('skipped {0!r} '.format(reason))
def test_basic(self):
entries = sorted(self.scandir_func(TEST_PATH), key=lambda e: e.name)
self.assertEqual([(e.name, e.is_dir()) for e in entries],
[('file1.txt', False), ('file2.txt', False),
('linkdir', True), ('subdir', True)])
self.assertEqual([e.path for e in entries],
[os.path.join(TEST_PATH, e.name) for e in entries])
def test_dir_entry(self):
entries = dict((e.name, e) for e in self.scandir_func(TEST_PATH))
e = entries['file1.txt']
self.assertEqual([e.is_dir(), e.is_file(), e.is_symlink()], [False, True, False])
e = entries['file2.txt']
self.assertEqual([e.is_dir(), e.is_file(), e.is_symlink()], [False, True, False])
e = entries['subdir']
self.assertEqual([e.is_dir(), e.is_file(), e.is_symlink()], [True, False, False])
self.assertEqual(entries['file1.txt'].stat().st_size, 4)
self.assertEqual(entries['file2.txt'].stat().st_size, 8)
def test_stat(self):
entries = list(self.scandir_func(TEST_PATH))
for entry in entries:
os_stat = os.stat(os.path.join(TEST_PATH, entry.name))
scandir_stat = entry.stat()
self.assertEqual(os_stat.st_mode, scandir_stat.st_mode)
# TODO: be nice to figure out why these aren't identical on Windows and on PyPy
# * Windows: they seem to be a few microseconds to tens of seconds out
# * PyPy: for some reason os_stat's times are nanosecond, scandir's are not
self.assertAlmostEqual(os_stat.st_mtime, scandir_stat.st_mtime, delta=1)
self.assertAlmostEqual(os_stat.st_ctime, scandir_stat.st_ctime, delta=1)
if entry.is_file():
self.assertEqual(os_stat.st_size, scandir_stat.st_size)
def test_returns_iter(self):
it = self.scandir_func(TEST_PATH)
entry = next(it)
assert hasattr(entry, 'name')
def check_file_attributes(self, result):
self.assertTrue(hasattr(result, 'st_file_attributes'))
self.assertTrue(isinstance(result.st_file_attributes, int_types))
self.assertTrue(0 <= result.st_file_attributes <= 0xFFFFFFFF)
def test_file_attributes(self):
if sys.platform != 'win32' or not self.has_file_attributes:
# st_file_attributes is Win32 specific (but can't use
# unittest.skipUnless on Python 2.6)
return self.skipTest('st_file_attributes not supported')
entries = dict((e.name, e) for e in self.scandir_func(TEST_PATH))
# test st_file_attributes on a file (FILE_ATTRIBUTE_DIRECTORY not set)
result = entries['file1.txt'].stat()
self.check_file_attributes(result)
self.assertEqual(result.st_file_attributes & FILE_ATTRIBUTE_DIRECTORY, 0)
# test st_file_attributes on a directory (FILE_ATTRIBUTE_DIRECTORY set)
result = entries['subdir'].stat()
self.check_file_attributes(result)
self.assertEqual(result.st_file_attributes & FILE_ATTRIBUTE_DIRECTORY,
FILE_ATTRIBUTE_DIRECTORY)
def test_path(self):
entries = sorted(self.scandir_func(TEST_PATH), key=lambda e: e.name)
self.assertEqual([os.path.basename(e.name) for e in entries],
['file1.txt', 'file2.txt', 'linkdir', 'subdir'])
self.assertEqual([os.path.normpath(os.path.join(TEST_PATH, e.name)) for e in entries],
[os.path.normpath(e.path) for e in entries])
def test_symlink(self):
if not symlinks_supported:
return self.skipTest('symbolic links not supported')
entries = sorted(self.scandir_func(os.path.join(TEST_PATH, 'linkdir')),
key=lambda e: e.name)
self.assertEqual([(e.name, e.is_symlink()) for e in entries],
[('file1.txt', False),
('link_to_dir', True),
('link_to_file', True),
('linksubdir', False)])
self.assertEqual([(e.name, e.is_file(), e.is_file(follow_symlinks=False))
for e in entries],
[('file1.txt', True, True),
('link_to_dir', False, False),
('link_to_file', True, False),
('linksubdir', False, False)])
self.assertEqual([(e.name, e.is_dir(), e.is_dir(follow_symlinks=False))
for e in entries],
[('file1.txt', False, False),
('link_to_dir', True, False),
('link_to_file', False, False),
('linksubdir', True, True)])
def test_bytes(self):
# Check that unicode filenames are returned correctly as bytes in output
path = os.path.join(TEST_PATH, 'subdir').encode(sys.getfilesystemencoding(), 'replace')
self.assertTrue(isinstance(path, bytes))
# Python 3.6 on Windows fixes the bytes filename thing by using UTF-8
if IS_PY3 and sys.platform == 'win32':
if not (sys.version_info >= (3, 6) and self.scandir_func == os.scandir):
self.assertRaises(TypeError, self.scandir_func, path)
return
entries = [e for e in self.scandir_func(path) if e.name.startswith(b'unicod')]
self.assertEqual(len(entries), 1)
entry = entries[0]
self.assertTrue(isinstance(entry.name, bytes))
self.assertTrue(isinstance(entry.path, bytes))
# b'unicod?.txt' on Windows, b'unicod\xc6\x8f.txt' (UTF-8) or similar on POSIX
entry_name = 'unicod\u018f.txt'.encode(sys.getfilesystemencoding(), 'replace')
self.assertEqual(entry.name, entry_name)
self.assertEqual(entry.path, os.path.join(path, entry_name))
def test_unicode(self):
# Check that unicode filenames are returned correctly as (unicode) str in output
path = os.path.join(TEST_PATH, 'subdir')
if not IS_PY3:
path = path.decode(sys.getfilesystemencoding(), 'replace')
self.assertTrue(isinstance(path, str))
entries = [e for e in self.scandir_func(path) if e.name.startswith('unicod')]
self.assertEqual(len(entries), 1)
entry = entries[0]
self.assertTrue(isinstance(entry.name, str))
self.assertTrue(isinstance(entry.path, str))
entry_name = 'unicod\u018f.txt'
self.assertEqual(entry.name, entry_name)
self.assertEqual(entry.path, os.path.join(path, 'unicod\u018f.txt'))
# Check that it handles unicode input properly
path = os.path.join(TEST_PATH, 'subdir', 'unidir\u018f')
self.assertTrue(isinstance(path, str))
entries = list(self.scandir_func(path))
self.assertEqual(len(entries), 1)
entry = entries[0]
self.assertTrue(isinstance(entry.name, str))
self.assertTrue(isinstance(entry.path, str))
self.assertEqual(entry.name, 'file1.txt')
self.assertEqual(entry.path, os.path.join(path, 'file1.txt'))
def test_walk_unicode_handling(self):
encoding = sys.getfilesystemencoding()
dirname_unicode = u'test_unicode_dir'
dirname_bytes = dirname_unicode.encode(encoding)
dirpath = os.path.join(TEST_PATH.encode(encoding), dirname_bytes)
try:
os.makedirs(dirpath)
if sys.platform != 'win32':
# test bytes
self.assertTrue(isinstance(dirpath, bytes))
for (path, dirs, files) in scandir.walk(dirpath):
self.assertTrue(isinstance(path, bytes))
# test unicode
text_type = str if IS_PY3 else unicode
dirpath_unicode = text_type(dirpath, encoding)
self.assertTrue(isinstance(dirpath_unicode, text_type))
for (path, dirs, files) in scandir.walk(dirpath_unicode):
self.assertTrue(isinstance(path, text_type))
finally:
shutil.rmtree(dirpath)
if has_scandir:
class TestScandirGeneric(TestMixin, unittest.TestCase):
def setUp(self):
self.scandir_func = scandir.scandir_generic
self.has_file_attributes = False
TestMixin.setUp(self)
if getattr(scandir, 'scandir_python', None):
class TestScandirPython(TestMixin, unittest.TestCase):
def setUp(self):
self.scandir_func = scandir.scandir_python
self.has_file_attributes = True
TestMixin.setUp(self)
if getattr(scandir, 'scandir_c', None):
class TestScandirC(TestMixin, unittest.TestCase):
def setUp(self):
self.scandir_func = scandir.scandir_c
self.has_file_attributes = True
TestMixin.setUp(self)
if hasattr(os, 'scandir'):
class TestScandirOS(TestMixin, unittest.TestCase):
def setUp(self):
self.scandir_func = os.scandir
self.has_file_attributes = True
TestMixin.setUp(self)
|
[
"os.mkdir",
"os.remove",
"scandir.walk",
"os.makedirs",
"os.path.basename",
"os.path.dirname",
"os.path.exists",
"sys.getfilesystemencoding",
"time.sleep",
"os.path.normpath",
"shutil.rmtree",
"os.symlink",
"os.path.join"
] |
[((1000, 1019), 'os.mkdir', 'os.mkdir', (['TEST_PATH'], {}), '(TEST_PATH)\n', (1008, 1019), False, 'import os\n'), ((305, 330), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (320, 330), False, 'import os\n'), ((584, 615), 'os.symlink', 'os.symlink', (['__file__', 'link_name'], {}), '(__file__, link_name)\n', (594, 615), False, 'import os\n'), ((624, 644), 'os.remove', 'os.remove', (['link_name'], {}), '(link_name)\n', (633, 644), False, 'import os\n'), ((1993, 2049), 'os.symlink', 'os.symlink', (['dir_name', 'dir_link'], {'target_is_directory': '(True)'}), '(dir_name, dir_link, target_is_directory=True)\n', (2003, 2049), False, 'import os\n'), ((2068, 2098), 'os.symlink', 'os.symlink', (['dir_name', 'dir_link'], {}), '(dir_name, dir_link)\n', (2078, 2098), False, 'import os\n'), ((2134, 2158), 'shutil.rmtree', 'shutil.rmtree', (['TEST_PATH'], {}), '(TEST_PATH)\n', (2147, 2158), False, 'import shutil\n'), ((8744, 8777), 'os.path.join', 'os.path.join', (['TEST_PATH', '"""subdir"""'], {}), "(TEST_PATH, 'subdir')\n", (8756, 8777), False, 'import os\n'), ((9419, 9463), 'os.path.join', 'os.path.join', (['TEST_PATH', '"""subdir"""', '"""unidirƏ"""'], {}), "(TEST_PATH, 'subdir', 'unidirƏ')\n", (9431, 9463), False, 'import os\n'), ((9922, 9949), 'sys.getfilesystemencoding', 'sys.getfilesystemencoding', ([], {}), '()\n', (9947, 9949), False, 'import sys\n'), ((536, 561), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (551, 561), False, 'import os\n'), ((2232, 2247), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (2242, 2247), False, 'import time\n'), ((2256, 2280), 'shutil.rmtree', 'shutil.rmtree', (['TEST_PATH'], {}), '(TEST_PATH)\n', (2269, 2280), False, 'import shutil\n'), ((2344, 2369), 'os.path.exists', 'os.path.exists', (['TEST_PATH'], {}), '(TEST_PATH)\n', (2358, 2369), False, 'import os\n'), ((7657, 7684), 'sys.getfilesystemencoding', 'sys.getfilesystemencoding', ([], {}), '()\n', (7682, 7684), False, 
'import sys\n'), ((8453, 8480), 'sys.getfilesystemencoding', 'sys.getfilesystemencoding', ([], {}), '()\n', (8478, 8480), False, 'import sys\n'), ((8579, 8609), 'os.path.join', 'os.path.join', (['path', 'entry_name'], {}), '(path, entry_name)\n', (8591, 8609), False, 'import os\n'), ((9308, 9341), 'os.path.join', 'os.path.join', (['path', '"""unicodƏ.txt"""'], {}), "(path, 'unicodƏ.txt')\n", (9320, 9341), False, 'import os\n'), ((9827, 9858), 'os.path.join', 'os.path.join', (['path', '"""file1.txt"""'], {}), "(path, 'file1.txt')\n", (9839, 9858), False, 'import os\n'), ((10152, 10172), 'os.makedirs', 'os.makedirs', (['dirpath'], {}), '(dirpath)\n', (10163, 10172), False, 'import os\n'), ((10675, 10704), 'scandir.walk', 'scandir.walk', (['dirpath_unicode'], {}), '(dirpath_unicode)\n', (10687, 10704), False, 'import scandir\n'), ((10796, 10818), 'shutil.rmtree', 'shutil.rmtree', (['dirpath'], {}), '(dirpath)\n', (10809, 10818), False, 'import shutil\n'), ((3078, 3109), 'os.path.join', 'os.path.join', (['TEST_PATH', 'e.name'], {}), '(TEST_PATH, e.name)\n', (3090, 3109), False, 'import os\n'), ((3870, 3905), 'os.path.join', 'os.path.join', (['TEST_PATH', 'entry.name'], {}), '(TEST_PATH, entry.name)\n', (3882, 3905), False, 'import os\n'), ((5983, 6007), 'os.path.basename', 'os.path.basename', (['e.name'], {}), '(e.name)\n', (5999, 6007), False, 'import os\n'), ((6222, 6246), 'os.path.normpath', 'os.path.normpath', (['e.path'], {}), '(e.path)\n', (6238, 6246), False, 'import os\n'), ((6439, 6473), 'os.path.join', 'os.path.join', (['TEST_PATH', '"""linkdir"""'], {}), "(TEST_PATH, 'linkdir')\n", (6451, 6473), False, 'import os\n'), ((7616, 7649), 'os.path.join', 'os.path.join', (['TEST_PATH', '"""subdir"""'], {}), "(TEST_PATH, 'subdir')\n", (7628, 7649), False, 'import os\n'), ((8832, 8859), 'sys.getfilesystemencoding', 'sys.getfilesystemencoding', ([], {}), '()\n', (8857, 8859), False, 'import sys\n'), ((10346, 10367), 'scandir.walk', 'scandir.walk', (['dirpath'], {}), 
'(dirpath)\n', (10358, 10367), False, 'import scandir\n'), ((2466, 2514), 'os.path.join', 'os.path.join', (['TEST_PATH', '"""linkdir"""', '"""linksubdir"""'], {}), "(TEST_PATH, 'linkdir', 'linksubdir')\n", (2478, 2514), False, 'import os\n'), ((6144, 6175), 'os.path.join', 'os.path.join', (['TEST_PATH', 'e.name'], {}), '(TEST_PATH, e.name)\n', (6156, 6175), False, 'import os\n')]
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import os
import subprocess
import sys
import time
import csv
from itertools import combinations_with_replacement
import timeit
import torch
import gc
sys.path.append('/pytorch-cifar-master/')
# mode = 'one'
mode = 'two'
# one_exec = 'cuda'
# co_exec = 'cuda'
co_exec = 'nn'
path_exe = ['PolyBench_exe', 'pytorch-cifar-master']
poly_exe = ['2DConvolution', '3DConvolution', '3mm', 'atax', 'bicg', 'gemm', 'gesummv', 'mvt', 'syr2k', 'syrk',
'fdtd2d', 'correlation', 'covariance']
# poly_exe = ['2mm','gramschmidt']
epoch = '32'
inference_exe = [(epoch, '1', 'test', 'VGG'), (epoch, '1', 'test', 'ResNet'), (epoch, '1', 'test', 'GoogleNet'), (epoch, '1', 'test', 'DenseNet'), (epoch, '1', 'test', 'MobileNet'),
(epoch, '16', 'test', 'VGG'), (epoch, '16', 'test', 'ResNet'), (epoch, '16', 'test', 'GoogleNet'), (epoch, '16', 'test', 'DenseNet'), (epoch, '16', 'test', 'MobileNet'),
(epoch, '32', 'test', 'VGG'), (epoch, '32', 'test', 'ResNet'), (epoch, '32', 'test', 'GoogleNet'),(epoch, '32', 'test', 'DenseNet'), (epoch, '32', 'test', 'MobileNet'),
(epoch, '64', 'test', 'VGG'), (epoch, '64', 'test', 'ResNet'), (epoch, '64', 'test', 'GoogleNet'), (epoch, '64', 'test', 'DenseNet'), (epoch, '64', 'test', 'MobileNet')]
one_task_time_csv = 'one_task_time.csv'
two_task_time_csv = 'two_task_time.csv'
threshold = 50 # if the execution time of any benchmark is less than the threshold, the benchmark will be iteratively executed
def execute_one():
# ***** execute CUDA *****
bench = 'PolyBench_exe'
for i in poly_exe:
loop_time, t = 0, 0
while(t < threshold):
# ****** time: start *******
start_time = time.time()
print('Loop: %d, Executing of *** %s *** in %s' % (loop_time, i, bench))
p1 = subprocess.Popen(
'./%s/%s.exe' % (bench, i), close_fds=True,
shell=True, preexec_fn=os.setsid)
while 1:
ret = subprocess.Popen.poll(p1)
if ret == 0:
break
elif ret is None:
pass
end_time = time.time()
exec_time = (end_time - start_time)
print('execution time of the task is %f' % exec_time)
loop_time += 1 # number of loops
t += exec_time # total execution of the benchmark (multiple loops)
print('Job: %s, Exec: %d, total time: %f seconds' % (i, loop_time, t))
f = open(one_task_time_csv, 'a+')
csv_w = csv.writer(f)
csv_w.writerow([i, t, loop_time])
time.sleep(2)
f.close()
bench = 'pytorch-cifar-master'
for i in inference_exe:
loop_time, t = 0, 0
str_i = '-'
while(t < threshold):
# ****** time: start *******
start_time = time.time()
print('Loop:%d, Executing of *** %s ***, epoch:%s,batch:%s,mode:%s' % (loop_time, i[3], i[0], i[1], i[2]))
p1 = subprocess.Popen(
'python ./%s/main_arg.py --epoch %s --batch %s --job %s --net %s' % (bench, i[0], i[1], i[2], i[3]), close_fds=True,
shell=True, preexec_fn=os.setsid)
while 1:
ret = subprocess.Popen.poll(p1)
if ret == 0:
break
elif ret is None:
pass
end_time = time.time()
exec_time = (end_time - start_time)
loop_time += 1
t += exec_time
print('Job: %s, Exec: %d, total time: %f seconds' % ("i", loop_time, t))
f = open(one_task_time_csv,'a+')
csv_w = csv.writer(f)
csv_w.writerow([str_i.join(i), t, loop_time])
time.sleep(2)
f.close()
def execute_two():
# read the loop time and execute time from "one_task_time.csv"
# format ---> [name of benchmark, execution time, loop time]
dict_one = {}
f = open(one_task_time_csv, 'r')
csv_r = csv.reader(f)
for p in csv_r:
name = p[0]
dict_one[name] = p # {'name':['name', exec_time, loop_time]}
f.close()
# emerge the inference_exe's name
# ('32','64','test','MobileNet') ---> '32-64-test-MobileNet'
str_i = '-'
inference_exe_join = []
for i in inference_exe:
inference_exe_join.append(str_i.join(i))
# poly_exe + inference_exe_join
# ['2DConvolution', '3DConvolution', ...] + ['32-1-test-VGG', '32-16-test-VGG', ...]
# em_exe = poly_exe + inference_exe_join
em_exe = poly_exe + inference_exe
# execute two tasks in order
for p in combinations_with_replacement(em_exe,2):
i, j = '2DConvolution', '3mm'
print('Execute *** %s *** & *** %s ***'% (i, j))
c_1, c_2 = 0, 0 # 0: PolyBench_exe, 1: inference_exe
# verify the category of the tasks
if i in poly_exe:
print('task 1 in poly_exe')
c_1 = 0
n_1, t_1, l_1 = dict_one[i] # obtain the name, exectuion time, loop time of task 1
elif i in inference_exe:
print('task 1 in inference_exe')
c_1 = 1
str_temp = '-'
n_1, t_1, l_1 = dict_one[str_temp.join(i)]
else:
n_1, t_1, l_1 = 0, 0, 0
print('task 1 do not belong to any category')
exit(0)
l_1 = int(l_1)
if j in poly_exe:
print('task 2 in poly_exe')
c_2 = 0
n_2, t_2, l_2 = dict_one[j] # obtain the name, exectuion time, loop time of task 1
elif j in inference_exe:
print('task 2 in inference_exe')
c_2 = 1
str_temp = '-'
n_2, t_2, l_2 = dict_one[str_temp.join(j)]
else:
n_2, t_2, l_2 = 0, 0, 0
print('task 2 do not belong to any category')
exit(0)
l_2 = int(l_2)
cnt_1, cnt_2 = 0, 0
total_exec_1, total_exec_2 = 0, 0
s_1, s_2 = 0, 0 # 0: task stopped, 1: task executing
o_1, o_2 = 0, 0 # 0: loop, 1: loop over
test_1, test_2 = 0, 0
while(o_1 == 0 or o_2 == 0):
print('loop of task 1 and 2: %d & %d' % (cnt_1, cnt_2))
print('o_1 and o_2: %d & %d' % (o_1, o_2))
# execute task 1
if o_1 == 0 and s_1 == 0:
test_1 += 1
s_1 = 1 # task 1 is being executed
if c_1 == 0:
print('task 1 is in poly_exe')
bench1 = 'PolyBench_exe'
start_time_1 = time.time()
p1 = subprocess.Popen(
'./%s/%s.exe' % (bench1, n_1),close_fds=True,
shell=True, preexec_fn=os.setsid)
elif c_1 == 1:
print('task 1 is in inference_exe')
bench1 = 'pytorch-cifar-master'
start_time_1 = time.time()
p1 = subprocess.Popen(
'python ./%s/main_arg.py --epoch %s --batch %s --job %s --net %s' % (bench1, i[0], i[1], i[2], i[3]),close_fds=True,
shell=True, preexec_fn=os.setsid)
else:
print('c_1 error, exit')
exit(0)
else:
pass
# execute task 2
if o_2 == 0 and s_2 == 0:
test_2 += 1
s_2 = 1 # task 2 is being executed
if c_2 == 0:
print('task 2 is in poly_exe')
bench2 = 'PolyBench_exe'
start_time_2 = time.time()
p2 = subprocess.Popen(
'./%s/%s.exe' % (bench2, n_2),close_fds=True,
shell=True, preexec_fn=os.setsid)
elif c_2 == 1:
print('task 2 is in inference_exe')
bench2 = 'pytorch-cifar-master'
start_time_2 = time.time()
p2 = subprocess.Popen(
'python ./%s/main_arg.py --epoch %s --batch %s --job %s --net %s' % (
bench2, j[0], j[1], j[2], j[3]),close_fds=True,
shell=True, preexec_fn=os.setsid)
else:
print('c_2 error, exit')
exit(0)
else:
pass
# control loop
# e_1, e_2 = 0, 0
while 1:
ret1 = subprocess.Popen.poll(p1)
ret2 = subprocess.Popen.poll(p2)
# if ret1 == 0 and ret2 == 0 and s_1 == 1 and s_2 == 1:
# print('two tasks are stopped at the same time')
# print(n_1,n_2)
# exit(0)
# print(s_1,s_2)
# print(ret1,ret2)
# print('ret1:%d, ret2:%d' %(ret1,ret2))
if ret1 == 0 and s_1 == 1:
# if ret1 == 0 and ret2 == 0:
# print('task 1 is stopped')
# print(ret1,ret2)
# print('s_1:%d,s2:%d' % (s_1,s_2))
# print('c_1:%d,c_2:%d' % (c_1,c_2))
cnt_1 += 1
s_1 = 0
# e_1 = 1
end_time_1 = time.time()
exec_time_1 = end_time_1 - start_time_1
print('Execution time of task 1 is %f' % exec_time_1) # ********
total_exec_1 += exec_time_1
o_1 = int(cnt_1 >= l_1)
# if ret1 == 0 and ret2 == 0:
# test = input('input:')
break
else:
pass
if ret2 == 0 and s_2 == 1:
# if ret1 == 0 and ret2 == 0:
# print('task 2 is stopped')
# print(ret1, ret2)
# print('s_1:%d,s2:%d' % (s_1, s_2))
# print('c_1:%d,c_2:%d' % (c_1, c_2))
cnt_2 += 1
s_2 = 0
# e_2 = 1
end_time_2 = time.time()
exec_time_2 = end_time_2 - start_time_2
print('Execution time of task 2 is %f' % exec_time_2) # ********
total_exec_2 += exec_time_2
o_2 = int(cnt_2 >= l_2)
# if ret1 == 0 and ret2 == 0:
# test = input('input:')
break
else:
pass
# if cnt_1 >= l_1:
# o_1 = 1
#
# if cnt_2 >= l_2:
# o_2 = 1
#
# if e_1 == 1 or e_2 == 1:
# break
# f = open(two_task_time_csv, 'a+')
# csv_w = csv.writer(f)
# csv_w.writerow([n_1, n_2, t_1, total_exec_1, t_2, total_exec_2]) # [name of task1, name of task2, execution time of task1(one), execution time of task1(two), execution time of task2(one), execution time of task2(two)]
# f.close()
time.sleep(2)
# torch.cuda.empty_cache()
print('test: %d & %d' % (test_1, test_2))
print('cnt: %d & %d' % (cnt_1, cnt_2))
# exit(0)
# execute two tasks from CUDA (e.g., PolyBench)
# if co_exec == 'cuda':
# bench = 'PolyBench_exe'
# for p in combinations_with_replacement(poly_exe,2):
# i, j = p[0], p[1]
# print(' *** Executing %s and %s *** ' % (i,j))
# s1, s2 = 0, 0 # flag of two jobs (1: completed, 0: ongoing)
# exec_time_1, exec_time_2 = 0, 0
#
# start_time = time.time()
# p1 = subprocess.Popen(
# './%s/%s.exe' % (bench, i),
# shell=True, preexec_fn=os.setsid)
# p2 = subprocess.Popen(
# './%s/%s.exe' % (bench, j),
# shell=True, preexec_fn=os.setsid)
#
# while 1:
# ret1 = subprocess.Popen.poll(p1)
# ret2 = subprocess.Popen.poll(p2)
# if ret1 == 0 and s1 == 0:
# s1 = 1
# end_time_1 = time.time()
# exec_time_1 = (end_time_1 - start_time)
#
# if ret2 == 0 and s2 == 0:
# s2 = 1
# end_time_2 = time.time()
# exec_time_2 = (end_time_2 - start_time)
#
# if s1 == 1 and s2 == 1:
# f = open(two_task_time_csv, 'a+')
# csv_w = csv.writer(f)
# csv_w.writerow([i, j, exec_time_1, exec_time_2])
# time.sleep(2)
# f.close()
# break
# else:
# pass
#
# elif co_exec == 'nn':
# # execute two inference tasks
# bench = 'pytorch-cifar-master'
# for p in combinations_with_replacement(inference_exe,2):
# i, j = p[0], p[1]
# if i[3] == j[3]:
# continue
# print(' *** Executing %s and %s *** ' % (i[3],j[3]))
#
# s1, s2 = 0, 0 # flag of two jobs (1: completed, 0: ongoing)
# exec_time_1, exec_time_2 = 0, 0
#
# start_time = time.time()
# p1 = subprocess.Popen(
# 'python ./%s/main_arg.py --epoch %s --batch %s --job %s --net %s' % (bench, i[0], i[1], i[2], i[3]),
# shell=True, preexec_fn=os.setsid)
# p2 = subprocess.Popen(
# 'python ./%s/main_arg.py --epoch %s --batch %s --job %s --net %s' % (bench, j[0], j[1], j[2], j[3]),
# shell=True, preexec_fn=os.setsid)
#
# while 1:
# ret1 = subprocess.Popen.poll(p1)
# ret2 = subprocess.Popen.poll(p2)
# if ret1 == 0 and s1 == 0:
# s1 = 1
# end_time_1 = time.time()
# exec_time_1 = (end_time_1 - start_time)
#
# if ret2 == 0 and s2 == 0:
# s2 = 1
# end_time_2 = time.time()
# exec_time_2 = (end_time_2 - start_time)
#
# if s1 == 1 and s2 == 1:
# f = open(two_task_time_csv, 'a+')
# csv_w = csv.writer(f)
# csv_w.writerow([i, j, exec_time_1, exec_time_2])
# time.sleep(2)
# f.close()
# torch.cuda.empty_cache()
# break
# else:
# pass
if __name__ == '__main__':
# if mode == 'one':
# execute_one()
# elif mode == 'two':
# execute_two()
# bench1 = 'PolyBench_exe'
# bench2 = 'PolyBench_exe'
# n_1 = '2DConvolution'
# n_2 = 'atax'
# start_time_1 = time.time()
# p1 = subprocess.Popen(
# './%s/%s.exe' % (bench1, n_1), close_fds=True,
# shell=True, preexec_fn=os.setsid)
# start_time_2 = time.time()
# p2 = subprocess.Popen(
# './%s/%s.exe' % (bench2, n_2), close_fds=True,
# shell=True, preexec_fn=os.setsid)
#
# s1, s2 = 0, 0
# while 1:
# ret1 = subprocess.Popen.poll(p1)
# ret2 = subprocess.Popen.poll(p2)
#
#
# if ret1 == 0 and s1 == 0:
# s1 = 1
# end_time_1 = time.time()
# exec_time_1 = end_time_1 - start_time_1
# # print('Execution time of task 1 is %f' % exec_time_1) # ********
#
# if ret2 == 0 and s2 == 0:
# s2 = 1
# end_time_2 = time.time()
# exec_time_2 = end_time_2 - start_time_2
# # print('Execution time of task 2 is %f' % exec_time_2) # ********
#
# if s1 == 1 and s2 == 1:
# break
#
# print('Execution time of task 1 is %f' % exec_time_1) # ********
# print('Execution time of task 2 is %f' % exec_time_2) # ********
# print('two tasks are completed')
DEVNULL = open(os.devnull, 'wb')
bench1 = 'PolyBench_exe'
bench2 = 'PolyBench_exe'
n_1 = '3DConvolution'
n_2 = '32-16-test-MobileNet'
cnt_1, cnt_2 = 0, 0
s_1, s_2 = 0, 0
o_1, o_2 = 0, 0
l_1, l_2 = 22, 5
sel = 0
total_exec_1, total_exec_2 = 0, 0
while(cnt_1 < l_1 or cnt_2 < l_2):
print('loop of task 1 and 2: %d & %d' % (cnt_1, cnt_2))
print('o_1 and o_2: %d & %d' % (o_1, o_2))
if cnt_1 < l_1 and s_1 == 0:
s_1 = 1
start_time_1 = time.time()
p1 = subprocess.Popen(
'./%s/%s.exe' % (bench1, n_1), close_fds=True,
shell=True, preexec_fn=os.setsid,stdout=DEVNULL,stderr=subprocess.STDOUT)
if cnt_2 < l_2 and s_2 == 0:
s_2 = 1
start_time_2 = time.time()
p2 = subprocess.Popen(
'./%s/%s.exe' % (bench2, n_2), close_fds=True,
shell=True, preexec_fn=os.setsid,stdout=DEVNULL,stderr=subprocess.STDOUT)
while 1:
ret1 = subprocess.Popen.poll(p1)
ret2 = subprocess.Popen.poll(p2)
if ret1 == 0 and s_1 == 1:
s_1 = 0
cnt_1 += 1
end_time_1 = time.time()
exec_time_1 = end_time_1 - start_time_1
total_exec_1 += exec_time_1
print('Execution time of task 1 is %f' % exec_time_1) # ********
# o_1 = int(cnt_1 >= l_1)
break
if ret2 == 0 and s_2 == 1:
s_2 = 0
cnt_2 += 1
end_time_2 = time.time()
exec_time_2 = end_time_2 - start_time_2
total_exec_2 += exec_time_2
print('Execution time of task 2 is %f' % exec_time_2) # ********
# o_2 = int(cnt_2 >= l_2)
break
print('two tasks are completed')
print(total_exec_1,total_exec_2)
# p1 = subprocess.Popen(
# 'python test_2D.py', close_fds=True,
# shell=True, preexec_fn=os.setsid)
# p2 = subprocess.Popen(
# 'python test_3mm.py', close_fds=True,
# shell=True, preexec_fn=os.setsid)
|
[
"sys.path.append",
"subprocess.Popen",
"csv.reader",
"csv.writer",
"subprocess.Popen.poll",
"time.sleep",
"itertools.combinations_with_replacement",
"time.time"
] |
[((194, 235), 'sys.path.append', 'sys.path.append', (['"""/pytorch-cifar-master/"""'], {}), "('/pytorch-cifar-master/')\n", (209, 235), False, 'import sys\n'), ((4077, 4090), 'csv.reader', 'csv.reader', (['f'], {}), '(f)\n', (4087, 4090), False, 'import csv\n'), ((4697, 4737), 'itertools.combinations_with_replacement', 'combinations_with_replacement', (['em_exe', '(2)'], {}), '(em_exe, 2)\n', (4726, 4737), False, 'from itertools import combinations_with_replacement\n'), ((2631, 2644), 'csv.writer', 'csv.writer', (['f'], {}), '(f)\n', (2641, 2644), False, 'import csv\n'), ((2695, 2708), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (2705, 2708), False, 'import time\n'), ((3749, 3762), 'csv.writer', 'csv.writer', (['f'], {}), '(f)\n', (3759, 3762), False, 'import csv\n'), ((3825, 3838), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (3835, 3838), False, 'import time\n'), ((11296, 11309), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (11306, 11309), False, 'import time\n'), ((1773, 1784), 'time.time', 'time.time', ([], {}), '()\n', (1782, 1784), False, 'import time\n'), ((1887, 1985), 'subprocess.Popen', 'subprocess.Popen', (["('./%s/%s.exe' % (bench, i))"], {'close_fds': '(True)', 'shell': '(True)', 'preexec_fn': 'os.setsid'}), "('./%s/%s.exe' % (bench, i), close_fds=True, shell=True,\n preexec_fn=os.setsid)\n", (1903, 1985), False, 'import subprocess\n'), ((2223, 2234), 'time.time', 'time.time', ([], {}), '()\n', (2232, 2234), False, 'import time\n'), ((2936, 2947), 'time.time', 'time.time', ([], {}), '()\n', (2945, 2947), False, 'import time\n'), ((3084, 3267), 'subprocess.Popen', 'subprocess.Popen', (["('python ./%s/main_arg.py --epoch %s --batch %s --job %s --net %s' % (\n bench, i[0], i[1], i[2], i[3]))"], {'close_fds': '(True)', 'shell': '(True)', 'preexec_fn': 'os.setsid'}), "(\n 'python ./%s/main_arg.py --epoch %s --batch %s --job %s --net %s' % (\n bench, i[0], i[1], i[2], i[3]), close_fds=True, shell=True, preexec_fn=\n os.setsid)\n", 
(3100, 3267), False, 'import subprocess\n'), ((3495, 3506), 'time.time', 'time.time', ([], {}), '()\n', (3504, 3506), False, 'import time\n'), ((16826, 16837), 'time.time', 'time.time', ([], {}), '()\n', (16835, 16837), False, 'import time\n'), ((16855, 16998), 'subprocess.Popen', 'subprocess.Popen', (["('./%s/%s.exe' % (bench1, n_1))"], {'close_fds': '(True)', 'shell': '(True)', 'preexec_fn': 'os.setsid', 'stdout': 'DEVNULL', 'stderr': 'subprocess.STDOUT'}), "('./%s/%s.exe' % (bench1, n_1), close_fds=True, shell=True,\n preexec_fn=os.setsid, stdout=DEVNULL, stderr=subprocess.STDOUT)\n", (16871, 16998), False, 'import subprocess\n'), ((17111, 17122), 'time.time', 'time.time', ([], {}), '()\n', (17120, 17122), False, 'import time\n'), ((17140, 17283), 'subprocess.Popen', 'subprocess.Popen', (["('./%s/%s.exe' % (bench2, n_2))"], {'close_fds': '(True)', 'shell': '(True)', 'preexec_fn': 'os.setsid', 'stdout': 'DEVNULL', 'stderr': 'subprocess.STDOUT'}), "('./%s/%s.exe' % (bench2, n_2), close_fds=True, shell=True,\n preexec_fn=os.setsid, stdout=DEVNULL, stderr=subprocess.STDOUT)\n", (17156, 17283), False, 'import subprocess\n'), ((17349, 17374), 'subprocess.Popen.poll', 'subprocess.Popen.poll', (['p1'], {}), '(p1)\n', (17370, 17374), False, 'import subprocess\n'), ((17394, 17419), 'subprocess.Popen.poll', 'subprocess.Popen.poll', (['p2'], {}), '(p2)\n', (17415, 17419), False, 'import subprocess\n'), ((2059, 2084), 'subprocess.Popen.poll', 'subprocess.Popen.poll', (['p1'], {}), '(p1)\n', (2080, 2084), False, 'import subprocess\n'), ((3331, 3356), 'subprocess.Popen.poll', 'subprocess.Popen.poll', (['p1'], {}), '(p1)\n', (3352, 3356), False, 'import subprocess\n'), ((8588, 8613), 'subprocess.Popen.poll', 'subprocess.Popen.poll', (['p1'], {}), '(p1)\n', (8609, 8613), False, 'import subprocess\n'), ((8637, 8662), 'subprocess.Popen.poll', 'subprocess.Popen.poll', (['p2'], {}), '(p2)\n', (8658, 8662), False, 'import subprocess\n'), ((17540, 17551), 'time.time', 'time.time', 
([], {}), '()\n', (17549, 17551), False, 'import time\n'), ((17918, 17929), 'time.time', 'time.time', ([], {}), '()\n', (17927, 17929), False, 'import time\n'), ((6660, 6671), 'time.time', 'time.time', ([], {}), '()\n', (6669, 6671), False, 'import time\n'), ((6697, 6798), 'subprocess.Popen', 'subprocess.Popen', (["('./%s/%s.exe' % (bench1, n_1))"], {'close_fds': '(True)', 'shell': '(True)', 'preexec_fn': 'os.setsid'}), "('./%s/%s.exe' % (bench1, n_1), close_fds=True, shell=True,\n preexec_fn=os.setsid)\n", (6713, 6798), False, 'import subprocess\n'), ((7716, 7727), 'time.time', 'time.time', ([], {}), '()\n', (7725, 7727), False, 'import time\n'), ((7753, 7854), 'subprocess.Popen', 'subprocess.Popen', (["('./%s/%s.exe' % (bench2, n_2))"], {'close_fds': '(True)', 'shell': '(True)', 'preexec_fn': 'os.setsid'}), "('./%s/%s.exe' % (bench2, n_2), close_fds=True, shell=True,\n preexec_fn=os.setsid)\n", (7769, 7854), False, 'import subprocess\n'), ((9431, 9442), 'time.time', 'time.time', ([], {}), '()\n', (9440, 9442), False, 'import time\n'), ((10294, 10305), 'time.time', 'time.time', ([], {}), '()\n', (10303, 10305), False, 'import time\n'), ((7017, 7028), 'time.time', 'time.time', ([], {}), '()\n', (7026, 7028), False, 'import time\n'), ((7054, 7237), 'subprocess.Popen', 'subprocess.Popen', (["('python ./%s/main_arg.py --epoch %s --batch %s --job %s --net %s' % (\n bench1, i[0], i[1], i[2], i[3]))"], {'close_fds': '(True)', 'shell': '(True)', 'preexec_fn': 'os.setsid'}), "(\n 'python ./%s/main_arg.py --epoch %s --batch %s --job %s --net %s' % (\n bench1, i[0], i[1], i[2], i[3]), close_fds=True, shell=True, preexec_fn\n =os.setsid)\n", (7070, 7237), False, 'import subprocess\n'), ((8073, 8084), 'time.time', 'time.time', ([], {}), '()\n', (8082, 8084), False, 'import time\n'), ((8110, 8293), 'subprocess.Popen', 'subprocess.Popen', (["('python ./%s/main_arg.py --epoch %s --batch %s --job %s --net %s' % (\n bench2, j[0], j[1], j[2], j[3]))"], {'close_fds': '(True)', 
'shell': '(True)', 'preexec_fn': 'os.setsid'}), "(\n 'python ./%s/main_arg.py --epoch %s --batch %s --job %s --net %s' % (\n bench2, j[0], j[1], j[2], j[3]), close_fds=True, shell=True, preexec_fn\n =os.setsid)\n", (8126, 8293), False, 'import subprocess\n')]
|
from rest_framework.pagination import PageNumberPagination
from rest_framework.response import Response
class CustomPagination(PageNumberPagination):
def get_paginated_response(self, data):
return Response(data)
|
[
"rest_framework.response.Response"
] |
[((217, 231), 'rest_framework.response.Response', 'Response', (['data'], {}), '(data)\n', (225, 231), False, 'from rest_framework.response import Response\n')]
|
from typing import Tuple
from uuid import uuid4
from opwen_email_server import azure_constants as constants
from opwen_email_server import config
from opwen_email_server import events
from opwen_email_server.services.auth import AzureAuth
from opwen_email_server.services.queue import AzureQueue
from opwen_email_server.services.storage import AzureTextStorage
from opwen_email_server.utils.log import LogMixin
STORAGE = AzureTextStorage(account=config.BLOBS_ACCOUNT,
key=config.BLOBS_KEY,
container=constants.CONTAINER_SENDGRID_MIME)
QUEUE = AzureQueue(namespace=config.QUEUES_NAMESPACE,
sas_key=config.QUEUES_SAS_KEY,
sas_name=config.QUEUES_SAS_NAME,
name=constants.QUEUE_SENDGRID_MIME)
CLIENTS = AzureAuth(account=config.TABLES_ACCOUNT, key=config.TABLES_KEY,
table=constants.TABLE_AUTH)
class _Receiver(LogMixin):
def __call__(self, client_id: str, email: str) -> Tuple[str, int]:
domain = CLIENTS.domain_for(client_id)
if not domain:
self.log_event(events.UNREGISTERED_CLIENT, {'client_id': client_id}) # noqa: E501
return 'client is not registered', 403
email_id = str(uuid4())
STORAGE.store_text(email_id, email)
QUEUE.enqueue({
'_version': '0.1',
'_type': 'mime_email_received',
'resource_id': email_id,
'container_name': STORAGE.container,
})
self.log_event(events.EMAIL_RECEIVED_FOR_CLIENT, {'domain': domain}) # noqa: E501
return 'received', 200
receive = _Receiver()
|
[
"uuid.uuid4",
"opwen_email_server.services.auth.AzureAuth",
"opwen_email_server.services.queue.AzureQueue",
"opwen_email_server.services.storage.AzureTextStorage"
] |
[((423, 540), 'opwen_email_server.services.storage.AzureTextStorage', 'AzureTextStorage', ([], {'account': 'config.BLOBS_ACCOUNT', 'key': 'config.BLOBS_KEY', 'container': 'constants.CONTAINER_SENDGRID_MIME'}), '(account=config.BLOBS_ACCOUNT, key=config.BLOBS_KEY,\n container=constants.CONTAINER_SENDGRID_MIME)\n', (439, 540), False, 'from opwen_email_server.services.storage import AzureTextStorage\n'), ((600, 749), 'opwen_email_server.services.queue.AzureQueue', 'AzureQueue', ([], {'namespace': 'config.QUEUES_NAMESPACE', 'sas_key': 'config.QUEUES_SAS_KEY', 'sas_name': 'config.QUEUES_SAS_NAME', 'name': 'constants.QUEUE_SENDGRID_MIME'}), '(namespace=config.QUEUES_NAMESPACE, sas_key=config.QUEUES_SAS_KEY,\n sas_name=config.QUEUES_SAS_NAME, name=constants.QUEUE_SENDGRID_MIME)\n', (610, 749), False, 'from opwen_email_server.services.queue import AzureQueue\n'), ((814, 910), 'opwen_email_server.services.auth.AzureAuth', 'AzureAuth', ([], {'account': 'config.TABLES_ACCOUNT', 'key': 'config.TABLES_KEY', 'table': 'constants.TABLE_AUTH'}), '(account=config.TABLES_ACCOUNT, key=config.TABLES_KEY, table=\n constants.TABLE_AUTH)\n', (823, 910), False, 'from opwen_email_server.services.auth import AzureAuth\n'), ((1266, 1273), 'uuid.uuid4', 'uuid4', ([], {}), '()\n', (1271, 1273), False, 'from uuid import uuid4\n')]
|
# Copyright (c) 2020 DDN. All rights reserved.
# Use of this source code is governed by a MIT-style
# license that can be found in the LICENSE file.
import threading
from django.db import transaction
from chroma_core.services import ChromaService, ServiceThread
from chroma_core.services.plugin_runner.resource_manager import ResourceManager
class AgentPluginHandlerCollection(object):
def __init__(self, resource_manager):
from chroma_core.lib.storage_plugin.manager import storage_plugin_manager
from chroma_core.services.plugin_runner.agent_daemon import AgentPluginHandler
self.resource_manager = resource_manager
self.handlers = {}
for plugin_name in storage_plugin_manager.loaded_plugin_names:
self.handlers[plugin_name] = AgentPluginHandler(resource_manager, plugin_name)
def setup_host(self, host_id, updates):
for plugin_name, data in updates.items():
self.handlers[plugin_name].setup_host(host_id, data)
def update_host_resources(self, host_id, updates):
for plugin_name, data in updates.items():
self.handlers[plugin_name].update_host_resources(host_id, data)
def remove_host_resources(self, host_id):
for handler in self.handlers.values():
handler.remove_host_resources(host_id)
@transaction.atomic
def rebalance_host_volumes(self, host_id):
from chroma_core.models import Volume
candidates = Volume.objects.filter(volumenode__host__id=host_id).distinct()
self.resource_manager.balance_unweighted_volume_nodes(candidates)
class Service(ChromaService):
    """Runs the scan daemon, its RPC interfaces, and the per-plugin agent handlers,
    each on its own ServiceThread.
    """

    def __init__(self):
        super(Service, self).__init__()
        self.threads = []
        # Set once all child threads have been created and started.
        self._children_started = threading.Event()
        # Set when the main loop has finished shutting down.
        self._complete = threading.Event()

    def run(self):
        from chroma_core.services.plugin_runner.agent_daemon_interface import AgentDaemonRpcInterface
        from chroma_core.services.plugin_runner.scan_daemon import ScanDaemon
        from chroma_core.services.plugin_runner.scan_daemon_interface import ScanDaemonRpcInterface
        from chroma_core.lib.storage_plugin.manager import storage_plugin_manager

        super(Service, self).run()

        # Refuse to start at all if any storage plugin failed to load.
        errors = storage_plugin_manager.get_errored_plugins()
        if errors:
            self.log.error("The following plugins could not be loaded: %s" % errors)
            raise RuntimeError("Some plugins could not be loaded: %s" % errors)

        resource_manager = ResourceManager()
        scan_daemon = ScanDaemon(resource_manager)

        # One consumer thread per plugin agent RX queue, then the scan daemon
        # and the two RPC interfaces.
        agent_handlers = AgentPluginHandlerCollection(resource_manager)
        self.threads += [ServiceThread(h) for h in agent_handlers.handlers.values()]
        self.threads += [
            ServiceThread(scan_daemon),
            ServiceThread(ScanDaemonRpcInterface(scan_daemon)),
            ServiceThread(AgentDaemonRpcInterface(agent_handlers)),
        ]

        for thread in self.threads:
            thread.start()
        self._children_started.set()

        # Block until stop() signals completion.
        self._complete.wait()
        self.log.debug("Leaving main loop")

    def stop(self):
        super(Service, self).stop()

        # Guard against trying to stop after child threads are created, but before they are started.
        self._children_started.wait()

        self.log.debug("Stopping...")
        for thread in self.threads:
            thread.stop()
        self.log.debug("Joining...")
        for thread in self.threads:
            thread.join()
        self.log.debug("Done.")
        self._complete.set()
|
[
"chroma_core.services.ServiceThread",
"chroma_core.services.plugin_runner.agent_daemon_interface.AgentDaemonRpcInterface",
"chroma_core.services.plugin_runner.resource_manager.ResourceManager",
"chroma_core.services.plugin_runner.agent_daemon.AgentPluginHandler",
"chroma_core.services.plugin_runner.scan_daemon_interface.ScanDaemonRpcInterface",
"threading.Event",
"chroma_core.models.Volume.objects.filter",
"chroma_core.lib.storage_plugin.manager.storage_plugin_manager.get_errored_plugins",
"chroma_core.services.plugin_runner.scan_daemon.ScanDaemon"
] |
[((1759, 1776), 'threading.Event', 'threading.Event', ([], {}), '()\n', (1774, 1776), False, 'import threading\n'), ((1802, 1819), 'threading.Event', 'threading.Event', ([], {}), '()\n', (1817, 1819), False, 'import threading\n'), ((2256, 2300), 'chroma_core.lib.storage_plugin.manager.storage_plugin_manager.get_errored_plugins', 'storage_plugin_manager.get_errored_plugins', ([], {}), '()\n', (2298, 2300), False, 'from chroma_core.lib.storage_plugin.manager import storage_plugin_manager\n'), ((2513, 2530), 'chroma_core.services.plugin_runner.resource_manager.ResourceManager', 'ResourceManager', ([], {}), '()\n', (2528, 2530), False, 'from chroma_core.services.plugin_runner.resource_manager import ResourceManager\n'), ((2553, 2581), 'chroma_core.services.plugin_runner.scan_daemon.ScanDaemon', 'ScanDaemon', (['resource_manager'], {}), '(resource_manager)\n', (2563, 2581), False, 'from chroma_core.services.plugin_runner.scan_daemon import ScanDaemon\n'), ((2878, 2904), 'chroma_core.services.ServiceThread', 'ServiceThread', (['scan_daemon'], {}), '(scan_daemon)\n', (2891, 2904), False, 'from chroma_core.services import ChromaService, ServiceThread\n'), ((790, 839), 'chroma_core.services.plugin_runner.agent_daemon.AgentPluginHandler', 'AgentPluginHandler', (['resource_manager', 'plugin_name'], {}), '(resource_manager, plugin_name)\n', (808, 839), False, 'from chroma_core.services.plugin_runner.agent_daemon import AgentPluginHandler\n'), ((2945, 2980), 'chroma_core.services.plugin_runner.scan_daemon_interface.ScanDaemonRpcInterface', 'ScanDaemonRpcInterface', (['scan_daemon'], {}), '(scan_daemon)\n', (2967, 2980), False, 'from chroma_core.services.plugin_runner.scan_daemon_interface import ScanDaemonRpcInterface\n'), ((3023, 3062), 'chroma_core.services.plugin_runner.agent_daemon_interface.AgentDaemonRpcInterface', 'AgentDaemonRpcInterface', (['agent_handlers'], {}), '(agent_handlers)\n', (3046, 3062), False, 'from chroma_core.services.plugin_runner.agent_daemon_interface 
import AgentDaemonRpcInterface\n'), ((1467, 1518), 'chroma_core.models.Volume.objects.filter', 'Volume.objects.filter', ([], {'volumenode__host__id': 'host_id'}), '(volumenode__host__id=host_id)\n', (1488, 1518), False, 'from chroma_core.models import Volume\n'), ((2824, 2846), 'chroma_core.services.ServiceThread', 'ServiceThread', (['handler'], {}), '(handler)\n', (2837, 2846), False, 'from chroma_core.services import ChromaService, ServiceThread\n')]
|
import matplotlib.pyplot as plt
import numpy as np
from mode_shape import make_dir
from scipy.interpolate import spline
num = 300
fre = 2
scale = 1

# Spatial coordinate and the first two sinusoidal mode shapes on [0, 100].
x = np.arange(0, 101)
mode1 = np.sin(x * 2 * np.pi / 100)
mode2 = np.sin(x * np.pi / 100)
xnew = np.linspace(x.min(), x.max(), 300)

#4 0.01
result_path = 'data/1+2_scale_%0.1f_fre_%d' % (scale, fre)
make_dir(result_path)

count = 0
for i in range(0, 1):
    for w in range(0, 801):
        # Superpose the two modes with time-varying amplitudes at step w.
        y = scale * (mode1 * np.sin(fre * np.pi * w / 100) + mode2 * np.sin(fre * w * np.pi / 50))
        # Smooth the curve onto the finer grid before plotting.
        xsmoo = spline(x, y, xnew)
        plt.figure()
        plt.xlim(0, 105)
        plt.ylim(-50, 50)
        plt.axis('off')
        plt.plot(xnew, xsmoo, linewidth=15)
        plt.savefig(result_path + '/%d.png' % count)
        count += 1
        plt.close()
#
# print(y)
#
#
#
#
#
#
# plt.show()
#
|
[
"matplotlib.pyplot.xlim",
"matplotlib.pyplot.plot",
"scipy.interpolate.spline",
"mode_shape.make_dir",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.close",
"matplotlib.pyplot.axis",
"matplotlib.pyplot.figure",
"numpy.sin",
"numpy.arange",
"matplotlib.pyplot.savefig"
] |
[((159, 176), 'numpy.arange', 'np.arange', (['(0)', '(101)'], {}), '(0, 101)\n', (168, 176), True, 'import numpy as np\n'), ((184, 211), 'numpy.sin', 'np.sin', (['(x * 2 * np.pi / 100)'], {}), '(x * 2 * np.pi / 100)\n', (190, 211), True, 'import numpy as np\n'), ((214, 237), 'numpy.sin', 'np.sin', (['(x * np.pi / 100)'], {}), '(x * np.pi / 100)\n', (220, 237), True, 'import numpy as np\n'), ((340, 361), 'mode_shape.make_dir', 'make_dir', (['result_path'], {}), '(result_path)\n', (348, 361), False, 'from mode_shape import make_dir\n'), ((533, 551), 'scipy.interpolate.spline', 'spline', (['x', 'y', 'xnew'], {}), '(x, y, xnew)\n', (539, 551), False, 'from scipy.interpolate import spline\n'), ((560, 572), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (570, 572), True, 'import matplotlib.pyplot as plt\n'), ((581, 597), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(0)', '(105)'], {}), '(0, 105)\n', (589, 597), True, 'import matplotlib.pyplot as plt\n'), ((606, 623), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(-50)', '(50)'], {}), '(-50, 50)\n', (614, 623), True, 'import matplotlib.pyplot as plt\n'), ((632, 647), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (640, 647), True, 'import matplotlib.pyplot as plt\n'), ((656, 691), 'matplotlib.pyplot.plot', 'plt.plot', (['xnew', 'xsmoo'], {'linewidth': '(15)'}), '(xnew, xsmoo, linewidth=15)\n', (664, 691), True, 'import matplotlib.pyplot as plt\n'), ((701, 745), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(result_path + '/%d.png' % count)"], {}), "(result_path + '/%d.png' % count)\n", (712, 745), True, 'import matplotlib.pyplot as plt\n'), ((770, 781), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (779, 781), True, 'import matplotlib.pyplot as plt\n'), ((441, 470), 'numpy.sin', 'np.sin', (['(fre * np.pi * w / 100)'], {}), '(fre * np.pi * w / 100)\n', (447, 470), True, 'import numpy as np\n'), ((471, 499), 'numpy.sin', 'np.sin', (['(fre * w * np.pi / 50)'], {}), '(fre * w * np.pi 
/ 50)\n', (477, 499), True, 'import numpy as np\n')]
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from math import pi
class proposal_q():
    """Mean-field Gaussian proposal distribution q(w) over the flattened weights
    of a one-hidden-layer network, plus a learnable observation-noise variance.

    Parameters are trainable TF variables created under `scope_name`:
    a mean vector, a log-variance vector and a scalar log noise variance.
    """
    def __init__(self, config, scope_name='proposal'):
        self.config = config
        with tf.variable_scope(scope_name) as scope:
            # Flattened parameter count: W1 (dim x n_hidden) + b1 (n_hidden)
            # + W2 (n_hidden) + b2 (1) = (dim + 1) * n_hidden + n_hidden + 1.
            self.param_size = (self.config.dim + 1) * self.config.n_hidden + self.config.n_hidden + 1
            self._mu = tf.get_variable('mean', shape=(self.param_size), dtype=tf.float32,
                    initializer=tf.random_normal_initializer(stddev=.02))
            # Initialized at -10, i.e. a very small initial variance.
            self._log_variance = tf.get_variable('log_variance',
                    initializer=tf.constant(-10.+np.zeros((self.param_size)).astype('float32')), dtype=tf.float32)
            # Log of the observation-noise variance, initialized so v_noise = 1.
            self._log_v_noise = tf.get_variable('log_v_noise',
                    initializer=tf.constant(np.log(1.0,).astype('float32')),
                    dtype=tf.float32)
        self.params = self.get_parameters_q()
    def draw_samples(self, n_samples):
        """Draw `n_samples` reparameterised samples from q; shape [n_samples, param_size]."""
        # (d+1) x nh + nh + 1
        ret = tf.random_normal([int(n_samples), self.param_size]) * tf.sqrt(self.params['v']) + self.params['m']
        return ret
    def get_parameters_q(self, v_prior=1., scale=1.):
        """Return dict with mean 'm' and variance 'v' of q.

        The variance combines the learned log-variance with the prior variance
        `v_prior` (precisions add); `scale` rescales the learned precision.
        """
        #v = tf.exp(self._log_variance)
        v = 1.0 / (scale * tf.exp(-self._log_variance ) + 1./v_prior)
        m = self._mu
        #m = scale * self._mu * tf.exp(- self._log_variance ) * v
        return {'m': m, 'v': v}
    def log_prob(self, samples, stop_grad=False):
        """Diagonal-Gaussian log q(samples), summed over parameter dimensions.

        If `stop_grad`, gradients are blocked through q's mean and variance.
        """
        qv = self.params['v']
        qm = self.params['m']
        if stop_grad:
            qv = tf.stop_gradient(qv)
            qm = tf.stop_gradient(qm)
        lq = -0.5*tf.log(2*pi*qv) - 0.5*(samples - qm)**2 / qv
        return tf.reduce_sum(lq, axis=1)
class Model():
    """One-hidden-layer Bayesian neural network for regression, trained by a
    self-normalised importance-weighted KL objective between the Gaussian
    proposal `proposal_q` and the unnormalised posterior.
    """
    def __init__(self, config,
            scope_name = 'variational', is_train=True):
        self.config = config
        self.debug = {}
        # Number of training points; rescales the minibatch likelihood term.
        self.N = self.config.n_train
        # Prior variance on the network weights.
        self.v_prior = 1.
        # create placeholders for the input
        self.X = tf.placeholder(
            name='X', dtype=tf.float32,
            shape=[None, self.config.dim],
        )
        self.y = tf.placeholder(
            name='y', dtype=tf.float32,
            shape=[None],
        )
        self.q_approx = proposal_q(self.config)
        self.kl_loss = self.get_klqp_loss(self.config.sample_size, self.X, self.y)
        tf.summary.scalar("kl_loss", self.kl_loss)
        self.rmse, self.ll = self.get_error_and_ll(self.X, self.y, 0., 1.)
        tf.summary.scalar("batch_rmse", self.rmse)
        tf.summary.scalar("batch_ll", self.ll)
    def get_feed_dict(self, batch_chunk):
        """Build a feed dict from a batch with 'X' and 'y' entries."""
        fd = {
            self.X: batch_chunk['X'],
            self.y: batch_chunk['y'],
        }
        return fd
    #k : number of samples
    def predict(self, samples_q, X):
        """Forward pass of the network for each of k weight samples.

        samples_q: [k, param_size] flattened weights; returns [n, k] outputs.
        """
        # X: n x d
        n, d = X.get_shape()[0].value, self.config.dim
        k = self.config.sample_size
        nh = self.config.n_hidden
        # first layer
        w1 = samples_q[:, :d * nh] # w1: k x (nh x d)
        w1 = tf.reshape(w1, (k*nh, d)) # w1 (K x nh) x d
        b1 = samples_q[:, d*nh: (d+1)*nh] # K x nh
        b1 = tf.reshape(b1, (1, k*nh)) # 1 x (K x nh)
        a = tf.matmul(X, w1, transpose_b=True) + b1 # n x (k * nh)
        h = tf.nn.relu(a) # RELU, n x (k x nh)
        # second layer
        samples_q = samples_q[:, (d+1)*nh:]
        w2 = samples_q[:, :nh] # w2: k x nh
        w2 = tf.reshape(w2, (1, k*nh)) # w2: 1 x (kxnh)
        b2 = tf.reshape(samples_q[:, nh:], (1,-1)) # b2: [k]
        # Sum hidden units per (sample, datapoint) block, then add per-sample bias.
        out = tf.reshape( tf.reduce_sum(tf.reshape(h*w2, (-1, nh)), axis=1) , (-1, k)) + b2
        return out
    def get_error_and_ll(self, X, y, location, scale, v_prior=1.):
        """Return (RMSE, per-point log-likelihood), undoing (location, scale) normalisation."""
        v_noise = tf.exp(self.q_approx._log_v_noise) * scale**2
        samples_q = self.q_approx.draw_samples( self.config.sample_size)
        py = self.predict(samples_q, X) * scale + location
        log_factor = -0.5 * tf.log(2 * pi * v_noise) - 0.5 * (tf.expand_dims(y, 1) - py)**2 / v_noise
        # Monte-Carlo average over the k samples via logsumexp - log(k).
        ll = tf.reduce_mean(tf.reduce_logsumexp(log_factor - tf.log(1.*self.config.sample_size), axis=1))
        error = tf.sqrt(tf.reduce_mean((y - tf.reduce_mean(py, 1))**2))
        return error, ll
    def phi(self, n_samples, lpx, lqx, method, alpha=0):
        """Compute normalised importance weights from log p(x) and log q(x).

        'adapted': rank-based weights — for each sample, the fraction of samples
        with smaller density ratio, transformed as (1 - rank)**alpha.
        'alpha': standard tempered weights exp(alpha * (lpx - lqx)).
        """
        diff = lpx - lqx
        if method == 'adapted':
            # \#(t_i < t)
            # Subtract the max for numerical stability before exponentiating.
            diff -= tf.reduce_max(diff)
            dx = tf.exp(diff)
            # prob[i, j] = sign(dx[i] - dx[j]); counting positives gives the rank of i.
            prob = tf.sign(tf.expand_dims(dx, 1) - tf.expand_dims(dx, 0))
            #prob = tf.cast(tf.equal(prob, -1), tf.float32)
            prob = tf.cast(tf.greater(prob, 0.5), tf.float32)
            wx = tf.reduce_sum(prob, axis=1) / n_samples
            wx = (1.-wx)**alpha ## alpha= -1 or alpha = -0.5
        elif method == 'alpha':
            diff = alpha * diff
            diff -= tf.reduce_max(diff)
            wx = tf.exp(diff)
        else:
            raise NotImplementedError
        wx /= tf.reduce_sum(wx) # normalization
        return wx
    def get_klqp_loss(self, n_samples, X, y):
        """Importance-weighted surrogate for KL(q || p) on a minibatch."""
        v_noise = tf.exp(self.q_approx._log_v_noise)
        samples_q = self.q_approx.draw_samples(n_samples)
        # Scale the minibatch likelihood up to the full dataset size N.
        log_factor_value = 1.0 * self.N * self.log_likelihood_factor(samples_q, v_noise, X, y)
        logp0 = self.log_prior(samples_q)
        lqx = self.q_approx.log_prob(samples_q, stop_grad=True)
        lpx = logp0 + log_factor_value
        wx = self.phi(n_samples, lpx, lqx, self.config.method, alpha=self.config.alpha)
        # Weights are treated as constants in the gradient.
        wx = tf.stop_gradient(wx)
        loss = tf.reduce_sum(wx * (lqx - lpx))
        return loss
    def log_likelihood_factor(self, samples_q, v_noise, X, y):
        """Per-sample Gaussian log-likelihood, averaged over the minibatch; shape [k]."""
        assert X.get_shape().ndims == 2, 'illegal inputs'
        assert y.get_shape().ndims == 1, 'illegal inputs'
        py = self.predict(samples_q, X) # n x k
        lik = -0.5 * tf.log(2 * pi * v_noise) - 0.5 * (tf.expand_dims(y, 1) - py) ** 2 /v_noise
        return tf.reduce_mean(lik, axis=0)
    def log_prior(self, samples_q):
        """Log density of the isotropic Gaussian prior N(0, v_prior) per sample."""
        log_p0 = -0.5 * tf.log(2 * pi * self.v_prior) - 0.5 * samples_q **2 / self.v_prior
        return tf.reduce_sum(log_p0, axis=1)
|
[
"tensorflow.reduce_sum",
"tensorflow.nn.relu",
"tensorflow.sqrt",
"tensorflow.summary.scalar",
"numpy.log",
"tensorflow.stop_gradient",
"tensorflow.reshape",
"tensorflow.reduce_mean",
"tensorflow.variable_scope",
"numpy.zeros",
"tensorflow.placeholder",
"tensorflow.matmul",
"tensorflow.exp",
"tensorflow.random_normal_initializer",
"tensorflow.log",
"tensorflow.reduce_max",
"tensorflow.greater",
"tensorflow.expand_dims"
] |
[((1769, 1794), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['lq'], {'axis': '(1)'}), '(lq, axis=1)\n', (1782, 1794), True, 'import tensorflow as tf\n'), ((2088, 2161), 'tensorflow.placeholder', 'tf.placeholder', ([], {'name': '"""X"""', 'dtype': 'tf.float32', 'shape': '[None, self.config.dim]'}), "(name='X', dtype=tf.float32, shape=[None, self.config.dim])\n", (2102, 2161), True, 'import tensorflow as tf\n'), ((2215, 2271), 'tensorflow.placeholder', 'tf.placeholder', ([], {'name': '"""y"""', 'dtype': 'tf.float32', 'shape': '[None]'}), "(name='y', dtype=tf.float32, shape=[None])\n", (2229, 2271), True, 'import tensorflow as tf\n'), ((2448, 2490), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""kl_loss"""', 'self.kl_loss'], {}), "('kl_loss', self.kl_loss)\n", (2465, 2490), True, 'import tensorflow as tf\n'), ((2575, 2617), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""batch_rmse"""', 'self.rmse'], {}), "('batch_rmse', self.rmse)\n", (2592, 2617), True, 'import tensorflow as tf\n'), ((2626, 2664), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""batch_ll"""', 'self.ll'], {}), "('batch_ll', self.ll)\n", (2643, 2664), True, 'import tensorflow as tf\n'), ((3133, 3160), 'tensorflow.reshape', 'tf.reshape', (['w1', '(k * nh, d)'], {}), '(w1, (k * nh, d))\n', (3143, 3160), True, 'import tensorflow as tf\n'), ((3243, 3270), 'tensorflow.reshape', 'tf.reshape', (['b1', '(1, k * nh)'], {}), '(b1, (1, k * nh))\n', (3253, 3270), True, 'import tensorflow as tf\n'), ((3365, 3378), 'tensorflow.nn.relu', 'tf.nn.relu', (['a'], {}), '(a)\n', (3375, 3378), True, 'import tensorflow as tf\n'), ((3527, 3554), 'tensorflow.reshape', 'tf.reshape', (['w2', '(1, k * nh)'], {}), '(w2, (1, k * nh))\n', (3537, 3554), True, 'import tensorflow as tf\n'), ((3583, 3621), 'tensorflow.reshape', 'tf.reshape', (['samples_q[:, nh:]', '(1, -1)'], {}), '(samples_q[:, nh:], (1, -1))\n', (3593, 3621), True, 'import tensorflow as tf\n'), ((5042, 5059), 'tensorflow.reduce_sum', 
'tf.reduce_sum', (['wx'], {}), '(wx)\n', (5055, 5059), True, 'import tensorflow as tf\n'), ((5161, 5195), 'tensorflow.exp', 'tf.exp', (['self.q_approx._log_v_noise'], {}), '(self.q_approx._log_v_noise)\n', (5167, 5195), True, 'import tensorflow as tf\n'), ((5600, 5620), 'tensorflow.stop_gradient', 'tf.stop_gradient', (['wx'], {}), '(wx)\n', (5616, 5620), True, 'import tensorflow as tf\n'), ((5637, 5668), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['(wx * (lqx - lpx))'], {}), '(wx * (lqx - lpx))\n', (5650, 5668), True, 'import tensorflow as tf\n'), ((6031, 6058), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['lik'], {'axis': '(0)'}), '(lik, axis=0)\n', (6045, 6058), True, 'import tensorflow as tf\n'), ((6203, 6232), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['log_p0'], {'axis': '(1)'}), '(log_p0, axis=1)\n', (6216, 6232), True, 'import tensorflow as tf\n'), ((292, 321), 'tensorflow.variable_scope', 'tf.variable_scope', (['scope_name'], {}), '(scope_name)\n', (309, 321), True, 'import tensorflow as tf\n'), ((1631, 1651), 'tensorflow.stop_gradient', 'tf.stop_gradient', (['qv'], {}), '(qv)\n', (1647, 1651), True, 'import tensorflow as tf\n'), ((1669, 1689), 'tensorflow.stop_gradient', 'tf.stop_gradient', (['qm'], {}), '(qm)\n', (1685, 1689), True, 'import tensorflow as tf\n'), ((3298, 3332), 'tensorflow.matmul', 'tf.matmul', (['X', 'w1'], {'transpose_b': '(True)'}), '(X, w1, transpose_b=True)\n', (3307, 3332), True, 'import tensorflow as tf\n'), ((3829, 3863), 'tensorflow.exp', 'tf.exp', (['self.q_approx._log_v_noise'], {}), '(self.q_approx._log_v_noise)\n', (3835, 3863), True, 'import tensorflow as tf\n'), ((4475, 4494), 'tensorflow.reduce_max', 'tf.reduce_max', (['diff'], {}), '(diff)\n', (4488, 4494), True, 'import tensorflow as tf\n'), ((4512, 4524), 'tensorflow.exp', 'tf.exp', (['diff'], {}), '(diff)\n', (4518, 4524), True, 'import tensorflow as tf\n'), ((1133, 1158), 'tensorflow.sqrt', 'tf.sqrt', (["self.params['v']"], {}), "(self.params['v'])\n", (1140, 1158), 
True, 'import tensorflow as tf\n'), ((1709, 1728), 'tensorflow.log', 'tf.log', (['(2 * pi * qv)'], {}), '(2 * pi * qv)\n', (1715, 1728), True, 'import tensorflow as tf\n'), ((4035, 4059), 'tensorflow.log', 'tf.log', (['(2 * pi * v_noise)'], {}), '(2 * pi * v_noise)\n', (4041, 4059), True, 'import tensorflow as tf\n'), ((4688, 4709), 'tensorflow.greater', 'tf.greater', (['prob', '(0.5)'], {}), '(prob, 0.5)\n', (4698, 4709), True, 'import tensorflow as tf\n'), ((4740, 4767), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['prob'], {'axis': '(1)'}), '(prob, axis=1)\n', (4753, 4767), True, 'import tensorflow as tf\n'), ((4925, 4944), 'tensorflow.reduce_max', 'tf.reduce_max', (['diff'], {}), '(diff)\n', (4938, 4944), True, 'import tensorflow as tf\n'), ((4962, 4974), 'tensorflow.exp', 'tf.exp', (['diff'], {}), '(diff)\n', (4968, 4974), True, 'import tensorflow as tf\n'), ((5941, 5965), 'tensorflow.log', 'tf.log', (['(2 * pi * v_noise)'], {}), '(2 * pi * v_noise)\n', (5947, 5965), True, 'import tensorflow as tf\n'), ((6121, 6150), 'tensorflow.log', 'tf.log', (['(2 * pi * self.v_prior)'], {}), '(2 * pi * self.v_prior)\n', (6127, 6150), True, 'import tensorflow as tf\n'), ((554, 595), 'tensorflow.random_normal_initializer', 'tf.random_normal_initializer', ([], {'stddev': '(0.02)'}), '(stddev=0.02)\n', (582, 595), True, 'import tensorflow as tf\n'), ((1319, 1346), 'tensorflow.exp', 'tf.exp', (['(-self._log_variance)'], {}), '(-self._log_variance)\n', (1325, 1346), True, 'import tensorflow as tf\n'), ((3671, 3699), 'tensorflow.reshape', 'tf.reshape', (['(h * w2)', '(-1, nh)'], {}), '(h * w2, (-1, nh))\n', (3681, 3699), True, 'import tensorflow as tf\n'), ((4170, 4207), 'tensorflow.log', 'tf.log', (['(1.0 * self.config.sample_size)'], {}), '(1.0 * self.config.sample_size)\n', (4176, 4207), True, 'import tensorflow as tf\n'), ((4552, 4573), 'tensorflow.expand_dims', 'tf.expand_dims', (['dx', '(1)'], {}), '(dx, 1)\n', (4566, 4573), True, 'import tensorflow as tf\n'), ((4576, 4597), 
'tensorflow.expand_dims', 'tf.expand_dims', (['dx', '(0)'], {}), '(dx, 0)\n', (4590, 4597), True, 'import tensorflow as tf\n'), ((4259, 4280), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['py', '(1)'], {}), '(py, 1)\n', (4273, 4280), True, 'import tensorflow as tf\n'), ((4069, 4089), 'tensorflow.expand_dims', 'tf.expand_dims', (['y', '(1)'], {}), '(y, 1)\n', (4083, 4089), True, 'import tensorflow as tf\n'), ((5975, 5995), 'tensorflow.expand_dims', 'tf.expand_dims', (['y', '(1)'], {}), '(y, 1)\n', (5989, 5995), True, 'import tensorflow as tf\n'), ((879, 890), 'numpy.log', 'np.log', (['(1.0)'], {}), '(1.0)\n', (885, 890), True, 'import numpy as np\n'), ((708, 733), 'numpy.zeros', 'np.zeros', (['self.param_size'], {}), '(self.param_size)\n', (716, 733), True, 'import numpy as np\n')]
|
# the API to `contrib.bech32m` is an abomination unto man. This API is slightly less bad
from typing import Optional, Tuple
from hsms.contrib.bech32m import (
bech32_decode as bech32_decode5,
bech32_encode as bech32_encode5,
convertbits,
Encoding,
)
def bech32_decode(text, max_length: int = 90) -> Optional[Tuple[str, bytes, Encoding]]:
    """Decode a bech32/bech32m string into (prefix, data bytes, encoding).

    Returns None if the underlying decoder rejects `text`.
    """
    hrp, words, encoding = bech32_decode5(text, max_length)
    if hrp is None:
        return None
    # Regroup the 5-bit words into 8-bit bytes.
    # NOTE(review): convertbits is called with its default padding behaviour
    # here — confirm trailing padding bits are handled as intended upstream.
    payload = bytes(convertbits(words, 5, 8))
    return hrp, payload, encoding
def bech32_encode(prefix: str, blob: bytes, encoding: int = Encoding.BECH32M) -> str:
    """Encode `blob` under the human-readable `prefix` as a bech32/bech32m string."""
    # Split the bytes into 5-bit words, then hand off to the low-level encoder.
    words = convertbits(blob, 8, 5)
    return bech32_encode5(prefix, words, encoding)
|
[
"hsms.contrib.bech32m.bech32_encode",
"hsms.contrib.bech32m.bech32_decode",
"hsms.contrib.bech32m.convertbits"
] |
[((393, 425), 'hsms.contrib.bech32m.bech32_decode', 'bech32_decode5', (['text', 'max_length'], {}), '(text, max_length)\n', (407, 425), True, 'from hsms.contrib.bech32m import bech32_decode as bech32_decode5, bech32_encode as bech32_encode5, convertbits, Encoding\n'), ((667, 690), 'hsms.contrib.bech32m.convertbits', 'convertbits', (['blob', '(8)', '(5)'], {}), '(blob, 8, 5)\n', (678, 690), False, 'from hsms.contrib.bech32m import bech32_decode as bech32_decode5, bech32_encode as bech32_encode5, convertbits, Encoding\n'), ((702, 745), 'hsms.contrib.bech32m.bech32_encode', 'bech32_encode5', (['prefix', 'base5_bin', 'encoding'], {}), '(prefix, base5_bin, encoding)\n', (716, 745), True, 'from hsms.contrib.bech32m import bech32_decode as bech32_decode5, bech32_encode as bech32_encode5, convertbits, Encoding\n'), ((492, 521), 'hsms.contrib.bech32m.convertbits', 'convertbits', (['base5_data', '(5)', '(8)'], {}), '(base5_data, 5, 8)\n', (503, 521), False, 'from hsms.contrib.bech32m import bech32_decode as bech32_decode5, bech32_encode as bech32_encode5, convertbits, Encoding\n')]
|
"""
RB-related functions of gates and models
"""
#***************************************************************************************************
# Copyright 2015, 2019 National Technology & Engineering Solutions of Sandia, LLC (NTESS).
# Under the terms of Contract DE-NA0003525 with NTESS, the U.S. Government retains certain rights
# in this software.
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0 or in the LICENSE file in the root pyGSTi directory.
#***************************************************************************************************
import warnings as _warnings
import numpy as _np
from pygsti.tools import matrixtools as _mtls
from pygsti.tools import optools as _optls
from pygsti.tools import rbtools as _rbtls
def predicted_rb_number(model, target_model, weights=None, d=None, rtype='EI'):
    """
    Predicts the RB error rate from a model.

    Implements the "L-matrix" theory from Proctor et al Phys. Rev. Lett. 119,
    130502 (2017), which gives the same predictions as Wallman Quantum 2, 47
    (2018). It accurately predicts the per-Clifford error rate of standard
    Clifford RB, and is also valid for "direct RB" under broad circumstances.
    The model should be trace preserving and completely positive in some
    representation, but which representation is used is irrelevant: the
    predicted RB error rate is a gauge-invariant quantity.

    Parameters
    ----------
    model : Model
        The model the RB protocol randomly samples over (the Cliffords in
        Clifford RB; normally the physical primitives in "direct RB"). Not
        necessarily the set of physical primitives.

    target_model : Model
        The target model corresponding to `model`, consisting of perfect
        gates. This function is not invariant under swapping `model` and
        `target_model`.

    weights : dict, optional
        If not None, unnormalized sampling probabilities keyed by the gates
        in `model`: all values must be non-negative and not all zero, so that
        after normalization they form a valid probability distribution. If
        None, all gates are weighted equally, as in most RB protocols (e.g.,
        Clifford RB); the weighting is flexible in "direct RB".

    d : int, optional
        The Hilbert space dimension. If None, then sqrt(model.dim) is used.

    rtype : str, optional
        The RB error-rate convention, either "EI" or "AGI". "EI" is the
        entanglement-infidelity-type rate r = (d^2 - 1)(1 - p)/d^2 used in
        "direct RB"; "AGI" is the average-gate-infidelity-type rate
        r = (d - 1)(1 - p)/d conventional in Clifford RB. Here p is the decay
        constant from fitting Pm = A + Bp^m.

    Returns
    -------
    r : float.
        The predicted RB number.
    """
    if d is None:
        d = int(round(_np.sqrt(model.dim)))
    decay = predicted_rb_decay_parameter(model, target_model, weights=weights)
    return _rbtls.p_to_r(decay, d=d, rtype=rtype)
def predicted_rb_decay_parameter(model, target_model, weights=None):
    """
    Computes the second largest eigenvalue of the 'L matrix' (see the `L_matrix` function).

    For standard Clifford RB and direct RB, this corresponds to the RB decay
    parameter p in Pm = A + Bp^m for "reasonably low error" trace preserving and
    completely positive gates. See also the `predicted_rb_number` function.

    Parameters
    ----------
    model : Model
        The model to calculate the RB decay parameter of. This model is the
        model randomly sampled over, so this is not necessarily the
        set of physical primitives. In Clifford RB this is a set of
        Clifford gates; in "direct RB" this normally would be the
        physical primitives.

    target_model : Model
        The target model corresponding to model. This function is not invariant under
        swapping `model` and `target_model`: this Model must be the target model, and
        should consistent of perfect gates.

    weights : dict, optional
        If not None, a dictionary of floats, whereby the keys are the gates
        in `model` and the values are the unnormalized probabilities to apply
        each gate at each stage of the RB protocol. If not None, the values
        in weights must all be non-negative, and they must not all be zero.
        Because, when divided by their sum, they must be a valid probability
        distribution. If None, the weighting defaults to an equal weighting
        on all gates, as this is used in many RB protocols (e.g., Clifford RB).
        But, this weighting is flexible in the "direct RB" protocol.

    Returns
    -------
    p : float.
        The second largest eigenvalue of L. This is the RB decay parameter
        for various types of RB.
    """
    L = L_matrix(model, target_model, weights=weights)
    evals = _np.linalg.eigvals(L)
    # Sort the (generally complex) eigenvalues by decreasing magnitude *before*
    # taking absolute values: applying abs() first would zero every imaginary
    # part and make the imaginary-component check below unreachable.
    evals = evals[_np.argsort(_np.abs(evals))[::-1]]
    # The largest-magnitude eigenvalue should be ~1 for a trace-preserving model.
    if abs(abs(evals[0]) - 1) > 10**(-12):
        _warnings.warn("Output may be unreliable because the model is not approximately trace-preserving.")
    # Check the magnitude of the imaginary part (a large negative component is
    # just as problematic as a positive one).
    if abs(evals[1].imag) > 10**(-10):
        _warnings.warn("Output may be unreliable because the RB decay constant has a significant imaginary component.")
    p = abs(evals[1])
    return p
def rb_gauge(model, target_model, weights=None, mx_basis=None, eigenvector_weighting=1.0):
    """
    Computes the gauge transformation required so that the RB number matches the average model infidelity.

    This function computes the gauge transformation required so that, when the
    model is transformed via this gauge-transformation, the RB number -- as
    predicted by the function `predicted_rb_number` -- is the average model
    infidelity between the transformed `model` model and the target model
    `target_model`. This transformation is defined Proctor et al
    Phys. Rev. Lett. 119, 130502 (2017), and see also Wallman Quantum 2, 47
    (2018).

    Parameters
    ----------
    model : Model
        The RB model. This is not necessarily the set of physical primitives -- it
        is the model randomly sampled over in the RB protocol (e.g., the Cliffords).

    target_model : Model
        The target model corresponding to model. This function is not invariant under
        swapping `model` and `target_model`: this Model must be the target model, and
        should consistent of perfect gates.

    weights : dict, optional
        If not None, a dictionary of floats, whereby the keys are the gates
        in `model` and the values are the unnormalized probabilities to apply
        each gate at each stage of the RB protocol. If not None, the values
        in weights must all be non-negative, and they must not all be zero.
        Because, when divided by their sum, they must be a valid probability
        distribution. If None, the weighting defaults to an equal weighting
        on all gates, as this is used in many RB protocols (e.g., Clifford RB).
        But, this weighting is flexible in the "direct RB" protocol.

    mx_basis : {"std","gm","pp"}, optional
        The basis of the models. If None, the basis is obtained from the model.

    eigenvector_weighting : float, optional
        Must be non-zero. A weighting on the eigenvector with eigenvalue that
        is the RB decay parameter, in the sum of this eigenvector and the
        eigenvector with eigenvalue of 1 that defines the returned matrix `l_operator`.
        The value of this factor does not change whether this `l_operator` transforms into
        a gauge in which r = AGsI, but it may impact on other properties of the
        gates in that gauge. It is irrelevant if the gates are unital.

    Returns
    -------
    l_operator : array
        The matrix defining the gauge-transformation.
    """
    gam, vecs = _np.linalg.eig(L_matrix(model, target_model, weights=weights))
    absgam = abs(gam)
    # The largest-magnitude eigenvalue should be ~1 for a trace-preserving model.
    index_max = _np.argmax(absgam)
    gam_max = gam[index_max]

    if abs(gam_max - 1) > 10**(-12):
        _warnings.warn("Output may be unreliable because the model is not approximately trace-preserving.")

    # Zero out the largest eigenvalue so argmax finds the second largest: the
    # RB decay constant.
    absgam[index_max] = 0.0
    index_2ndmax = _np.argmax(absgam)
    decay_constant = gam[index_2ndmax]
    # Compare |imag| to the tolerance: a large *negative* imaginary component
    # is equally problematic and `imag > tol` alone would miss it.
    if abs(decay_constant.imag) > 10**(-12):
        _warnings.warn("Output may be unreliable because the RB decay constant has a significant imaginary component.")

    # The gauge operator (vectorized) is a weighted sum of the two eigenvectors.
    vec_l_operator = vecs[:, index_max] + eigenvector_weighting * vecs[:, index_2ndmax]

    if mx_basis is None:
        mx_basis = model.basis.name
    assert(mx_basis == 'pp' or mx_basis == 'gm' or mx_basis == 'std'), "mx_basis must be 'gm', 'pp' or 'std'."

    if mx_basis in ('pp', 'gm'):
        # Check the *magnitude* of the imaginary parts before discarding them;
        # amax of the raw imaginary parts would miss large negative components.
        assert(_np.amax(_np.abs(vec_l_operator.imag)) < 10**(-15)), "If 'gm' or 'pp' basis, RB gauge matrix should be real."
        vec_l_operator = vec_l_operator.real

    # Clean up numerical noise before un-vectorizing into the gauge matrix.
    vec_l_operator[abs(vec_l_operator) < 10**(-15)] = 0.
    l_operator = _mtls.unvec(vec_l_operator)

    return l_operator
def transform_to_rb_gauge(model, target_model, weights=None, mx_basis=None, eigenvector_weighting=1.0):
    """
    Transforms a Model into the "RB gauge" (see the `RB_gauge` function).

    This notion was introduced in Proctor et al Phys. Rev. Lett. 119, 130502
    (2017). This gauge is a function of both the model and its target, which
    may be input in any gauge; in the returned gauge, the RB number equals the
    average model infidelity between the output :class:`Model` and
    `target_model`.

    Parameters
    ----------
    model : Model
        The RB model: the model randomly sampled over in the RB protocol
        (e.g., the Cliffords), not necessarily the physical primitives.

    target_model : Model
        The (perfect-gate) target model corresponding to `model`. The
        arguments are not interchangeable.

    weights : dict, optional
        Unnormalized, non-negative (not all zero) sampling probabilities,
        keyed by the gates in `model`. If None, all gates are weighted
        equally, as in most RB protocols (e.g., Clifford RB); the weighting
        is flexible in "direct RB".

    mx_basis : {"std","gm","pp"}, optional
        The basis of the models. If None, the basis is obtained from the model.

    eigenvector_weighting : float, optional
        Must be non-zero. The weighting on the RB-decay eigenvector in the sum
        (with the eigenvalue-1 eigenvector) that defines the gauge operator.
        Its value does not affect whether r = AGsI holds in the new gauge,
        only other properties of the gates there; irrelevant for unital gates.

    Returns
    -------
    model_in_RB_gauge : Model
        The model `model` transformed into the "RB gauge".
    """
    from ..models.gaugegroup import FullGaugeGroupElement as _FullGaugeGroupElement
    # Compute the gauge matrix, invert it, and apply it in place to a copy.
    gauge_mx = rb_gauge(model, target_model, weights=weights, mx_basis=mx_basis,
                        eigenvector_weighting=eigenvector_weighting)
    model_in_RB_gauge = model.copy()
    model_in_RB_gauge.transform_inplace(_FullGaugeGroupElement(_np.linalg.inv(gauge_mx)))
    return model_in_RB_gauge
def L_matrix(model, target_model, weights=None): # noqa N802
"""
Constructs a generalization of the 'L-matrix' linear operator on superoperators.
From Proctor et al Phys. Rev. Lett. 119, 130502 (2017), the 'L-matrix' is
represented as a matrix via the "stack" operation. This eigenvalues of this
matrix describe the decay constant (or constants) in an RB decay curve for
an RB protocol whereby random elements of the provided model are sampled
according to the `weights` probability distribution over the model. So, this
facilitates predictions of Clifford RB and direct RB decay curves.
Parameters
----------
model : Model
The RB model. This is not necessarily the set of physical primitives -- it
is the model randomly sampled over in the RB protocol (e.g., the Cliffords).
target_model : Model
The target model corresponding to model. This function is not invariant under
swapping `model` and `target_model`: this Model must be the target model, and
should consistent of perfect gates.
weights : dict, optional
If not None, a dictionary of floats, whereby the keys are the gates
in `model` and the values are the unnormalized probabilities to apply
each gate at each stage of the RB protocol. If not None, the values
in weights must all be non-negative, and they must not all be zero.
Because, when divided by their sum, they must be a valid probability
distribution. If None, the weighting defaults to an equal weighting
on all gates, as this is used in many RB protocols (e.g., Clifford RB).
But, this weighting is flexible in the "direct RB" protocol.
Returns
-------
L : float
A weighted version of the L operator from Proctor et al Phys. Rev. Lett.
119, 130502 (2017), represented as a matrix using the 'stacking' convention.
"""
if weights is None:
weights = {}
for key in list(target_model.operations.keys()):
weights[key] = 1.
normalizer = _np.sum(_np.array([weights[key] for key in list(target_model.operations.keys())]))
L_matrix = (1 / normalizer) * _np.sum(
weights[key] * _np.kron(
model.operations[key].to_dense(on_space='HilbertSchmidt').T,
_np.linalg.inv(target_model.operations[key].to_dense(on_space='HilbertSchmidt'))
) for key in target_model.operations.keys())
return L_matrix
def R_matrix_predicted_rb_decay_parameter(model, group, group_to_model=None, weights=None): # noqa N802
"""
Returns the second largest eigenvalue of a generalization of the 'R-matrix' [see the `R_matrix` function].
Introduced in Proctor et al Phys. Rev. Lett. 119, 130502 (2017). This
number is a prediction of the RB decay parameter for trace-preserving gates
and a variety of forms of RB, including Clifford and direct RB. This
function creates a matrix which scales super-exponentially in the number of
qubits.
Parameters
----------
model : Model
The model to predict the RB decay paramter for. If `group_to_model` is
None, the labels of the gates in `model` should be the same as the labels of the
group elements in `group`. For Clifford RB this would be the clifford model,
for direct RB it would be the primitive gates.
group : MatrixGroup
The group that the `model` model contains gates from (`model` does not
need to be the full group, and could be a subset of `group`). For
Clifford RB and direct RB, this would be the Clifford group.
group_to_model : dict, optional
If not None, a dictionary that maps labels of group elements to labels
of `model`. If `model` and `group` elements have the same labels, this dictionary
is not required. Otherwise it is necessary.
weights : dict, optional
If not None, a dictionary of floats, whereby the keys are the gates in `model`
and the values are the unnormalized probabilities to apply each gate at
each stage of the RB protocol. If not None, the values in weights must all
be positive or zero, and they must not all be zero (because, when divided by
their sum, they must be a valid probability distribution). If None, the
weighting defaults to an equal weighting on all gates, as used in most RB
protocols.
Returns
-------
p : float
The predicted RB decay parameter. Valid for standard Clifford RB or direct RB
with trace-preserving gates, and in a range of other circumstances.
"""
R = R_matrix(model, group, group_to_model=group_to_model, weights=weights)
E = _np.absolute(_np.linalg.eigvals(R))
E = _np.flipud(_np.sort(E))
p = E[1]
return p
def R_matrix(model, group, group_to_model=None, weights=None): # noqa N802
"""
Constructs a generalization of the 'R-matrix' of Proctor et al Phys. Rev. Lett. 119, 130502 (2017).
This matrix described the exact behaviour of the average success
probablities of RB sequences. This matrix is super-exponentially large in
the number of qubits, but can be constructed for 1-qubit models.
Parameters
----------
model : Model
The noisy model (e.g., the Cliffords) to calculate the R matrix of.
The correpsonding `target` model (not required in this function)
must be equal to or a subset of (a faithful rep of) the group `group`.
If `group_to_model `is None, the labels of the gates in model should be
the same as the labels of the corresponding group elements in `group`.
For Clifford RB `model` should be the clifford model; for direct RB
this should be the native model.
group : MatrixGroup
The group that the `model` model contains gates from. For Clifford RB
or direct RB, this would be the Clifford group.
group_to_model : dict, optional
If not None, a dictionary that maps labels of group elements to labels
of model. This is required if the labels of the gates in `model` are different
from the labels of the corresponding group elements in `group`.
weights : dict, optional
If not None, a dictionary of floats, whereby the keys are the gates in model
and the values are the unnormalized probabilities to apply each gate at
for each layer of the RB protocol. If None, the weighting defaults to an
equal weighting on all gates, as used in most RB protocols (e.g., Clifford
RB).
Returns
-------
R : float
A weighted, a subset-sampling generalization of the 'R-matrix' from Proctor
et al Phys. Rev. Lett. 119, 130502 (2017).
"""
if group_to_model is None:
for key in list(model.operations.keys()):
assert(key in group.labels), "Gates labels are not in `group`!"
else:
for key in list(model.operations.keys()):
assert(key in group_to_model.values()), "Gates labels are not in `group_to_model`!"
d = int(round(_np.sqrt(model.dim)))
group_dim = len(group)
R_dim = group_dim * d**2
R = _np.zeros([R_dim, R_dim], float)
if weights is None:
weights = {}
for key in list(model.operations.keys()):
weights[key] = 1.
normalizer = _np.sum(_np.array([weights[key] for key in list(model.operations.keys())]))
for i in range(0, group_dim):
for j in range(0, group_dim):
label_itoj = group.labels[group.product([group.inverse_index(i), j])]
if group_to_model is not None:
if label_itoj in group_to_model:
gslabel = group_to_model[label_itoj]
R[j * d**2:(j + 1) * d**2, i * d**2:(i + 1) * d**2] = weights[gslabel] * model.operations[gslabel]
else:
if label_itoj in list(model.operations.keys()):
gslabel = label_itoj
R[j * d**2:(j + 1) * d**2, i * d**2:(i + 1) * d**2] = weights[gslabel] * model.operations[gslabel]
R = R / normalizer
return R
### COMMENTED OUT SO THAT THIS FILE DOESN'T NEED "from .. import construction as _cnst".
### THIS SHOULD BE ADDED BACK IN AT SOME POINT.
# def exact_rb_asps(model, group, m_max, m_min=0, m_step=1, success_outcomelabel=('0',),
# group_to_model=None, weights=None, compilation=None, group_twirled=False):
# """
# Calculates the exact RB average success probablilites (ASP).
# Uses some generalizations of the formula given Proctor et al
# Phys. Rev. Lett. 119, 130502 (2017). This formula does not scale well with
# group size and qubit number, and for the Clifford group it is likely only
# practical for a single qubit.
# Parameters
# ----------
# model : Model
# The noisy model (e.g., the Cliffords) to calculate the R matrix of.
# The correpsonding `target` model (not required in this function)
# must be equal to or a subset of (a faithful rep of) the group `group`.
# If group_to_model is None, the labels of the gates in model should be
# the same as the labels of the corresponding group elements in `group`.
# For Clifford RB `model` should be the clifford model; for direct RB
# this should be the native model.
# group : MatrixGroup
# The group that the `model` model contains gates from. For Clifford RB
# or direct RB, this would be the Clifford group.
# m_max : int
# The maximal sequence length of the random gates, not including the
# inversion gate.
# m_min : int, optional
# The minimal sequence length. Defaults to the smallest valid value of 0.
# m_step : int, optional
# The step size between sequence lengths. Defaults to the smallest valid
# value of 1.
# success_outcomelabel : str or tuple, optional
# The outcome label associated with success.
# group_to_model : dict, optional
# If not None, a dictionary that maps labels of group elements to labels
# of model. This is required if the labels of the gates in `model` are different
# from the labels of the corresponding group elements in `group`.
# weights : dict, optional
# If not None, a dictionary of floats, whereby the keys are the gates in model
# and the values are the unnormalized probabilities to apply each gate at
# for each layer of the RB protocol. If None, the weighting defaults to an
# equal weighting on all gates, as used in most RB protocols (e.g., Clifford
# RB).
# compilation : dict, optional
# If `model` is not the full group `group` (with the same labels), then a
# compilation for the group elements, used to implement the inversion gate
# (and the initial randomgroup element, if `group_twirled` is True). This
# is a dictionary with the group labels as keys and a gate sequence of the
# elements of `model` as values.
# group_twirled : bool, optional
# If True, the random sequence starts with a single uniformly random group
# element before the m random elements of `model`.
# Returns
# -------
# m : float
# Array of sequence length values that the ASPs have been calculated for.
# P_m : float
# Array containing ASP values for the specified sequence length values.
# """
# if compilation is None:
# for key in list(model.operations.keys()):
# assert(key in group.labels), "Gates labels are not in `group`, so `compilation must be specified."
# for label in group.labels:
# assert(label in list(model.operations.keys())
# ), "Some group elements not in `model`, so `compilation must be specified."
# i_max = _np.floor((m_max - m_min) / m_step).astype('int')
# m = _np.zeros(1 + i_max, int)
# P_m = _np.zeros(1 + i_max, float)
# group_dim = len(group)
# R = R_matrix(model, group, group_to_model=group_to_model, weights=weights)
# success_prepLabel = list(model.preps.keys())[0] # just take first prep
# success_effectLabel = success_outcomelabel[-1] if isinstance(success_outcomelabel, tuple) \
# else success_outcomelabel
# extended_E = _np.kron(_mtls.column_basis_vector(0, group_dim).T, model.povms['Mdefault'][success_effectLabel].T)
# extended_rho = _np.kron(_mtls.column_basis_vector(0, group_dim), model.preps[success_prepLabel])
# if compilation is None:
# extended_E = group_dim * _np.dot(extended_E, R)
# if group_twirled is True:
# extended_rho = _np.dot(R, extended_rho)
# else:
# full_model = _cnst.create_explicit_alias_model(model, compilation)
# R_fullgroup = R_matrix(full_model, group)
# extended_E = group_dim * _np.dot(extended_E, R_fullgroup)
# if group_twirled is True:
# extended_rho = _np.dot(R_fullgroup, extended_rho)
# Rstep = _np.linalg.matrix_power(R, m_step)
# Riterate = _np.linalg.matrix_power(R, m_min)
# for i in range(0, 1 + i_max):
# m[i] = m_min + i * m_step
# P_m[i] = _np.dot(extended_E, _np.dot(Riterate, extended_rho))
# Riterate = _np.dot(Rstep, Riterate)
# return m, P_m
### COMMENTED OUT SO THAT THIS FILE DOESN'T NEED "from .. import construction as _cnst"
### THIS SHOULD BE ADDED BACK IN AT SOME POINT.
# def L_matrix_asps(model, target_model, m_max, m_min=0, m_step=1, success_outcomelabel=('0',), # noqa N802
# compilation=None, group_twirled=False, weights=None, gauge_optimize=True,
# return_error_bounds=False, norm='diamond'):
# """
# Computes RB average survival probablities, as predicted by the 'L-matrix' theory.
# This theory was introduced in Proctor et al Phys. Rev. Lett. 119, 130502
# (2017). Within the function, the model is gauge-optimized to target_model. This is
# *not* optimized to the gauge specified by Proctor et al, but instead performs the
# standard pyGSTi gauge-optimization (using the frobenius distance). In most cases,
# this is likely to be a reasonable proxy for the gauge optimization perscribed by
# Proctor et al.
# Parameters
# ----------
# model : Model
# The noisy model.
# target_model : Model
# The target model.
# m_max : int
# The maximal sequence length of the random gates, not including the inversion gate.
# m_min : int, optional
# The minimal sequence length. Defaults to the smallest valid value of 0.
# m_step : int, optional
# The step size between sequence lengths.
# success_outcomelabel : str or tuple, optional
# The outcome label associated with success.
# compilation : dict, optional
# If `model` is not the full group, then a compilation for the group elements,
# used to implement the inversion gate (and the initial random group element,
# if `group_twirled` is True). This is a dictionary with the group labels as
# keys and a gate sequence of the elements of `model` as values.
# group_twirled : bool, optional
# If True, the random sequence starts with a single uniformly random group
# element before the m random elements of `model`.
# weights : dict, optional
# If not None, a dictionary of floats, whereby the keys are the gates in model
# and the values are the unnormalized probabilities to apply each gate at
# for each layer of the RB protocol. If None, the weighting defaults to an
# equal weighting on all gates, as used in most RB protocols (e.g., Clifford
# RB).
# gauge_optimize : bool, optional
# If True a gauge-optimization to the target model is implemented before
# calculating all quantities. If False, no gauge optimization is performed.
# Whether or not a gauge optimization is performed does not affect the rate of
# decay but it will generally affect the exact form of the decay. E.g., if a
# perfect model is given to the function -- but in the "wrong" gauge -- no
# decay will be observed in the output P_m, but the P_m can be far from 1 (even
# for perfect SPAM) for all m. The gauge optimization is optional, as it is
# not guaranteed to always improve the accuracy of the reported P_m, although when
# gauge optimization is performed this limits the possible deviations of the
# reported P_m from the true P_m.
# return_error_bounds : bool, optional
# Sets whether or not to return error bounds for how far the true ASPs can deviate
# from the values returned by this function.
# norm : str, optional
# The norm used in the error bound calculation. Either 'diamond' for the diamond
# norm (the default) or '1to1' for the Hermitian 1 to 1 norm.
# Returns
# -------
# m : float
# Array of sequence length values that the ASPs have been calculated for.
# P_m : float
# Array containing predicted ASP values for the specified sequence length values.
# if error_bounds is True :
# lower_bound: float
# Array containing lower bounds on the possible ASP values
# upper_bound: float
# Array containing upper bounds on the possible ASP values
# """
# d = int(round(_np.sqrt(model.dim)))
# if gauge_optimize:
# model_go = _algs.gaugeopt_to_target(model, target_model)
# else:
# model_go = model.copy()
# L = L_matrix(model_go, target_model, weights=weights)
# success_prepLabel = list(model.preps.keys())[0] # just take first prep
# success_effectLabel = success_outcomelabel[-1] if isinstance(success_outcomelabel, tuple) \
# else success_outcomelabel
# identity_vec = _mtls.vec(_np.identity(d**2, float))
# if compilation is not None:
# model_group = _cnst.create_explicit_alias_model(model_go, compilation)
# model_target_group = _cnst.create_explicit_alias_model(target_model, compilation)
# delta = gate_dependence_of_errormaps(model_group, model_target_group, norm=norm)
# emaps = errormaps(model_group, model_target_group)
# E_eff = _np.dot(model_go.povms['Mdefault'][success_effectLabel].T, emaps.operations['Gavg'])
# if group_twirled is True:
# L_group = L_matrix(model_group, model_target_group)
# if compilation is None:
# delta = gate_dependence_of_errormaps(model_go, target_model, norm=norm)
# emaps = errormaps(model_go, target_model)
# E_eff = _np.dot(model_go.povms['Mdefault'][success_effectLabel].T, emaps.operations['Gavg'])
# i_max = _np.floor((m_max - m_min) / m_step).astype('int')
# m = _np.zeros(1 + i_max, int)
# P_m = _np.zeros(1 + i_max, float)
# upper_bound = _np.zeros(1 + i_max, float)
# lower_bound = _np.zeros(1 + i_max, float)
# Lstep = _np.linalg.matrix_power(L, m_step)
# Literate = _np.linalg.matrix_power(L, m_min)
# for i in range(0, 1 + i_max):
# m[i] = m_min + i * m_step
# if group_twirled:
# L_m_rdd = _mtls.unvec(_np.dot(L_group, _np.dot(Literate, identity_vec)))
# else:
# L_m_rdd = _mtls.unvec(_np.dot(Literate, identity_vec))
# P_m[i] = _np.dot(E_eff, _np.dot(L_m_rdd, model_go.preps[success_prepLabel]))
# Literate = _np.dot(Lstep, Literate)
# upper_bound[i] = P_m[i] + delta / 2
# lower_bound[i] = P_m[i] - delta / 2
# if upper_bound[i] > 1:
# upper_bound[i] = 1.
# if lower_bound[i] < 0:
# lower_bound[i] = 0.
# if return_error_bounds:
# return m, P_m, lower_bound, upper_bound
# else:
# return m, P_m
def errormaps(model, target_model):
"""
Computes the 'left-multiplied' error maps associated with a noisy gate set, along with the average error map.
This is the model [E_1,...] such that
`G_i = E_iT_i`,
where `T_i` is the gate which `G_i` is a noisy
implementation of. There is an additional gate in the set, that has
the key 'Gavg'. This is the average of the error maps.
Parameters
----------
model : Model
The imperfect model.
target_model : Model
The target model.
Returns
-------
errormaps : Model
The left multplied error gates, along with the average error map,
with the key 'Gavg'.
"""
errormaps_gate_list = []
errormaps = model.copy()
for gate in list(target_model.operations.keys()):
errormaps.operations[gate] = _np.dot(model.operations[gate],
_np.transpose(target_model.operations[gate]))
errormaps_gate_list.append(errormaps.operations[gate])
errormaps.operations['Gavg'] = _np.mean(_np.array([i for i in errormaps_gate_list]),
axis=0, dtype=_np.float64)
return errormaps
def gate_dependence_of_errormaps(model, target_model, norm='diamond', mx_basis=None):
"""
Computes the "gate-dependence of errors maps" parameter defined by
delta_avg = avg_i|| E_i - avg_i(E_i) ||,
where E_i are the error maps, and the norm is either the diamond norm
or the 1-to-1 norm. This quantity is defined in Magesan et al PRA 85
042311 2012.
Parameters
----------
model : Model
The actual model
target_model : Model
The target model.
norm : str, optional
The norm used in the calculation. Can be either 'diamond' for
the diamond norm, or '1to1' for the Hermitian 1 to 1 norm.
mx_basis : {"std","gm","pp"}, optional
The basis of the models. If None, the basis is obtained from
the model.
Returns
-------
delta_avg : float
The value of the parameter defined above.
"""
error_gs = errormaps(model, target_model)
delta = []
if mx_basis is None:
mx_basis = model.basis.name
assert(mx_basis == 'pp' or mx_basis == 'gm' or mx_basis == 'std'), "mx_basis must be 'gm', 'pp' or 'std'."
for gate in list(target_model.operations.keys()):
if norm == 'diamond':
print(error_gs.operations[gate])
print(error_gs.operations['Gavg'])
delta.append(_optls.diamonddist(error_gs.operations[gate], error_gs.operations['Gavg'],
mx_basis=mx_basis))
elif norm == '1to1':
gate_dif = error_gs.operations[gate] - error_gs.operations['Gavg']
delta.append(_optls.norm1to1(gate_dif, num_samples=1000, mx_basis=mx_basis, return_list=False))
else:
raise ValueError("Only diamond or 1to1 norm available.")
delta_avg = _np.mean(delta)
return delta_avg
# Future : perhaps put these back in.
#def Magesan_theory_predicted_decay(model, target_model, mlist, success_outcomelabel=('0',),
# norm='1to1', order='zeroth', return_all = False):
#
# assert(order == 'zeroth' or order == 'first')
#
# d = int(round(_np.sqrt(model.dim)))
# MTPs = {}
# MTPs['r'] = gateset_infidelity(model,target_model,itype='AGI')
# MTPs['p'] = _analysis.r_to_p(MTPs['r'],d,rtype='AGI')
# MTPs['delta'] = gate_dependence_of_errormaps(model, target_model, norm)
# error_gs = errormaps(model, target_model)
#
# R_list = []
# Q_list = []
# for gate in list(target_model.operations.keys()):
# R_list.append(_np.dot(_np.dot(error_gs.operations[gate],target_model.operations[gate]),
# _np.dot(error_gs.operations['Gavg'],_np.transpose(target_model.operations[gate]))))
# Q_list.append(_np.dot(target_model.operations[gate],
# _np.dot(error_gs.operations[gate],_np.transpose(target_model.operations[gate]))))
#
# error_gs.operations['GR'] = _np.mean(_np.array([ i for i in R_list]),axis=0)
# error_gs.operations['GQ'] = _np.mean(_np.array([ i for i in Q_list]),axis=0)
# error_gs.operations['GQ2'] = _np.dot(error_gs.operations['GQ'],error_gs.operations['Gavg'])
# error_gs.preps['rhoc_mixed'] = 1./d*_cnst.create_identity_vec(error_gs.basis)#
#
# #Assumes standard POVM labels
# povm = _objs.UnconstrainedPOVM( [('0_cm', target_model.povms['Mdefault']['0']),
# ('1_cm', target_model.povms['Mdefault']['1'])] )
# ave_error_gsl = _cnst.to_circuits([('rho0','Gavg'),('rho0','GR'),('rho0','Gavg','GQ')])
# data = _cnst.simulate_data(error_gs, ave_error_gsl, num_samples=1, sample_error="none")#
# pr_L_p = data[('rho0','Gavg')][success_outcomelabel]
# pr_L_I = data[('rho0','Gavg')][success_outcomelabel_cm]
# pr_R_p = data[('rho0','GR')][success_outcomelabel]
# pr_R_I = data[('rho0','GR')][success_outcomelabel_cm]
# pr_Q_p = data[('rho0','Gavg','GQ')][success_outcomelabel]
# p = MTPs['p']
# B_1 = pr_R_I
# A_1 = (pr_Q_p/p) - pr_L_p + ((p -1)*pr_L_I/p) + ((pr_R_p - pr_R_I)/p)
# C_1 = pr_L_p - pr_L_I
# q = _tls.average_gate_infidelity(error_gs.operations['GQ2'],_np.identity(d**2,float))
# q = _analysis.r_to_p(q,d,rtype='AGI')
#
# if order == 'zeroth':
# MTPs['A'] = pr_L_I
# MTPs['B'] = pr_L_p - pr_L_I
# if order == 'first':
# MTPs['A'] = B_1
# MTPs['B'] = A_1 - C_1*(q - 1)/p**2
# MTPs['C'] = C_1*(q- p**2)/p**2
#
# if order == 'zeroth':
# Pm = MTPs['A'] + MTPs['B']*MTPs['p']**_np.array(mlist)
# if order == 'first':
# Pm = MTPs['A'] + (MTPs['B'] + _np.array(mlist)*MTPs['C'])*MTPs['p']**_np.array(mlist)
#
# sys_eb = (MTPs['delta'] + 1)**(_np.array(mlist)+1) - 1
# if order == 'first':
# sys_eb = sys_eb - (_np.array(mlist)+1)*MTPs['delta']
#
# upper = Pm + sys_eb
# upper[upper > 1]=1.
#
# lower = Pm - sys_eb
# lower[lower < 0]=0.
#
# return mlist, Pm, upper, lower, MTPs
|
[
"numpy.linalg.eigvals",
"pygsti.tools.optools.diamonddist",
"numpy.argmax",
"pygsti.tools.rbtools.p_to_r",
"pygsti.tools.matrixtools.unvec",
"numpy.zeros",
"numpy.transpose",
"numpy.amax",
"numpy.sort",
"numpy.mean",
"numpy.linalg.inv",
"numpy.array",
"warnings.warn",
"pygsti.tools.optools.norm1to1",
"numpy.sqrt"
] |
[((4189, 4223), 'pygsti.tools.rbtools.p_to_r', '_rbtls.p_to_r', (['p'], {'d': 'd', 'rtype': 'rtype'}), '(p, d=d, rtype=rtype)\n', (4202, 4223), True, 'from pygsti.tools import rbtools as _rbtls\n'), ((9132, 9150), 'numpy.argmax', '_np.argmax', (['absgam'], {}), '(absgam)\n', (9142, 9150), True, 'import numpy as _np\n'), ((9374, 9392), 'numpy.argmax', '_np.argmax', (['absgam'], {}), '(absgam)\n', (9384, 9392), True, 'import numpy as _np\n'), ((10124, 10151), 'pygsti.tools.matrixtools.unvec', '_mtls.unvec', (['vec_l_operator'], {}), '(vec_l_operator)\n', (10135, 10151), True, 'from pygsti.tools import matrixtools as _mtls\n'), ((20112, 20144), 'numpy.zeros', '_np.zeros', (['[R_dim, R_dim]', 'float'], {}), '([R_dim, R_dim], float)\n', (20121, 20144), True, 'import numpy as _np\n'), ((35880, 35895), 'numpy.mean', '_np.mean', (['delta'], {}), '(delta)\n', (35888, 35895), True, 'import numpy as _np\n'), ((6111, 6132), 'numpy.linalg.eigvals', '_np.linalg.eigvals', (['L'], {}), '(L)\n', (6129, 6132), True, 'import numpy as _np\n'), ((6153, 6164), 'numpy.sort', '_np.sort', (['E'], {}), '(E)\n', (6161, 6164), True, 'import numpy as _np\n'), ((6208, 6317), 'warnings.warn', '_warnings.warn', (['"""Output may be unreliable because the model is not approximately trace-preserving."""'], {}), "(\n 'Output may be unreliable because the model is not approximately trace-preserving.'\n )\n", (6222, 6317), True, 'import warnings as _warnings\n'), ((6347, 6468), 'warnings.warn', '_warnings.warn', (['"""Output may be unreliable because the RB decay constant has a significant imaginary component."""'], {}), "(\n 'Output may be unreliable because the RB decay constant has a significant imaginary component.'\n )\n", (6361, 6468), True, 'import warnings as _warnings\n'), ((9226, 9335), 'warnings.warn', '_warnings.warn', (['"""Output may be unreliable because the model is not approximately trace-preserving."""'], {}), "(\n 'Output may be unreliable because the model is not approximately 
trace-preserving.'\n )\n", (9240, 9335), True, 'import warnings as _warnings\n'), ((9480, 9601), 'warnings.warn', '_warnings.warn', (['"""Output may be unreliable because the RB decay constant has a significant imaginary component."""'], {}), "(\n 'Output may be unreliable because the RB decay constant has a significant imaginary component.'\n )\n", (9494, 9601), True, 'import warnings as _warnings\n'), ((12826, 12843), 'numpy.linalg.inv', '_np.linalg.inv', (['l'], {}), '(l)\n', (12840, 12843), True, 'import numpy as _np\n'), ((17665, 17686), 'numpy.linalg.eigvals', '_np.linalg.eigvals', (['R'], {}), '(R)\n', (17683, 17686), True, 'import numpy as _np\n'), ((17707, 17718), 'numpy.sort', '_np.sort', (['E'], {}), '(E)\n', (17715, 17718), True, 'import numpy as _np\n'), ((33948, 33991), 'numpy.array', '_np.array', (['[i for i in errormaps_gate_list]'], {}), '([i for i in errormaps_gate_list])\n', (33957, 33991), True, 'import numpy as _np\n'), ((9903, 9932), 'numpy.amax', '_np.amax', (['vec_l_operator.imag'], {}), '(vec_l_operator.imag)\n', (9911, 9932), True, 'import numpy as _np\n'), ((20026, 20045), 'numpy.sqrt', '_np.sqrt', (['model.dim'], {}), '(model.dim)\n', (20034, 20045), True, 'import numpy as _np\n'), ((33794, 33838), 'numpy.transpose', '_np.transpose', (['target_model.operations[gate]'], {}), '(target_model.operations[gate])\n', (33807, 33838), True, 'import numpy as _np\n'), ((4084, 4103), 'numpy.sqrt', '_np.sqrt', (['model.dim'], {}), '(model.dim)\n', (4092, 4103), True, 'import numpy as _np\n'), ((35425, 35522), 'pygsti.tools.optools.diamonddist', '_optls.diamonddist', (['error_gs.operations[gate]', "error_gs.operations['Gavg']"], {'mx_basis': 'mx_basis'}), "(error_gs.operations[gate], error_gs.operations['Gavg'],\n mx_basis=mx_basis)\n", (35443, 35522), True, 'from pygsti.tools import optools as _optls\n'), ((35697, 35783), 'pygsti.tools.optools.norm1to1', '_optls.norm1to1', (['gate_dif'], {'num_samples': '(1000)', 'mx_basis': 'mx_basis', 
'return_list': '(False)'}), '(gate_dif, num_samples=1000, mx_basis=mx_basis, return_list=\n False)\n', (35712, 35783), True, 'from pygsti.tools import optools as _optls\n')]
|
import csv
import datetime
import os
from django.contrib.gis.geos import Point
from django.core.management import BaseCommand, CommandError
from django.db import transaction
from geopy import Nominatim
from countries.models import Country
from report.models import Report, Sighting, ReportedViaChoice
from users.emails import overdue_reports_reminder
from users.models import User
class Command(BaseCommand):
help = "compute first sighting"
def handle(self, *args, **kwargs):
send_overdue_reminders()
def send_overdue_reminders():
for user in User.objects.filter(role__in=[User.COMMUNITY_LIAISON, User.MODERATOR, User.ADMIN]):
if user.enable_email_reminders:
overdue_tasks = user.get_overdue_tasks()
if overdue_tasks.count() and user.email:
overdue_reports_reminder(overdue_tasks, user)
print(f"Log email reminder sent to {user.email}")
|
[
"users.emails.overdue_reports_reminder",
"users.models.User.objects.filter"
] |
[((570, 657), 'users.models.User.objects.filter', 'User.objects.filter', ([], {'role__in': '[User.COMMUNITY_LIAISON, User.MODERATOR, User.ADMIN]'}), '(role__in=[User.COMMUNITY_LIAISON, User.MODERATOR, User.\n ADMIN])\n', (589, 657), False, 'from users.models import User\n'), ((817, 862), 'users.emails.overdue_reports_reminder', 'overdue_reports_reminder', (['overdue_tasks', 'user'], {}), '(overdue_tasks, user)\n', (841, 862), False, 'from users.emails import overdue_reports_reminder\n')]
|
import json
import logging
import os
import pprint
import shutil
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import deepdiff
from tests.test_helpers.type_helpers import PytestConfig
logger = logging.getLogger(__name__)
IGNORE_PATH_TIMESTAMPS = [
# Ignore timestamps from the ETL pipeline. A couple examples:
# root[0]['proposedSnapshot']['com.linkedin.pegasus2avro.metadata.snapshot.DatasetSnapshot']['aspects'][0]['com.linkedin.pegasus2avro.common.Ownership']['lastModified']['time']
# root[69]['proposedSnapshot']['com.linkedin.pegasus2avro.metadata.snapshot.DatasetSnapshot']['aspects'][0]['com.linkedin.pegasus2avro.schema.SchemaMetadata']['lastModified']['time']"
# root[0]['proposedSnapshot']['com.linkedin.pegasus2avro.metadata.snapshot.DatasetSnapshot']['aspects'][1]['com.linkedin.pegasus2avro.dataset.UpstreamLineage']['upstreams'][0]['auditStamp']['time']
r"root\[\d+\]\['proposedSnapshot'\].+\['aspects'\].+\['created'\]\['time'\]",
r"root\[\d+\]\['proposedSnapshot'\].+\['aspects'\].+\['lastModified'\]\['time'\]",
r"root\[\d+\]\['proposedSnapshot'\].+\['aspects'\].+\['createStamp'\]\['time'\]",
r"root\[\d+\]\['proposedSnapshot'\].+\['aspects'\].+\['auditStamp'\]\['time'\]",
]
class MCEConstants:
PROPOSED_SNAPSHOT = "proposedSnapshot"
DATASET_SNAPSHOT_CLASS = (
"com.linkedin.pegasus2avro.metadata.snapshot.DatasetSnapshot"
)
class MCPConstants:
CHANGE_TYPE = "changeType"
ENTITY_URN = "entityUrn"
ENTITY_TYPE = "entityType"
ASPECT_NAME = "aspectName"
ASPECT_VALUE = "aspect"
class EntityType:
DATASET = "dataset"
PIPELINE = "dataFlow"
FLOW = "dataFlow"
TASK = "dataJob"
JOB = "dataJob"
USER = "corpuser"
GROUP = "corpGroup"
def load_json_file(filename: Union[str, os.PathLike]) -> object:
with open(str(filename)) as f:
a = json.load(f)
return a
def clean_nones(value):
"""
Recursively remove all None values from dictionaries and lists, and returns
the result as a new dictionary or list.
"""
if isinstance(value, list):
return [clean_nones(x) for x in value if x is not None]
elif isinstance(value, dict):
return {key: clean_nones(val) for key, val in value.items() if val is not None}
else:
return value
def assert_mces_equal(
output: object, golden: object, ignore_paths: Optional[List[str]] = None
) -> None:
# This method assumes we're given a list of MCE json objects.
diff = deepdiff.DeepDiff(
golden, output, exclude_regex_paths=ignore_paths, ignore_order=True
)
if diff:
# Attempt a clean diff (removing None-s)
assert isinstance(output, list)
assert isinstance(golden, list)
clean_output = [clean_nones(o) for o in output]
clean_golden = [clean_nones(g) for g in golden]
clean_diff = deepdiff.DeepDiff(
clean_golden,
clean_output,
exclude_regex_paths=ignore_paths,
ignore_order=True,
)
if clean_diff != diff:
logger.warning(
f"MCE-s differ, clean MCE-s are fine\n{pprint.pformat(diff)}"
)
diff = clean_diff
assert (
not diff
), f"MCEs differ\n{pprint.pformat(diff)} \n output was: {json.dumps(output)}"
def check_golden_file(
    pytestconfig: PytestConfig,
    output_path: Union[str, os.PathLike],
    golden_path: Union[str, os.PathLike],
    ignore_paths: Optional[List[str]] = None,
) -> None:
    """Compare pipeline output against a golden file, optionally refreshing it.

    With --update-golden-files the golden file is (re)written whenever it is
    missing or the comparison fails; otherwise a missing golden file or a
    mismatch raises.
    """
    update_golden = pytestconfig.getoption("--update-golden-files")
    golden_exists = os.path.isfile(golden_path)
    if not (update_golden or golden_exists):
        raise FileNotFoundError(
            "Golden file does not exist. Please run with the --update-golden-files option to create."
        )
    output = load_json_file(output_path)
    if update_golden and not golden_exists:
        # First run in update mode: seed the golden file from the output itself.
        golden = load_json_file(output_path)
        shutil.copyfile(str(output_path), str(golden_path))
    else:
        golden = load_json_file(golden_path)
    try:
        assert_mces_equal(output, golden, ignore_paths)
    except AssertionError:
        if not update_golden:
            # Plain test run: propagate the comparison failure.
            raise
        # Update mode with a real diff: refresh the golden file instead of failing.
        shutil.copyfile(str(output_path), str(golden_path))
def _get_field_for_entity_type_in_mce(entity_type: str) -> str:
    """Returns the field to look for depending on the type of entity in the MCE"""
    if entity_type != EntityType.DATASET:
        # Only dataset snapshots are supported so far.
        raise Exception(f"Not implemented for entity_type {entity_type}")
    return MCEConstants.DATASET_SNAPSHOT_CLASS
def _get_filter(
    mce: bool = False, mcp: bool = False, entity_type: Optional[str] = None
) -> Callable[[Dict], bool]:
    """Build a predicate selecting MCE or MCP events, optionally narrowed by entity type.

    Returns an always-False predicate when neither flag is set.
    """
    if mce:
        # An MCE is recognized by the presence of the proposedSnapshot key.
        if entity_type:

            def mce_predicate(event: Dict) -> bool:
                return (
                    MCEConstants.PROPOSED_SNAPSHOT in event
                    and _get_field_for_entity_type_in_mce(str(entity_type))
                    in event[MCEConstants.PROPOSED_SNAPSHOT]
                )

            return mce_predicate
        return lambda event: MCEConstants.PROPOSED_SNAPSHOT in event
    if mcp:
        # An MCP is recognized by the presence of the changeType key.
        def mcp_predicate(event: Dict) -> bool:
            if MCPConstants.CHANGE_TYPE not in event:
                return False
            if entity_type:
                return event[MCPConstants.ENTITY_TYPE] == entity_type
            return True

        return mcp_predicate
    return lambda _: False
def _get_element(event: Dict[str, Any], path_spec: List[str]) -> Any:
try:
for p in path_spec:
event = event.get(p, {})
if not event:
return None
return event
except Exception as e:
print(event)
raise e
def _element_matches_pattern(
    event: Dict[str, Any], path_spec: List[str], pattern: str
) -> Tuple[bool, bool]:
    """Return (element_found, pattern_matched) for the element at *path_spec*.

    The element is stringified before the regex search, so non-string
    values can still be matched.
    """
    import re

    element = _get_element(event, path_spec)
    if element is None:
        return (False, False)
    matched = re.search(pattern, str(element)) is not None
    return (True, matched)
def assert_mcp_entity_urn(
    filter: str, entity_type: str, regex_pattern: str, file: str
) -> int:
    """Assert that every MCP of *entity_type* in *file* has an entityUrn matching *regex_pattern*.

    Returns the number of matching MCP events; raises if the file is not a
    JSON list or if any filtered event's urn is missing or does not match.
    """
    test_output = load_json_file(file)
    if not isinstance(test_output, list):
        raise Exception(
            f"Did not expect the file {file} to not contain a list of items"
        )
    path_spec = [MCPConstants.ENTITY_URN]
    event_filter = _get_filter(mcp=True, entity_type=entity_type)
    matches = [
        (event, _element_matches_pattern(event, path_spec, regex_pattern))
        for event in test_output
        if event_filter(event)
    ]
    # A failure is either a missing urn or a urn that does not match the pattern.
    failed_events = [m for m in matches if not m[1][0] or not m[1][1]]
    if failed_events:
        raise Exception("Failed to match events", failed_events)
    return len(matches)
def _get_mce_urn_path_spec(entity_type: str) -> List[str]:
    """Return the key path at which an MCE of *entity_type* stores its urn."""
    if entity_type != EntityType.DATASET:
        # Only dataset snapshots are supported so far.
        raise Exception(f"Not implemented for entity_type: {entity_type}")
    return [
        MCEConstants.PROPOSED_SNAPSHOT,
        MCEConstants.DATASET_SNAPSHOT_CLASS,
        "urn",
    ]
def _get_mcp_urn_path_spec() -> List[str]:
    """Return the key path at which an MCP envelope stores its entity urn."""
    return [MCPConstants.ENTITY_URN]
def assert_mce_entity_urn(
    filter: str, entity_type: str, regex_pattern: str, file: str
) -> int:
    """Assert that every MCE's urn in *file* matches *regex_pattern*.

    Returns the number of matching MCE events; raises if the file is not a
    JSON list or if any filtered event's urn is missing or does not match.
    """
    test_output = load_json_file(file)
    if isinstance(test_output, list):
        path_spec = _get_mce_urn_path_spec(entity_type)
        filter_operator = _get_filter(mce=True)
        filtered_events = [
            (x, _element_matches_pattern(x, path_spec, regex_pattern))
            for x in test_output
            if filter_operator(x)
        ]
        failed_events = [y for y in filtered_events if not y[1][0] or not y[1][1]]
        if failed_events:
            # BUG FIX: the message previously lacked the f-prefix, so the
            # {json.dumps(...)} placeholder was emitted literally instead of
            # being interpolated with the failing events.
            raise Exception(
                f"Failed to match events: {json.dumps(failed_events, indent=2)}"
            )
        return len(filtered_events)
    else:
        raise Exception(
            f"Did not expect the file {file} to not contain a list of items"
        )
def assert_for_each_entity(
    entity_type: str, aspect_name: str, aspect_field_matcher: Dict[str, str], file: str
) -> int:
    """Assert that an aspect name with the desired fields exists for each entity urn.

    Collects all urns of *entity_type* from both MCE and MCP events in *file*,
    then requires every urn to carry an MCP aspect named *aspect_name* whose
    fields match *aspect_field_matcher*. Returns the number of urns that
    passed. Raises AssertionError listing the urns that had no such aspect.
    """
    test_output = load_json_file(file)
    assert isinstance(test_output, list)
    # Build each filter once instead of per-element inside the comprehensions.
    mce_filter = _get_filter(mce=True, entity_type=entity_type)
    mcp_filter = _get_filter(mcp=True, entity_type=entity_type)
    # Set comprehensions instead of set([listcomp]) — same result, no throwaway list.
    mce_urns = {
        _get_element(x, _get_mce_urn_path_spec(entity_type))
        for x in test_output
        if mce_filter(x)
    }
    mcp_urns = {
        _get_element(x, _get_mcp_urn_path_spec()) for x in test_output if mcp_filter(x)
    }
    all_urns = mce_urns.union(mcp_urns)
    # there should not be any None urns
    assert None not in all_urns
    aspect_map = {urn: None for urn in all_urns}
    # Iterate over all MCPs, recording the decoded aspect payload per urn.
    for o in test_output:
        if not mcp_filter(o):
            continue
        if o.get(MCPConstants.ASPECT_NAME) == aspect_name:
            # load the inner aspect payload and assign to this urn
            aspect_map[o[MCPConstants.ENTITY_URN]] = json.loads(
                o.get(MCPConstants.ASPECT_VALUE, {}).get("value")
            )
    success: List[str] = []
    failures: List[str] = []
    for urn, aspect_val in aspect_map.items():
        if aspect_val is not None:
            for f in aspect_field_matcher:
                assert aspect_field_matcher[f] == _get_element(
                    aspect_val, [f]
                ), f"urn: {urn} -> Field {f} must match value {aspect_field_matcher[f]}, found {_get_element(aspect_val, [f])}"
            success.append(urn)
        else:
            print(f"Adding {urn} to failures")
            failures.append(urn)
    if success:
        print(f"Succeeded on assertion for urns {success}")
    if failures:
        # Raise explicitly instead of `assert False, msg`, which is silently
        # stripped when Python runs with -O.
        raise AssertionError(
            f"Failed to find aspect_name {aspect_name} for urns {json.dumps(failures, indent=2)}"
        )
    return len(success)
|
[
"json.load",
"pprint.pformat",
"json.dumps",
"os.path.isfile",
"deepdiff.DeepDiff",
"logging.getLogger"
] |
[((219, 246), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (236, 246), False, 'import logging\n'), ((2522, 2612), 'deepdiff.DeepDiff', 'deepdiff.DeepDiff', (['golden', 'output'], {'exclude_regex_paths': 'ignore_paths', 'ignore_order': '(True)'}), '(golden, output, exclude_regex_paths=ignore_paths,\n ignore_order=True)\n', (2539, 2612), False, 'import deepdiff\n'), ((3633, 3660), 'os.path.isfile', 'os.path.isfile', (['golden_path'], {}), '(golden_path)\n', (3647, 3660), False, 'import os\n'), ((1891, 1903), 'json.load', 'json.load', (['f'], {}), '(f)\n', (1900, 1903), False, 'import json\n'), ((2898, 3001), 'deepdiff.DeepDiff', 'deepdiff.DeepDiff', (['clean_golden', 'clean_output'], {'exclude_regex_paths': 'ignore_paths', 'ignore_order': '(True)'}), '(clean_golden, clean_output, exclude_regex_paths=\n ignore_paths, ignore_order=True)\n', (2915, 3001), False, 'import deepdiff\n'), ((3287, 3307), 'pprint.pformat', 'pprint.pformat', (['diff'], {}), '(diff)\n', (3301, 3307), False, 'import pprint\n'), ((3325, 3343), 'json.dumps', 'json.dumps', (['output'], {}), '(output)\n', (3335, 3343), False, 'import json\n'), ((10420, 10450), 'json.dumps', 'json.dumps', (['failures'], {'indent': '(2)'}), '(failures, indent=2)\n', (10430, 10450), False, 'import json\n'), ((3170, 3190), 'pprint.pformat', 'pprint.pformat', (['diff'], {}), '(diff)\n', (3184, 3190), False, 'import pprint\n')]
|
from tkinter import *
from tkinter import messagebox
import Clases_Bracket_Nuevo
class ClaseUsuarioNuevo():
    """Window for a logged-in user to fill out a World-Cup-style tournament bracket.

    Builds a Toplevel with eight 4-team groups (A-H), a round of 16, quarter-
    finals, semifinals, the final and a winner slot. Clicking a team button
    propagates its text to the next round via the handler classes in
    Clases_Bracket_Nuevo; the winner button submits the whole bracket.
    """

    # NOTE(review): defined without `self` — appears to be invoked as an
    # unbound/static function (first argument is the main window); confirm
    # against callers. `v_main` is never used inside the body.
    def VentanaUsuarioNuevo(v_main,nom_usuario,id_bracket):
        v=Toplevel()
        # Center the 1030px-wide window horizontally, flush with the top edge.
        v.geometry("1030x700+%d+0" %((v.winfo_screenwidth() - 1030) / 2))
        v.title("Bracket de Torneos")
        # --- Header frame: user label and name, top-center of the window ---
        frmUsuario=Frame(v)
        frmUsuario.grid()
        frmUsuario.place(in_=v,relx=.5,y=0)
        lbluser=Label(frmUsuario,text="<NAME>")
        lbluser.grid(row=0,column=0)
        lblnom=Label(frmUsuario,text=nom_usuario)
        lblnom.grid(row=1,column=0)
        # --- Main bracket frame, centered in the window; `f` tracks the grid
        # row and `c` the grid column as widgets are laid out ---
        frmGruposIzq=Frame(v)
        frmGruposIzq.grid()
        frmGruposIzq.place(in_=v,relx=.5,rely=.5,anchor="c")
        # ===== Left-hand groups (column 0): Groups A-D, 4 team buttons each =====
        f=0
        lblGrupoA=Label(frmGruposIzq,text="Grupo A")
        lblGrupoA.grid(row=f,column=0,pady=10)
        f+=1
        btnA1=Clases_Bracket_Nuevo.BotonesGrupos("A",1,frmGruposIzq,text="")
        btnA1.grid(row=f,column=0)
        f+=1
        btnA2=Clases_Bracket_Nuevo.BotonesGrupos("A",2,frmGruposIzq,text="")
        btnA2.grid(row=f,column=0)
        f+=1
        btnA3=Clases_Bracket_Nuevo.BotonesGrupos("A",3,frmGruposIzq,text="")
        btnA3.grid(row=f,column=0)
        f+=1
        btnA4=Clases_Bracket_Nuevo.BotonesGrupos("A",4,frmGruposIzq,text="")
        btnA4.grid(row=f,column=0)
        # --- Group B ---
        f+=1
        lblGrupoB=Label(frmGruposIzq,text="Grupo B")
        lblGrupoB.grid(row=f,column=0,pady=10)
        f+=1
        btnB1=Clases_Bracket_Nuevo.BotonesGrupos("B",1,frmGruposIzq,text="")
        btnB1.grid(row=f,column=0)
        f+=1
        btnB2=Clases_Bracket_Nuevo.BotonesGrupos("B",2,frmGruposIzq,text="")
        btnB2.grid(row=f,column=0)
        f+=1
        btnB3=Clases_Bracket_Nuevo.BotonesGrupos("B",3,frmGruposIzq,text="")
        btnB3.grid(row=f,column=0)
        f+=1
        btnB4=Clases_Bracket_Nuevo.BotonesGrupos("B",4,frmGruposIzq,text="")
        btnB4.grid(row=f,column=0)
        # --- Group C ---
        f+=1
        lblGrupoC=Label(frmGruposIzq,text="Grupo C")
        lblGrupoC.grid(row=f,column=0,pady=10)
        f+=1
        btnC1=Clases_Bracket_Nuevo.BotonesGrupos("C",1,frmGruposIzq,text="")
        btnC1.grid(row=f,column=0)
        f+=1
        btnC2=Clases_Bracket_Nuevo.BotonesGrupos("C",2,frmGruposIzq,text="")
        btnC2.grid(row=f,column=0)
        f+=1
        btnC3=Clases_Bracket_Nuevo.BotonesGrupos("C",3,frmGruposIzq,text="")
        btnC3.grid(row=f,column=0)
        f+=1
        btnC4=Clases_Bracket_Nuevo.BotonesGrupos("C",4,frmGruposIzq,text="")
        btnC4.grid(row=f,column=0)
        # --- Group D ---
        f+=1
        lblGrupoD=Label(frmGruposIzq,text="Grupo D")
        lblGrupoD.grid(row=f,column=0,pady=10)
        f+=1
        btnD1=Clases_Bracket_Nuevo.BotonesGrupos("D",1,frmGruposIzq,text="")
        btnD1.grid(row=f,column=0)
        f+=1
        btnD2=Clases_Bracket_Nuevo.BotonesGrupos("D",2,frmGruposIzq,text="")
        btnD2.grid(row=f,column=0)
        f+=1
        btnD3=Clases_Bracket_Nuevo.BotonesGrupos("D",3,frmGruposIzq,text="")
        btnD3.grid(row=f,column=0)
        f+=1
        btnD4=Clases_Bracket_Nuevo.BotonesGrupos("D",4,frmGruposIzq,text="")
        btnD4.grid(row=f,column=0)
        # ===== Right-hand groups (column 10): Groups E-H =====
        f=0
        c=10
        lblGrupoE=Label(frmGruposIzq,text="Grupo E")
        lblGrupoE.grid(row=f,column=c,pady=10)
        f+=1
        btnE1=Clases_Bracket_Nuevo.BotonesGrupos("E",1,frmGruposIzq,text="")
        btnE1.grid(row=f,column=c)
        f+=1
        btnE2=Clases_Bracket_Nuevo.BotonesGrupos("E",2,frmGruposIzq,text="")
        btnE2.grid(row=f,column=c)
        f+=1
        btnE3=Clases_Bracket_Nuevo.BotonesGrupos("E",3,frmGruposIzq,text="")
        btnE3.grid(row=f,column=c)
        f+=1
        btnE4=Clases_Bracket_Nuevo.BotonesGrupos("E",4,frmGruposIzq,text="")
        btnE4.grid(row=f,column=c)
        # --- Group F ---
        f+=1
        lblGrupoF=Label(frmGruposIzq,text="Grupo F")
        lblGrupoF.grid(row=f,column=c,pady=10)
        f+=1
        btnF1=Clases_Bracket_Nuevo.BotonesGrupos("F",1,frmGruposIzq,text="")
        btnF1.grid(row=f,column=c)
        f+=1
        btnF2=Clases_Bracket_Nuevo.BotonesGrupos("F",2,frmGruposIzq,text="")
        btnF2.grid(row=f,column=c)
        f+=1
        btnF3=Clases_Bracket_Nuevo.BotonesGrupos("F",3,frmGruposIzq,text="")
        btnF3.grid(row=f,column=c)
        f+=1
        btnF4=Clases_Bracket_Nuevo.BotonesGrupos("F",4,frmGruposIzq,text="")
        btnF4.grid(row=f,column=c)
        # --- Group G ---
        f+=1
        lblGrupoG=Label(frmGruposIzq,text="Grupo G")
        lblGrupoG.grid(row=f,column=c,pady=10)
        f+=1
        btnG1=Clases_Bracket_Nuevo.BotonesGrupos("G",1,frmGruposIzq,text="")
        btnG1.grid(row=f,column=c)
        f+=1
        btnG2=Clases_Bracket_Nuevo.BotonesGrupos("G",2,frmGruposIzq,text="")
        btnG2.grid(row=f,column=c)
        f+=1
        btnG3=Clases_Bracket_Nuevo.BotonesGrupos("G",3,frmGruposIzq,text="")
        btnG3.grid(row=f,column=c)
        f+=1
        btnG4=Clases_Bracket_Nuevo.BotonesGrupos("G",4,frmGruposIzq,text="")
        btnG4.grid(row=f,column=c)
        # --- Group H ---
        f+=1
        lblGrupoH=Label(frmGruposIzq,text="Grupo H")
        lblGrupoH.grid(row=f,column=c,pady=10)
        f+=1
        btnH1=Clases_Bracket_Nuevo.BotonesGrupos("H",1,frmGruposIzq,text="")
        btnH1.grid(row=f,column=c)
        f+=1
        btnH2=Clases_Bracket_Nuevo.BotonesGrupos("H",2,frmGruposIzq,text="")
        btnH2.grid(row=f,column=c)
        f+=1
        btnH3=Clases_Bracket_Nuevo.BotonesGrupos("H",3,frmGruposIzq,text="")
        btnH3.grid(row=f,column=c)
        f+=1
        btnH4=Clases_Bracket_Nuevo.BotonesGrupos("H",4,frmGruposIzq,text="")
        btnH4.grid(row=f,column=c)
        # ===== Round of 16 ("Octavos"), left side (column 1): A/B pairings =====
        f=0
        c=1
        lblOctavos=Label(frmGruposIzq,text="Octavos de Final")
        lblOctavos.grid(row=f,column=c,pady=10)
        f+=2
        btnOctA1=Button(frmGruposIzq,text="OctA1")
        btnOctA1.grid(row=f,column=c)
        f+=1
        btnOctB2=Button(frmGruposIzq,text="OctB2")
        btnOctB2.grid(row=f,column=c)
        f+=4
        btnOctB1=Button(frmGruposIzq,text="OctB1")
        btnOctB1.grid(row=f,column=c)
        f+=1
        btnOctA2=Button(frmGruposIzq,text="OctA2")
        btnOctA2.grid(row=f,column=c)
        # --- Round of 16, left side: C/D pairings ---
        f=12
        c=1
        btnOctC1=Button(frmGruposIzq,text="OctC1")
        btnOctC1.grid(row=f,column=c)
        f+=1
        btnOctD2=Button(frmGruposIzq,text="OctD2")
        btnOctD2.grid(row=f,column=c)
        f+=4
        btnOctD1=Button(frmGruposIzq,text="OctD1")
        btnOctD1.grid(row=f,column=c)
        f+=1
        btnOctC2=Button(frmGruposIzq,text="OctC2")
        btnOctC2.grid(row=f,column=c)
        # ===== Round of 16, right side (column 9): E/F pairings =====
        f=0
        c=9
        lblOctavos=Label(frmGruposIzq,text="Octavos de Final")
        lblOctavos.grid(row=f,column=c,pady=10)
        f+=2
        btnOctE1=Button(frmGruposIzq,text="OctE1")
        btnOctE1.grid(row=f,column=c)
        f+=1
        btnOctF2=Button(frmGruposIzq,text="OctF2")
        btnOctF2.grid(row=f,column=c)
        f+=4
        btnOctF1=Button(frmGruposIzq,text="OctF1")
        btnOctF1.grid(row=f,column=c)
        f+=1
        btnOctE2=Button(frmGruposIzq,text="OctE2")
        btnOctE2.grid(row=f,column=c)
        # --- Round of 16, right side: G/H pairings ---
        f=12
        c=9
        btnOctG1=Button(frmGruposIzq,text="OctG1")
        btnOctG1.grid(row=f,column=c)
        f+=1
        btnOctH2=Button(frmGruposIzq,text="OctH2")
        btnOctH2.grid(row=f,column=c)
        f+=4
        btnOctH1=Button(frmGruposIzq,text="OctH1")
        btnOctH1.grid(row=f,column=c)
        f+=1
        btnOctG2=Button(frmGruposIzq,text="OctG2")
        btnOctG2.grid(row=f,column=c)
        # ===== Wire group buttons: clicking a team copies its text into the
        # corresponding round-of-16 slot via the AccionGrupo* handlers =====
        btnA1.configure(command=lambda:Clases_Bracket_Nuevo.AccionesOctavos.AccionGrupoA(btnOctA1,btnOctA2,btnA1.cget("text")))
        btnA2.configure(command=lambda:Clases_Bracket_Nuevo.AccionesOctavos.AccionGrupoA(btnOctA1,btnOctA2,btnA2.cget("text")))
        btnA3.configure(command=lambda:Clases_Bracket_Nuevo.AccionesOctavos.AccionGrupoA(btnOctA1,btnOctA2,btnA3.cget("text")))
        btnA4.configure(command=lambda:Clases_Bracket_Nuevo.AccionesOctavos.AccionGrupoA(btnOctA1,btnOctA2,btnA4.cget("text")))
        # Group B
        btnB1.configure(command=lambda:Clases_Bracket_Nuevo.AccionesOctavos.AccionGrupoB(btnOctB1,btnOctB2,btnB1.cget("text")))
        btnB2.configure(command=lambda:Clases_Bracket_Nuevo.AccionesOctavos.AccionGrupoB(btnOctB1,btnOctB2,btnB2.cget("text")))
        btnB3.configure(command=lambda:Clases_Bracket_Nuevo.AccionesOctavos.AccionGrupoB(btnOctB1,btnOctB2,btnB3.cget("text")))
        btnB4.configure(command=lambda:Clases_Bracket_Nuevo.AccionesOctavos.AccionGrupoB(btnOctB1,btnOctB2,btnB4.cget("text")))
        # Group C
        btnC1.configure(command=lambda:Clases_Bracket_Nuevo.AccionesOctavos.AccionGrupoC(btnOctC1,btnOctC2,btnC1.cget("text")))
        btnC2.configure(command=lambda:Clases_Bracket_Nuevo.AccionesOctavos.AccionGrupoC(btnOctC1,btnOctC2,btnC2.cget("text")))
        btnC3.configure(command=lambda:Clases_Bracket_Nuevo.AccionesOctavos.AccionGrupoC(btnOctC1,btnOctC2,btnC3.cget("text")))
        btnC4.configure(command=lambda:Clases_Bracket_Nuevo.AccionesOctavos.AccionGrupoC(btnOctC1,btnOctC2,btnC4.cget("text")))
        # Group D
        btnD1.configure(command=lambda:Clases_Bracket_Nuevo.AccionesOctavos.AccionGrupoD(btnOctD1,btnOctD2,btnD1.cget("text")))
        btnD2.configure(command=lambda:Clases_Bracket_Nuevo.AccionesOctavos.AccionGrupoD(btnOctD1,btnOctD2,btnD2.cget("text")))
        btnD3.configure(command=lambda:Clases_Bracket_Nuevo.AccionesOctavos.AccionGrupoD(btnOctD1,btnOctD2,btnD3.cget("text")))
        btnD4.configure(command=lambda:Clases_Bracket_Nuevo.AccionesOctavos.AccionGrupoD(btnOctD1,btnOctD2,btnD4.cget("text")))
        # Group E
        btnE1.configure(command=lambda:Clases_Bracket_Nuevo.AccionesOctavos.AccionGrupoE(btnOctE1,btnOctE2,btnE1.cget("text")))
        btnE2.configure(command=lambda:Clases_Bracket_Nuevo.AccionesOctavos.AccionGrupoE(btnOctE1,btnOctE2,btnE2.cget("text")))
        btnE3.configure(command=lambda:Clases_Bracket_Nuevo.AccionesOctavos.AccionGrupoE(btnOctE1,btnOctE2,btnE3.cget("text")))
        btnE4.configure(command=lambda:Clases_Bracket_Nuevo.AccionesOctavos.AccionGrupoE(btnOctE1,btnOctE2,btnE4.cget("text")))
        # Group F
        btnF1.configure(command=lambda:Clases_Bracket_Nuevo.AccionesOctavos.AccionGrupoF(btnOctF1,btnOctF2,btnF1.cget("text")))
        btnF2.configure(command=lambda:Clases_Bracket_Nuevo.AccionesOctavos.AccionGrupoF(btnOctF1,btnOctF2,btnF2.cget("text")))
        btnF3.configure(command=lambda:Clases_Bracket_Nuevo.AccionesOctavos.AccionGrupoF(btnOctF1,btnOctF2,btnF3.cget("text")))
        btnF4.configure(command=lambda:Clases_Bracket_Nuevo.AccionesOctavos.AccionGrupoF(btnOctF1,btnOctF2,btnF4.cget("text")))
        # Group G
        btnG1.configure(command=lambda:Clases_Bracket_Nuevo.AccionesOctavos.AccionGrupoG(btnOctG1,btnOctG2,btnG1.cget("text")))
        btnG2.configure(command=lambda:Clases_Bracket_Nuevo.AccionesOctavos.AccionGrupoG(btnOctG1,btnOctG2,btnG2.cget("text")))
        btnG3.configure(command=lambda:Clases_Bracket_Nuevo.AccionesOctavos.AccionGrupoG(btnOctG1,btnOctG2,btnG3.cget("text")))
        btnG4.configure(command=lambda:Clases_Bracket_Nuevo.AccionesOctavos.AccionGrupoG(btnOctG1,btnOctG2,btnG4.cget("text")))
        # Group H
        btnH1.configure(command=lambda:Clases_Bracket_Nuevo.AccionesOctavos.AccionGrupoH(btnOctH1,btnOctH2,btnH1.cget("text")))
        btnH2.configure(command=lambda:Clases_Bracket_Nuevo.AccionesOctavos.AccionGrupoH(btnOctH1,btnOctH2,btnH2.cget("text")))
        btnH3.configure(command=lambda:Clases_Bracket_Nuevo.AccionesOctavos.AccionGrupoH(btnOctH1,btnOctH2,btnH3.cget("text")))
        btnH4.configure(command=lambda:Clases_Bracket_Nuevo.AccionesOctavos.AccionGrupoH(btnOctH1,btnOctH2,btnH4.cget("text")))
        # ===== Quarterfinals ("Cuartos"), left side (column 2): A/B and C/D =====
        f=0
        c=2
        lblCuartos=Label(frmGruposIzq,text="Cuartos de Final")
        lblCuartos.grid(row=f,column=c,pady=10)
        f+=2
        btnCuartosA1=Button(frmGruposIzq,text="CuartosA1")
        btnCuartosA1.grid(row=f,column=c,rowspan=2)
        f+=5
        btnCuartosB1=Button(frmGruposIzq,text="CuartosB1")
        btnCuartosB1.grid(row=f,column=c,rowspan=2)
        f=12
        c=2
        btnCuartosC1=Button(frmGruposIzq,text="CuartosC1")
        btnCuartosC1.grid(row=f,column=c,rowspan=2)
        f+=5
        btnCuartosD1=Button(frmGruposIzq,text="CuartosD1")
        btnCuartosD1.grid(row=f,column=c,rowspan=2)
        # --- Quarterfinals, right side (column 8): E/F and G/H ---
        f=0
        c=8
        lblCuartos=Label(frmGruposIzq,text="Cuartos de Final")
        lblCuartos.grid(row=f,column=c,pady=10)
        f+=2
        btnCuartosE1=Button(frmGruposIzq,text="CuartosE1")
        btnCuartosE1.grid(row=f,column=c,rowspan=2)
        f+=5
        btnCuartosF1=Button(frmGruposIzq,text="CuartosF1")
        btnCuartosF1.grid(row=f,column=c,rowspan=2)
        f=12
        c=8
        btnCuartosG1=Button(frmGruposIzq,text="CuartosG1")
        btnCuartosG1.grid(row=f,column=c,rowspan=2)
        f+=5
        btnCuartosH1=Button(frmGruposIzq,text="CuartosH1")
        btnCuartosH1.grid(row=f,column=c,rowspan=2)
        # ===== Wire round-of-16 buttons into quarterfinal slots =====
        btnOctA1.configure(command=lambda:Clases_Bracket_Nuevo.AccionesCuartos.AccionOctAyB1(btnCuartosA1,btnOctA1.cget("text")))
        btnOctB2.configure(command=lambda:Clases_Bracket_Nuevo.AccionesCuartos.AccionOctAyB1(btnCuartosA1,btnOctB2.cget("text")))
        btnOctB1.configure(command=lambda:Clases_Bracket_Nuevo.AccionesCuartos.AccionOctAyB2(btnCuartosB1,btnOctB1.cget("text")))
        btnOctA2.configure(command=lambda:Clases_Bracket_Nuevo.AccionesCuartos.AccionOctAyB2(btnCuartosB1,btnOctA2.cget("text")))
        # C and D pairings
        btnOctC1.configure(command=lambda:Clases_Bracket_Nuevo.AccionesCuartos.AccionOctCyD1(btnCuartosC1,btnOctC1.cget("text")))
        btnOctD2.configure(command=lambda:Clases_Bracket_Nuevo.AccionesCuartos.AccionOctCyD1(btnCuartosC1,btnOctD2.cget("text")))
        btnOctD1.configure(command=lambda:Clases_Bracket_Nuevo.AccionesCuartos.AccionOctCyD2(btnCuartosD1,btnOctD1.cget("text")))
        btnOctC2.configure(command=lambda:Clases_Bracket_Nuevo.AccionesCuartos.AccionOctCyD2(btnCuartosD1,btnOctC2.cget("text")))
        # E and F pairings
        btnOctE1.configure(command=lambda:Clases_Bracket_Nuevo.AccionesCuartos.AccionOctEyF1(btnCuartosE1,btnOctE1.cget("text")))
        btnOctF2.configure(command=lambda:Clases_Bracket_Nuevo.AccionesCuartos.AccionOctEyF1(btnCuartosE1,btnOctF2.cget("text")))
        btnOctF1.configure(command=lambda:Clases_Bracket_Nuevo.AccionesCuartos.AccionOctEyF2(btnCuartosF1,btnOctF1.cget("text")))
        btnOctE2.configure(command=lambda:Clases_Bracket_Nuevo.AccionesCuartos.AccionOctEyF2(btnCuartosF1,btnOctE2.cget("text")))
        # G and H pairings
        btnOctG1.configure(command=lambda:Clases_Bracket_Nuevo.AccionesCuartos.AccionOctGyH1(btnCuartosG1,btnOctG1.cget("text")))
        btnOctH2.configure(command=lambda:Clases_Bracket_Nuevo.AccionesCuartos.AccionOctGyH1(btnCuartosG1,btnOctH2.cget("text")))
        btnOctH1.configure(command=lambda:Clases_Bracket_Nuevo.AccionesCuartos.AccionOctGyH2(btnCuartosH1,btnOctH1.cget("text")))
        btnOctG2.configure(command=lambda:Clases_Bracket_Nuevo.AccionesCuartos.AccionOctGyH2(btnCuartosH1,btnOctG2.cget("text")))
        # ===== Semifinals, left side (column 3) =====
        f=0
        c=3
        lblCuartos=Label(frmGruposIzq,text="Semifinales")
        lblCuartos.grid(row=f,column=c,pady=10)
        f+=5
        btnSemifinalA1=Button(frmGruposIzq,text="SemifinalA1")
        btnSemifinalA1.grid(row=f,column=c,rowspan=1)
        f+=10
        btnSemifinalB1=Button(frmGruposIzq,text="SemifinalB1")
        btnSemifinalB1.grid(row=f,column=c,rowspan=1)
        # --- Semifinals, right side (column 7) ---
        f=0
        c=7
        lblCuartos=Label(frmGruposIzq,text="Semifinales")
        lblCuartos.grid(row=f,column=c,pady=10)
        f+=5
        btnSemifinalC1=Button(frmGruposIzq,text="SemifinalC1")
        btnSemifinalC1.grid(row=f,column=c,rowspan=1)
        f+=10
        btnSemifinalD1=Button(frmGruposIzq,text="SemifinalD1")
        btnSemifinalD1.grid(row=f,column=c,rowspan=1)
        # ===== Wire quarterfinal buttons into semifinal slots =====
        btnCuartosA1.configure(command=lambda:Clases_Bracket_Nuevo.AccionesSemifinal.AccionCuartosAyB1(btnSemifinalA1,btnCuartosA1.cget("text")))
        btnCuartosB1.configure(command=lambda:Clases_Bracket_Nuevo.AccionesSemifinal.AccionCuartosAyB1(btnSemifinalA1,btnCuartosB1.cget("text")))
        # C and D
        btnCuartosC1.configure(command=lambda:Clases_Bracket_Nuevo.AccionesSemifinal.AccionCuartosCyD1(btnSemifinalB1,btnCuartosC1.cget("text")))
        btnCuartosD1.configure(command=lambda:Clases_Bracket_Nuevo.AccionesSemifinal.AccionCuartosCyD1(btnSemifinalB1,btnCuartosD1.cget("text")))
        # E and F
        btnCuartosE1.configure(command=lambda:Clases_Bracket_Nuevo.AccionesSemifinal.AccionCuartosEyF1(btnSemifinalC1,btnCuartosE1.cget("text")))
        btnCuartosF1.configure(command=lambda:Clases_Bracket_Nuevo.AccionesSemifinal.AccionCuartosEyF1(btnSemifinalC1,btnCuartosF1.cget("text")))
        # G and H
        btnCuartosG1.configure(command=lambda:Clases_Bracket_Nuevo.AccionesSemifinal.AccionCuartosGyH1(btnSemifinalD1,btnCuartosG1.cget("text")))
        btnCuartosH1.configure(command=lambda:Clases_Bracket_Nuevo.AccionesSemifinal.AccionCuartosGyH1(btnSemifinalD1,btnCuartosH1.cget("text")))
        # ===== Final, left slot (column 4) =====
        f=0
        c=4
        lblCuartos=Label(frmGruposIzq,text="Final")
        lblCuartos.grid(row=f,column=c,pady=10)
        f+=10
        btnFinalA1=Button(frmGruposIzq,text="FinalA1")
        btnFinalA1.grid(row=f,column=c,rowspan=1)
        # --- Final, right slot (column 6) ---
        f=0
        c=6
        lblCuartos=Label(frmGruposIzq,text="Final")
        lblCuartos.grid(row=f,column=c,pady=10)
        f+=10
        btnFinalB1=Button(frmGruposIzq,text="FinalB1")
        btnFinalB1.grid(row=f,column=c,rowspan=1)
        # ===== Wire semifinal buttons into the final slots =====
        btnSemifinalA1.configure(command=lambda:Clases_Bracket_Nuevo.AccionesFinal.AccionSemifinalAyB1(btnFinalA1,btnSemifinalA1.cget("text")))
        btnSemifinalB1.configure(command=lambda:Clases_Bracket_Nuevo.AccionesFinal.AccionSemifinalAyB1(btnFinalA1,btnSemifinalB1.cget("text")))
        btnSemifinalC1.configure(command=lambda:Clases_Bracket_Nuevo.AccionesFinal.AccionSemifinalCyD1(btnFinalB1,btnSemifinalC1.cget("text")))
        btnSemifinalD1.configure(command=lambda:Clases_Bracket_Nuevo.AccionesFinal.AccionSemifinalCyD1(btnFinalB1,btnSemifinalD1.cget("text")))
        # ===== Winner slot (column 5, centered) =====
        f=0
        c=5
        lblCuartos=Label(frmGruposIzq,text="Ganador")
        lblCuartos.grid(row=f,column=c,pady=10,padx=60)
        f+=10
        btnGanador=Button(frmGruposIzq,text="--Ganador--")
        btnGanador.grid(row=f,column=c,rowspan=1)
        # ===== Wire final buttons into the winner slot =====
        btnFinalA1.configure(command=lambda:Clases_Bracket_Nuevo.AccionesGanador.AccionFinalAyB1(btnGanador,btnFinalA1.cget("text")))
        btnFinalB1.configure(command=lambda:Clases_Bracket_Nuevo.AccionesGanador.AccionFinalAyB1(btnGanador,btnFinalB1.cget("text")))
        # Winner button submits the whole bracket for validation/saving.
        # Argument order: round-of-16 (A/B, C/D, E/F, G/H slots), then
        # quarterfinals, semifinals, final, winner, the window, and the bracket id.
        btnGanador.configure(command=lambda:Clases_Bracket_Nuevo.Confirmar.VerificarBracket(
            btnOctA1.cget("text"),btnOctB2.cget("text"),btnOctB1.cget("text"),btnOctA2.cget("text"),
            btnOctC1.cget("text"),btnOctD2.cget("text"),btnOctD1.cget("text"),btnOctC2.cget("text"),
            btnOctE1.cget("text"),btnOctF2.cget("text"),btnOctF1.cget("text"),btnOctE2.cget("text"),
            btnOctG1.cget("text"),btnOctH2.cget("text"),btnOctH1.cget("text"),btnOctG2.cget("text"),
            btnCuartosA1.cget("text"),btnCuartosB1.cget("text"),
            btnCuartosC1.cget("text"),btnCuartosD1.cget("text"),
            btnCuartosE1.cget("text"),btnCuartosF1.cget("text"),
            btnCuartosG1.cget("text"),btnCuartosH1.cget("text"),
            btnSemifinalA1.cget("text"),btnSemifinalB1.cget("text"),
            btnSemifinalC1.cget("text"),btnSemifinalD1.cget("text"),
            btnFinalA1.cget("text"),btnFinalB1.cget("text"),
            btnGanador.cget("text"),
            v,
            id_bracket
            ))
        # --- Intercept the window's close (X) button: remind the user to save
        # the bracket instead of closing the window ---
        def cerrar_con_equis():
            messagebox.showinfo("BM","Debe de guardar su bracket primero")
        v.protocol('WM_DELETE_WINDOW', cerrar_con_equis)
        v.mainloop()
|
[
"Clases_Bracket_Nuevo.BotonesGrupos",
"tkinter.messagebox.showinfo"
] |
[((951, 1016), 'Clases_Bracket_Nuevo.BotonesGrupos', 'Clases_Bracket_Nuevo.BotonesGrupos', (['"""A"""', '(1)', 'frmGruposIzq'], {'text': '""""""'}), "('A', 1, frmGruposIzq, text='')\n", (985, 1016), False, 'import Clases_Bracket_Nuevo\n'), ((1081, 1146), 'Clases_Bracket_Nuevo.BotonesGrupos', 'Clases_Bracket_Nuevo.BotonesGrupos', (['"""A"""', '(2)', 'frmGruposIzq'], {'text': '""""""'}), "('A', 2, frmGruposIzq, text='')\n", (1115, 1146), False, 'import Clases_Bracket_Nuevo\n'), ((1211, 1276), 'Clases_Bracket_Nuevo.BotonesGrupos', 'Clases_Bracket_Nuevo.BotonesGrupos', (['"""A"""', '(3)', 'frmGruposIzq'], {'text': '""""""'}), "('A', 3, frmGruposIzq, text='')\n", (1245, 1276), False, 'import Clases_Bracket_Nuevo\n'), ((1341, 1406), 'Clases_Bracket_Nuevo.BotonesGrupos', 'Clases_Bracket_Nuevo.BotonesGrupos', (['"""A"""', '(4)', 'frmGruposIzq'], {'text': '""""""'}), "('A', 4, frmGruposIzq, text='')\n", (1375, 1406), False, 'import Clases_Bracket_Nuevo\n'), ((1653, 1718), 'Clases_Bracket_Nuevo.BotonesGrupos', 'Clases_Bracket_Nuevo.BotonesGrupos', (['"""B"""', '(1)', 'frmGruposIzq'], {'text': '""""""'}), "('B', 1, frmGruposIzq, text='')\n", (1687, 1718), False, 'import Clases_Bracket_Nuevo\n'), ((1783, 1848), 'Clases_Bracket_Nuevo.BotonesGrupos', 'Clases_Bracket_Nuevo.BotonesGrupos', (['"""B"""', '(2)', 'frmGruposIzq'], {'text': '""""""'}), "('B', 2, frmGruposIzq, text='')\n", (1817, 1848), False, 'import Clases_Bracket_Nuevo\n'), ((1913, 1978), 'Clases_Bracket_Nuevo.BotonesGrupos', 'Clases_Bracket_Nuevo.BotonesGrupos', (['"""B"""', '(3)', 'frmGruposIzq'], {'text': '""""""'}), "('B', 3, frmGruposIzq, text='')\n", (1947, 1978), False, 'import Clases_Bracket_Nuevo\n'), ((2043, 2108), 'Clases_Bracket_Nuevo.BotonesGrupos', 'Clases_Bracket_Nuevo.BotonesGrupos', (['"""B"""', '(4)', 'frmGruposIzq'], {'text': '""""""'}), "('B', 4, frmGruposIzq, text='')\n", (2077, 2108), False, 'import Clases_Bracket_Nuevo\n'), ((2357, 2422), 'Clases_Bracket_Nuevo.BotonesGrupos', 
'Clases_Bracket_Nuevo.BotonesGrupos', (['"""C"""', '(1)', 'frmGruposIzq'], {'text': '""""""'}), "('C', 1, frmGruposIzq, text='')\n", (2391, 2422), False, 'import Clases_Bracket_Nuevo\n'), ((2487, 2552), 'Clases_Bracket_Nuevo.BotonesGrupos', 'Clases_Bracket_Nuevo.BotonesGrupos', (['"""C"""', '(2)', 'frmGruposIzq'], {'text': '""""""'}), "('C', 2, frmGruposIzq, text='')\n", (2521, 2552), False, 'import Clases_Bracket_Nuevo\n'), ((2617, 2682), 'Clases_Bracket_Nuevo.BotonesGrupos', 'Clases_Bracket_Nuevo.BotonesGrupos', (['"""C"""', '(3)', 'frmGruposIzq'], {'text': '""""""'}), "('C', 3, frmGruposIzq, text='')\n", (2651, 2682), False, 'import Clases_Bracket_Nuevo\n'), ((2747, 2812), 'Clases_Bracket_Nuevo.BotonesGrupos', 'Clases_Bracket_Nuevo.BotonesGrupos', (['"""C"""', '(4)', 'frmGruposIzq'], {'text': '""""""'}), "('C', 4, frmGruposIzq, text='')\n", (2781, 2812), False, 'import Clases_Bracket_Nuevo\n'), ((3061, 3126), 'Clases_Bracket_Nuevo.BotonesGrupos', 'Clases_Bracket_Nuevo.BotonesGrupos', (['"""D"""', '(1)', 'frmGruposIzq'], {'text': '""""""'}), "('D', 1, frmGruposIzq, text='')\n", (3095, 3126), False, 'import Clases_Bracket_Nuevo\n'), ((3191, 3256), 'Clases_Bracket_Nuevo.BotonesGrupos', 'Clases_Bracket_Nuevo.BotonesGrupos', (['"""D"""', '(2)', 'frmGruposIzq'], {'text': '""""""'}), "('D', 2, frmGruposIzq, text='')\n", (3225, 3256), False, 'import Clases_Bracket_Nuevo\n'), ((3321, 3386), 'Clases_Bracket_Nuevo.BotonesGrupos', 'Clases_Bracket_Nuevo.BotonesGrupos', (['"""D"""', '(3)', 'frmGruposIzq'], {'text': '""""""'}), "('D', 3, frmGruposIzq, text='')\n", (3355, 3386), False, 'import Clases_Bracket_Nuevo\n'), ((3451, 3516), 'Clases_Bracket_Nuevo.BotonesGrupos', 'Clases_Bracket_Nuevo.BotonesGrupos', (['"""D"""', '(4)', 'frmGruposIzq'], {'text': '""""""'}), "('D', 4, frmGruposIzq, text='')\n", (3485, 3516), False, 'import Clases_Bracket_Nuevo\n'), ((3879, 3944), 'Clases_Bracket_Nuevo.BotonesGrupos', 'Clases_Bracket_Nuevo.BotonesGrupos', (['"""E"""', '(1)', 
'frmGruposIzq'], {'text': '""""""'}), "('E', 1, frmGruposIzq, text='')\n", (3913, 3944), False, 'import Clases_Bracket_Nuevo\n'), ((4009, 4074), 'Clases_Bracket_Nuevo.BotonesGrupos', 'Clases_Bracket_Nuevo.BotonesGrupos', (['"""E"""', '(2)', 'frmGruposIzq'], {'text': '""""""'}), "('E', 2, frmGruposIzq, text='')\n", (4043, 4074), False, 'import Clases_Bracket_Nuevo\n'), ((4139, 4204), 'Clases_Bracket_Nuevo.BotonesGrupos', 'Clases_Bracket_Nuevo.BotonesGrupos', (['"""E"""', '(3)', 'frmGruposIzq'], {'text': '""""""'}), "('E', 3, frmGruposIzq, text='')\n", (4173, 4204), False, 'import Clases_Bracket_Nuevo\n'), ((4269, 4334), 'Clases_Bracket_Nuevo.BotonesGrupos', 'Clases_Bracket_Nuevo.BotonesGrupos', (['"""E"""', '(4)', 'frmGruposIzq'], {'text': '""""""'}), "('E', 4, frmGruposIzq, text='')\n", (4303, 4334), False, 'import Clases_Bracket_Nuevo\n'), ((4583, 4648), 'Clases_Bracket_Nuevo.BotonesGrupos', 'Clases_Bracket_Nuevo.BotonesGrupos', (['"""F"""', '(1)', 'frmGruposIzq'], {'text': '""""""'}), "('F', 1, frmGruposIzq, text='')\n", (4617, 4648), False, 'import Clases_Bracket_Nuevo\n'), ((4713, 4778), 'Clases_Bracket_Nuevo.BotonesGrupos', 'Clases_Bracket_Nuevo.BotonesGrupos', (['"""F"""', '(2)', 'frmGruposIzq'], {'text': '""""""'}), "('F', 2, frmGruposIzq, text='')\n", (4747, 4778), False, 'import Clases_Bracket_Nuevo\n'), ((4843, 4908), 'Clases_Bracket_Nuevo.BotonesGrupos', 'Clases_Bracket_Nuevo.BotonesGrupos', (['"""F"""', '(3)', 'frmGruposIzq'], {'text': '""""""'}), "('F', 3, frmGruposIzq, text='')\n", (4877, 4908), False, 'import Clases_Bracket_Nuevo\n'), ((4973, 5038), 'Clases_Bracket_Nuevo.BotonesGrupos', 'Clases_Bracket_Nuevo.BotonesGrupos', (['"""F"""', '(4)', 'frmGruposIzq'], {'text': '""""""'}), "('F', 4, frmGruposIzq, text='')\n", (5007, 5038), False, 'import Clases_Bracket_Nuevo\n'), ((5287, 5352), 'Clases_Bracket_Nuevo.BotonesGrupos', 'Clases_Bracket_Nuevo.BotonesGrupos', (['"""G"""', '(1)', 'frmGruposIzq'], {'text': '""""""'}), "('G', 1, frmGruposIzq, 
text='')\n", (5321, 5352), False, 'import Clases_Bracket_Nuevo\n'), ((5417, 5482), 'Clases_Bracket_Nuevo.BotonesGrupos', 'Clases_Bracket_Nuevo.BotonesGrupos', (['"""G"""', '(2)', 'frmGruposIzq'], {'text': '""""""'}), "('G', 2, frmGruposIzq, text='')\n", (5451, 5482), False, 'import Clases_Bracket_Nuevo\n'), ((5547, 5612), 'Clases_Bracket_Nuevo.BotonesGrupos', 'Clases_Bracket_Nuevo.BotonesGrupos', (['"""G"""', '(3)', 'frmGruposIzq'], {'text': '""""""'}), "('G', 3, frmGruposIzq, text='')\n", (5581, 5612), False, 'import Clases_Bracket_Nuevo\n'), ((5677, 5742), 'Clases_Bracket_Nuevo.BotonesGrupos', 'Clases_Bracket_Nuevo.BotonesGrupos', (['"""G"""', '(4)', 'frmGruposIzq'], {'text': '""""""'}), "('G', 4, frmGruposIzq, text='')\n", (5711, 5742), False, 'import Clases_Bracket_Nuevo\n'), ((5991, 6056), 'Clases_Bracket_Nuevo.BotonesGrupos', 'Clases_Bracket_Nuevo.BotonesGrupos', (['"""H"""', '(1)', 'frmGruposIzq'], {'text': '""""""'}), "('H', 1, frmGruposIzq, text='')\n", (6025, 6056), False, 'import Clases_Bracket_Nuevo\n'), ((6121, 6186), 'Clases_Bracket_Nuevo.BotonesGrupos', 'Clases_Bracket_Nuevo.BotonesGrupos', (['"""H"""', '(2)', 'frmGruposIzq'], {'text': '""""""'}), "('H', 2, frmGruposIzq, text='')\n", (6155, 6186), False, 'import Clases_Bracket_Nuevo\n'), ((6251, 6316), 'Clases_Bracket_Nuevo.BotonesGrupos', 'Clases_Bracket_Nuevo.BotonesGrupos', (['"""H"""', '(3)', 'frmGruposIzq'], {'text': '""""""'}), "('H', 3, frmGruposIzq, text='')\n", (6285, 6316), False, 'import Clases_Bracket_Nuevo\n'), ((6381, 6446), 'Clases_Bracket_Nuevo.BotonesGrupos', 'Clases_Bracket_Nuevo.BotonesGrupos', (['"""H"""', '(4)', 'frmGruposIzq'], {'text': '""""""'}), "('H', 4, frmGruposIzq, text='')\n", (6415, 6446), False, 'import Clases_Bracket_Nuevo\n'), ((33140, 33203), 'tkinter.messagebox.showinfo', 'messagebox.showinfo', (['"""BM"""', '"""Debe de guardar su bracket primero"""'], {}), "('BM', 'Debe de guardar su bracket primero')\n", (33159, 33203), False, 'from tkinter import messagebox\n')]
|
#!/usr/bin/env python
import argparse
import os
import glob
import collections
import tensorflow as tf
import numpy as np
from tensorflow.keras import Model
# 100k
# USER_SHAPE = 943
# ITEM_SHAPE = 1682
# POSTFIX = "-100k"
# 25m
USER_SHAPE = 162550 # 162541
ITEM_SHAPE = 209180 # 209171
POSTFIX = "-25m"
tf.enable_eager_execution()
tf.debugging.set_log_device_placement(True)
gpus = tf.config.experimental.list_physical_devices('GPU')
if gpus:
try:
# Currently, memory growth needs to be the same across GPUs
for gpu in gpus:
tf.config.experimental.set_memory_growth(gpu, True)
logical_gpus = tf.config.experimental.list_logical_devices('GPU')
print(len(gpus), "Physical GPUs,", len(logical_gpus), "Logical GPUs")
except RuntimeError as e:
# Memory growth must be set before GPUs have been initialized
print(e)
def load(data_dir):
    """Build a TFRecordDataset from every shard matching ``part-*`` in data_dir."""
    shard_pattern = os.path.join(data_dir, "part-*")
    shard_paths = tf.io.gfile.glob(shard_pattern)
    print(shard_paths)
    return tf.data.TFRecordDataset(shard_paths)
def read_tfrecord_fn(example_proto):
    """Parse one serialized Example into (user, item, rating) tensors."""
    feature_spec = {
        "user": tf.FixedLenFeature((), tf.int64, default_value=0),
        "item": tf.FixedLenFeature((), tf.int64, default_value=0),
        "rating": tf.FixedLenFeature((), tf.float32, default_value=0),
    }
    parsed = tf.io.parse_single_example(example_proto, feature_spec)
    return parsed["user"], parsed["item"], parsed["rating"]
def build_indices(dataset):
    """Collect (user, item) index tensors and rating values from the dataset.

    Returns a pair of lists suitable for assembling a SparseTensor.
    """
    indices = []
    values = []
    for count, record in enumerate(dataset):
        indices.append(tf.stack(values=[record[0], record[1]], axis=0))
        values.append(record[2])
        # Progress log every 10k records (fires on the first record too).
        if count % 10000 == 0:
            print("load data: ", count)
    return indices, values
def build_rating_sparse_tensor(tfrecord_dataset):
    """Convert a TFRecord dataset of (user, item, rating) rows to a SparseTensor.

    The dense shape is the full USER_SHAPE x ITEM_SHAPE rating matrix; only
    observed ratings appear as sparse entries.
    """
    dataset = tfrecord_dataset.map(read_tfrecord_fn)
    indices, values = build_indices(dataset)
    # NOTE(review): these prints dump the full index/value lists — noisy for
    # the 25M dataset; presumably left in for debugging.
    print(indices)
    print(values)
    st = tf.sparse.SparseTensor(
        indices=tf.stack(values = indices, axis = 0),
        values=tf.stack(values = values, axis = 0),
        dense_shape=[USER_SHAPE, ITEM_SHAPE])
    print(st)
    return st
class MyModel(Model):
    """Matrix-factorization model holding user (U) and item (V) embedding tables."""

    def __init__(self, embed_dim):
        # Initialize the embeddings using a normal distribution.
        super(MyModel, self).__init__()
        self.embedding_dim = embed_dim
        # U: one trainable embedding row per user id.
        self.U = tf.Variable(tf.random.normal(
            [USER_SHAPE, embed_dim], stddev=1.), trainable=True)
        # V: one trainable embedding row per item id.
        self.V = tf.Variable(tf.random.normal(
            [ITEM_SHAPE, embed_dim], stddev=1.), trainable=True)
def build_model(train_dataset, eval_dataset, max_iterations, learning_rate=1, embed_dim = 3):
    """Train a matrix-factorization model with plain SGD on squared error.

    Args:
        train_dataset: TFRecord dataset of training (user, item, rating) rows.
        eval_dataset: TFRecord dataset used only to report test loss.
        max_iterations: number of full-batch gradient steps (epochs).
        learning_rate: SGD step size.
        embed_dim: embedding dimension for both user and item factors.
    """
    # SparseTensor representation of the train and test datasets.
    A_train = build_rating_sparse_tensor(train_dataset)
    A_test = build_rating_sparse_tensor(eval_dataset)
    model = MyModel(embed_dim)
    train_loss = tf.keras.metrics.Mean(name='train_loss')
    test_loss = tf.keras.metrics.Mean(name='test_loss')
    def train_step(A_train, learning_rate):
        # One full-batch gradient step over all observed ratings.
        with tf.GradientTape(persistent=True) as tape:
            # Predicted rating = dot product of the user and item embeddings.
            predictions = tf.reduce_sum(
                tf.gather(model.U, A_train.indices[:, 0]) *
                tf.gather(model.V, A_train.indices[:, 1]),
                axis=1)
            loss_obj = tf.keras.losses.MSE(A_train.values, predictions)
        gradients = tape.gradient(loss_obj, model.trainable_variables)
        # NOTE(review): a fresh SGD optimizer is built every step; harmless
        # for plain SGD but would break stateful optimizers (e.g. Adam).
        optimizer = tf.keras.optimizers.SGD(learning_rate=learning_rate)
        # print(model.trainable_variables)
        train_loss(loss_obj)
        optimizer.apply_gradients(zip(gradients, model.trainable_variables))
    def test_step(A_test):
        # Evaluate MSE on held-out ratings; no gradient update performed.
        with tf.GradientTape(persistent=True) as tape:
            predictions = tf.reduce_sum(
                tf.gather(model.U, A_test.indices[:, 0]) *
                tf.gather(model.V, A_test.indices[:, 1]),
                axis=1)
            loss_obj = tf.keras.losses.MSE(A_test.values, predictions)
        test_loss(loss_obj)
    for i in range(max_iterations):
        train_loss.reset_states()
        test_loss.reset_states()
        train_step(A_train, learning_rate)
        test_step(A_test)
        print(
            f'Epoch {i + 1}, '
            f'Train Loss: {train_loss.result()}, '
            f'Test Loss: {test_loss.result()}, '
        )
def run(work_dir, max_iterations):
    """Load the train/eval datasets from work_dir and train the model.

    Args:
        work_dir: directory (local path or GCS URI) holding the dataset shards.
        max_iterations: number of training epochs passed to build_model.
    """
    # Bug fix: use the explicit ``work_dir`` parameter instead of the
    # module-level ``args`` global, so the function also works when called
    # programmatically (the old code crashed with NameError outside __main__).
    print(work_dir)
    train_dataset = load(os.path.join(work_dir, 'train-dataset' + POSTFIX))
    eval_dataset = load(os.path.join(work_dir, 'eval-dataset' + POSTFIX))
    build_model(train_dataset, eval_dataset, max_iterations)
if __name__ == '__main__':
    """Main function called by AI Platform."""
    # Parse CLI flags and launch training.
    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument(
        '--work-dir',
        required=False,
        default='gs://ahsu-movielens',
        help='Directory for staging and working files. '
        'This can be a Google Cloud Storage path.')
    parser.add_argument(
        '--max_iterations',
        type=int,
        default=1000,
        help='Number of iterations to train the model')
    args = parser.parse_args()
    run(
        args.work_dir,
        args.max_iterations)
|
[
"argparse.ArgumentParser",
"tensorflow.data.TFRecordDataset",
"os.path.join",
"tensorflow.keras.metrics.Mean",
"tensorflow.random.normal",
"tensorflow.keras.losses.MSE",
"tensorflow.keras.optimizers.SGD",
"tensorflow.io.parse_single_example",
"tensorflow.config.experimental.set_memory_growth",
"tensorflow.config.experimental.list_logical_devices",
"tensorflow.gather",
"tensorflow.stack",
"tensorflow.debugging.set_log_device_placement",
"tensorflow.enable_eager_execution",
"tensorflow.FixedLenFeature",
"tensorflow.config.experimental.list_physical_devices",
"tensorflow.GradientTape"
] |
[((312, 339), 'tensorflow.enable_eager_execution', 'tf.enable_eager_execution', ([], {}), '()\n', (337, 339), True, 'import tensorflow as tf\n'), ((341, 384), 'tensorflow.debugging.set_log_device_placement', 'tf.debugging.set_log_device_placement', (['(True)'], {}), '(True)\n', (378, 384), True, 'import tensorflow as tf\n'), ((392, 443), 'tensorflow.config.experimental.list_physical_devices', 'tf.config.experimental.list_physical_devices', (['"""GPU"""'], {}), "('GPU')\n", (436, 443), True, 'import tensorflow as tf\n'), ((1045, 1079), 'tensorflow.data.TFRecordDataset', 'tf.data.TFRecordDataset', (['filenames'], {}), '(filenames)\n', (1068, 1079), True, 'import tensorflow as tf\n'), ((1383, 1434), 'tensorflow.io.parse_single_example', 'tf.io.parse_single_example', (['example_proto', 'features'], {}), '(example_proto, features)\n', (1409, 1434), True, 'import tensorflow as tf\n'), ((2902, 2942), 'tensorflow.keras.metrics.Mean', 'tf.keras.metrics.Mean', ([], {'name': '"""train_loss"""'}), "(name='train_loss')\n", (2923, 2942), True, 'import tensorflow as tf\n'), ((2957, 2996), 'tensorflow.keras.metrics.Mean', 'tf.keras.metrics.Mean', ([], {'name': '"""test_loss"""'}), "(name='test_loss')\n", (2978, 2996), True, 'import tensorflow as tf\n'), ((4557, 4636), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'formatter_class': 'argparse.ArgumentDefaultsHelpFormatter'}), '(formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n', (4580, 4636), False, 'import argparse\n'), ((622, 672), 'tensorflow.config.experimental.list_logical_devices', 'tf.config.experimental.list_logical_devices', (['"""GPU"""'], {}), "('GPU')\n", (665, 672), True, 'import tensorflow as tf\n'), ((906, 938), 'os.path.join', 'os.path.join', (['data_dir', '"""part-*"""'], {}), "(data_dir, 'part-*')\n", (918, 938), False, 'import os\n'), ((1161, 1210), 'tensorflow.FixedLenFeature', 'tf.FixedLenFeature', (['()', 'tf.int64'], {'default_value': '(0)'}), '((), tf.int64, default_value=0)\n', (1179, 
1210), True, 'import tensorflow as tf\n'), ((1234, 1283), 'tensorflow.FixedLenFeature', 'tf.FixedLenFeature', (['()', 'tf.int64'], {'default_value': '(0)'}), '((), tf.int64, default_value=0)\n', (1252, 1283), True, 'import tensorflow as tf\n'), ((1309, 1360), 'tensorflow.FixedLenFeature', 'tf.FixedLenFeature', (['()', 'tf.float32'], {'default_value': '(0)'}), '((), tf.float32, default_value=0)\n', (1327, 1360), True, 'import tensorflow as tf\n'), ((1620, 1661), 'tensorflow.stack', 'tf.stack', ([], {'values': '[arr[0], arr[1]]', 'axis': '(0)'}), '(values=[arr[0], arr[1]], axis=0)\n', (1628, 1661), True, 'import tensorflow as tf\n'), ((3395, 3447), 'tensorflow.keras.optimizers.SGD', 'tf.keras.optimizers.SGD', ([], {'learning_rate': 'learning_rate'}), '(learning_rate=learning_rate)\n', (3418, 3447), True, 'import tensorflow as tf\n'), ((4271, 4325), 'os.path.join', 'os.path.join', (['args.work_dir', "('train-dataset' + POSTFIX)"], {}), "(args.work_dir, 'train-dataset' + POSTFIX)\n", (4283, 4325), False, 'import os\n'), ((4349, 4402), 'os.path.join', 'os.path.join', (['args.work_dir', "('eval-dataset' + POSTFIX)"], {}), "(args.work_dir, 'eval-dataset' + POSTFIX)\n", (4361, 4402), False, 'import os\n'), ((551, 602), 'tensorflow.config.experimental.set_memory_growth', 'tf.config.experimental.set_memory_growth', (['gpu', '(True)'], {}), '(gpu, True)\n', (591, 602), True, 'import tensorflow as tf\n'), ((2034, 2066), 'tensorflow.stack', 'tf.stack', ([], {'values': 'indices', 'axis': '(0)'}), '(values=indices, axis=0)\n', (2042, 2066), True, 'import tensorflow as tf\n'), ((2085, 2116), 'tensorflow.stack', 'tf.stack', ([], {'values': 'values', 'axis': '(0)'}), '(values=values, axis=0)\n', (2093, 2116), True, 'import tensorflow as tf\n'), ((2404, 2457), 'tensorflow.random.normal', 'tf.random.normal', (['[USER_SHAPE, embed_dim]'], {'stddev': '(1.0)'}), '([USER_SHAPE, embed_dim], stddev=1.0)\n', (2420, 2457), True, 'import tensorflow as tf\n'), ((2508, 2561), 
'tensorflow.random.normal', 'tf.random.normal', (['[ITEM_SHAPE, embed_dim]'], {'stddev': '(1.0)'}), '([ITEM_SHAPE, embed_dim], stddev=1.0)\n', (2524, 2561), True, 'import tensorflow as tf\n'), ((3049, 3081), 'tensorflow.GradientTape', 'tf.GradientTape', ([], {'persistent': '(True)'}), '(persistent=True)\n', (3064, 3081), True, 'import tensorflow as tf\n'), ((3262, 3310), 'tensorflow.keras.losses.MSE', 'tf.keras.losses.MSE', (['A_train.values', 'predictions'], {}), '(A_train.values, predictions)\n', (3281, 3310), True, 'import tensorflow as tf\n'), ((3620, 3652), 'tensorflow.GradientTape', 'tf.GradientTape', ([], {'persistent': '(True)'}), '(persistent=True)\n', (3635, 3652), True, 'import tensorflow as tf\n'), ((3831, 3878), 'tensorflow.keras.losses.MSE', 'tf.keras.losses.MSE', (['A_test.values', 'predictions'], {}), '(A_test.values, predictions)\n', (3850, 3878), True, 'import tensorflow as tf\n'), ((3134, 3175), 'tensorflow.gather', 'tf.gather', (['model.U', 'A_train.indices[:, 0]'], {}), '(model.U, A_train.indices[:, 0])\n', (3143, 3175), True, 'import tensorflow as tf\n'), ((3186, 3227), 'tensorflow.gather', 'tf.gather', (['model.V', 'A_train.indices[:, 1]'], {}), '(model.V, A_train.indices[:, 1])\n', (3195, 3227), True, 'import tensorflow as tf\n'), ((3705, 3745), 'tensorflow.gather', 'tf.gather', (['model.U', 'A_test.indices[:, 0]'], {}), '(model.U, A_test.indices[:, 0])\n', (3714, 3745), True, 'import tensorflow as tf\n'), ((3756, 3796), 'tensorflow.gather', 'tf.gather', (['model.V', 'A_test.indices[:, 1]'], {}), '(model.V, A_test.indices[:, 1])\n', (3765, 3796), True, 'import tensorflow as tf\n')]
|
# -*- coding:utf-8 -*-
# Created by LuoJie at 12/12/19
import tensorflow as tf
import pandas as pd
import os, sys, inspect
# Make the package's parent directory importable when this file is run
# directly (so the ``pgn``/``utils`` sibling packages resolve).
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(currentdir)
sys.path.insert(0,parentdir)
from pgn.batcher import beam_test_batch_generator
from pgn.model import PGN
from pgn.test_helper import beam_decode, greedy_decode
from utils.config import TEST_DATA, PGN_CKPT
from utils.config_gpu import config_gpu
from utils.params import get_params
from utils.saveLoader import Vocab
from pgn.batcher import batcher
def test(params):
    """Restore the latest PGN checkpoint and run inference.

    Decodes either greedily or with beam search depending on
    ``params['greedy_decode']``, writes predictions to
    ``params['result_save_path']`` and returns them.
    """
    assert params["mode"].lower() in ["test", "eval"], "change training mode to 'test' or 'eval'"
    assert params["beam_size"] == params["batch_size"], "Beam size must be equal to batch_size, change the params"
    # Configure GPU resources.
    config_gpu()
    print("Test the model ...")
    model = PGN(params)
    print("Creating the vocab ...")
    vocab = Vocab(params["vocab_path"], params["vocab_size"])
    # ds = batcher(vocab, params)
    print("Creating the checkpoint manager")
    checkpoint = tf.train.Checkpoint(PGN=model)
    checkpoint_manager = tf.train.CheckpointManager(checkpoint, PGN_CKPT, max_to_keep=5)
    # checkpoint_manager = tf.train.CheckpointManager(checkpoint, TEMP_CKPT, max_to_keep=5)
    # temp_ckpt = os.path.join(TEMP_CKPT, "ckpt-5")
    # checkpoint.restore(temp_ckpt)
    checkpoint.restore(checkpoint_manager.latest_checkpoint)
    if checkpoint_manager.latest_checkpoint:
        print("Restored from {}".format(checkpoint_manager.latest_checkpoint))
    else:
        print("Initializing from scratch.")
    print("Model restored")
    if params['greedy_decode']:
        # Greedy decoding can use a large batch; beam search cannot.
        params['batch_size'] = 512
        results = predict_result(model, params, vocab, params['result_save_path'])
    else:
        b = beam_test_batch_generator(params["beam_size"])
        results = []
        for batch in b:
            best_hyp = beam_decode(model, batch, vocab, params)
            results.append(best_hyp.abstract)
        save_predict_result(results, params['result_save_path'])
    print('save result to :{}'.format(params['result_save_path']))
    return results
def predict_result(model, params, vocab, result_save_path):
    """Greedy-decode the whole dataset and persist the predictions.

    Whitespace is stripped from each decoded sentence before saving.
    """
    data = batcher(vocab, params)
    decoded = greedy_decode(model, data, vocab, params)
    cleaned = [sentence.replace(" ", "") for sentence in decoded]
    save_predict_result(cleaned, result_save_path)
    return cleaned
def save_predict_result(results, result_save_path):
    """Attach predictions to the test set and write a QID/Prediction CSV."""
    frame = pd.read_csv(TEST_DATA)
    frame['Prediction'] = results
    # Keep only the submission columns.
    frame = frame[['QID', 'Prediction']]
    frame.to_csv(result_save_path, index=None, sep=',')
if __name__ == '__main__':
    # Load run parameters and force test mode.
    params = get_params()
    params["mode"] = "test"
    params["batch_size"] = params["beam_size"]
    # Run inference.
    results = test(params)
|
[
"pgn.test_helper.greedy_decode",
"pgn.model.PGN",
"pgn.test_helper.beam_decode",
"utils.config_gpu.config_gpu",
"tensorflow.train.Checkpoint",
"pandas.read_csv",
"os.path.dirname",
"sys.path.insert",
"utils.params.get_params",
"pgn.batcher.batcher",
"inspect.currentframe",
"utils.saveLoader.Vocab",
"tensorflow.train.CheckpointManager",
"pgn.batcher.beam_test_batch_generator"
] |
[((224, 251), 'os.path.dirname', 'os.path.dirname', (['currentdir'], {}), '(currentdir)\n', (239, 251), False, 'import os, sys, inspect\n'), ((252, 281), 'sys.path.insert', 'sys.path.insert', (['(0)', 'parentdir'], {}), '(0, parentdir)\n', (267, 281), False, 'import os, sys, inspect\n'), ((854, 866), 'utils.config_gpu.config_gpu', 'config_gpu', ([], {}), '()\n', (864, 866), False, 'from utils.config_gpu import config_gpu\n'), ((913, 924), 'pgn.model.PGN', 'PGN', (['params'], {}), '(params)\n', (916, 924), False, 'from pgn.model import PGN\n'), ((974, 1023), 'utils.saveLoader.Vocab', 'Vocab', (["params['vocab_path']", "params['vocab_size']"], {}), "(params['vocab_path'], params['vocab_size'])\n", (979, 1023), False, 'from utils.saveLoader import Vocab\n'), ((1121, 1151), 'tensorflow.train.Checkpoint', 'tf.train.Checkpoint', ([], {'PGN': 'model'}), '(PGN=model)\n', (1140, 1151), True, 'import tensorflow as tf\n'), ((1178, 1241), 'tensorflow.train.CheckpointManager', 'tf.train.CheckpointManager', (['checkpoint', 'PGN_CKPT'], {'max_to_keep': '(5)'}), '(checkpoint, PGN_CKPT, max_to_keep=5)\n', (1204, 1241), True, 'import tensorflow as tf\n'), ((2299, 2321), 'pgn.batcher.batcher', 'batcher', (['vocab', 'params'], {}), '(vocab, params)\n', (2306, 2321), False, 'from pgn.batcher import batcher\n'), ((2348, 2392), 'pgn.test_helper.greedy_decode', 'greedy_decode', (['model', 'dataset', 'vocab', 'params'], {}), '(model, dataset, vocab, params)\n', (2361, 2392), False, 'from pgn.test_helper import beam_decode, greedy_decode\n'), ((2617, 2639), 'pandas.read_csv', 'pd.read_csv', (['TEST_DATA'], {}), '(TEST_DATA)\n', (2628, 2639), True, 'import pandas as pd\n'), ((2874, 2886), 'utils.params.get_params', 'get_params', ([], {}), '()\n', (2884, 2886), False, 'from utils.params import get_params\n'), ((1864, 1910), 'pgn.batcher.beam_test_batch_generator', 'beam_test_batch_generator', (["params['beam_size']"], {}), "(params['beam_size'])\n", (1889, 1910), False, 'from pgn.batcher 
import beam_test_batch_generator\n'), ((186, 208), 'inspect.currentframe', 'inspect.currentframe', ([], {}), '()\n', (206, 208), False, 'import os, sys, inspect\n'), ((1979, 2019), 'pgn.test_helper.beam_decode', 'beam_decode', (['model', 'batch', 'vocab', 'params'], {}), '(model, batch, vocab, params)\n', (1990, 2019), False, 'from pgn.test_helper import beam_decode, greedy_decode\n')]
|
import argparse
import os
import sys
def printExceptionAndExit(e):
    """Report an exception's type and message, then terminate the process."""
    for detail in (type(e), str(e)):
        print(detail)
    sys.exit()
def extractDataFromFile(header_file, data_file):
    """Collect each header line and the line following it from data_file.

    For every line in header_file, every matching line in data_file is
    appended to the result together with the line directly after it.

    Args:
        header_file: path to a file listing one header per line.
        data_file: path to the data file to scan.

    Returns:
        A list of alternating header / following-data lines, grouped in
        header-file order.
    """
    with open(header_file) as header_fh:
        headers = header_fh.read().splitlines()
    with open(data_file) as data_fh:
        lines = data_fh.read().splitlines()
    output = []
    for header in headers:
        for i, line in enumerate(lines):
            if line == header:
                output.append(line)
                # Bug fix: guard the lookahead — the original indexed
                # lines[i + 1] unconditionally and raised IndexError when a
                # header was the last line.  (The original's ``i += 1`` was
                # also a no-op inside ``for i in range(...)``.)
                if i + 1 < len(lines):
                    output.append(lines[i + 1])
    return output
def getFileNamesFromDirectory(directory):
    """Return the names of all entries in directory (OS-reported order)."""
    return list(os.listdir(directory))
def writeDataToFile(fileData, fileName):
    """Write every entry of fileData to fileName, one entry per line."""
    try:
        with open(fileName, 'w') as out:
            out.write(''.join(entry + '\n' for entry in fileData))
    except Exception as e:
        printExceptionAndExit(e)
    print('Created file ' + fileName + '.')
def handleCreateDirectoryDecision(directoryToMake):
    """Ask the user whether to create a missing directory; exit on 'n'.

    Loops until the user answers 'y' or 'n'.  Answering 'n' terminates the
    program; answering 'y' creates the directory (and parents).

    Args:
        directoryToMake: path of the directory that does not yet exist.
    """
    OPTION_YES = 'y'
    OPTION_NO = 'n'
    print('That directory doesn\'t seem to exist..')
    createDirectoryInput = ''
    # Bug fix: compare with ==/!= instead of ``is``/``is not`` — ``is`` tests
    # object identity and only happens to work for strings CPython interns.
    while createDirectoryInput != OPTION_YES and createDirectoryInput != OPTION_NO:
        createDirectoryInput = input('Would you like to create it? ('+ OPTION_YES + '/' + OPTION_NO + ') ')
    if createDirectoryInput == OPTION_NO:
        sys.exit()
    os.makedirs(directoryToMake)
def main():
    """Parse CLI arguments, pair header/data files, and write FASTA outputs."""
    FASTA_EXTENSION = '.fasta'
    parser = argparse.ArgumentParser()
    parser.add_argument('header_directory', help = 'The directory to get the header data from.')
    parser.add_argument('data_directory', help = 'The directory to extract the data files from.')
    parser.add_argument('output_files_name', help = 'The common name for the output files.')
    parser.add_argument('output_directory', help = 'The output directory.')
    arguments = parser.parse_args()
    if not arguments.output_directory or not os.path.exists(arguments.output_directory):
        handleCreateDirectoryDecision(arguments.output_directory)
    headerFiles = getFileNamesFromDirectory(arguments.header_directory)
    dataFiles = getFileNamesFromDirectory(arguments.data_directory)
    outputDataList = list()
    # NOTE(review): header and data files are paired purely by listdir index;
    # this assumes both directories list corresponding files in the same
    # order — fragile, verify before relying on it.
    for i in range(0, len(headerFiles)):
        headerFile = os.path.join(arguments.header_directory, headerFiles[i])
        dataFile = os.path.join(arguments.data_directory, dataFiles[i])
        data = extractDataFromFile(headerFile, dataFile)
        outputDataList.append(data)
    for i, outputData in enumerate(outputDataList):
        outputFileName = arguments.output_files_name + str(i) + FASTA_EXTENSION
        outputFile = os.path.join(arguments.output_directory, outputFileName)
        writeDataToFile(outputData, outputFile)
    print('Done!')
main()
|
[
"os.makedirs",
"argparse.ArgumentParser",
"os.path.exists",
"os.path.join",
"os.listdir",
"sys.exit"
] |
[((109, 119), 'sys.exit', 'sys.exit', ([], {}), '()\n', (117, 119), False, 'import sys\n'), ((772, 793), 'os.listdir', 'os.listdir', (['directory'], {}), '(directory)\n', (782, 793), False, 'import os\n'), ((1574, 1602), 'os.makedirs', 'os.makedirs', (['directoryToMake'], {}), '(directoryToMake)\n', (1585, 1602), False, 'import os\n'), ((1661, 1686), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (1684, 1686), False, 'import argparse\n'), ((1558, 1568), 'sys.exit', 'sys.exit', ([], {}), '()\n', (1566, 1568), False, 'import sys\n'), ((2101, 2143), 'os.path.exists', 'os.path.exists', (['arguments.output_directory'], {}), '(arguments.output_directory)\n', (2115, 2143), False, 'import os\n'), ((2443, 2499), 'os.path.join', 'os.path.join', (['arguments.header_directory', 'headerFiles[i]'], {}), '(arguments.header_directory, headerFiles[i])\n', (2455, 2499), False, 'import os\n'), ((2519, 2571), 'os.path.join', 'os.path.join', (['arguments.data_directory', 'dataFiles[i]'], {}), '(arguments.data_directory, dataFiles[i])\n', (2531, 2571), False, 'import os\n'), ((2819, 2875), 'os.path.join', 'os.path.join', (['arguments.output_directory', 'outputFileName'], {}), '(arguments.output_directory, outputFileName)\n', (2831, 2875), False, 'import os\n')]
|
"""Entity processing job."""
import html
import re
import spacy
from flask import current_app
from sqlalchemy import desc
from aggrep import db
from aggrep.jobs.base import Job
from aggrep.models import Entity, EntityProcessQueue
new_line = re.compile(r"(/\n)")
ws = re.compile(r"\s+")
nlp = spacy.load("en_core_web_md")
BATCH_SIZE = 250
PER_RUN_LIMIT = 3000
EXCLUDES = [
"LANGUAGE",
"DATE",
"TIME",
"PERCENT",
"MONEY",
"QUANTITY",
"ORDINAL",
"CARDINAL",
]
def clean(text):
    """Normalize text: unescape HTML, drop non-ASCII, collapse whitespace."""
    unescaped = html.unescape(text)
    ascii_only = unescaped.encode("ascii", "ignore").decode("utf-8")
    no_breaks = re.sub(r"(/\n)", " ", ascii_only)
    collapsed = re.sub(r"\s+", " ", no_breaks)
    return collapsed.strip()
def extract(text):
    """Return the set of named-entity surface forms found in a document.

    Entities with excluded labels or with surface text shorter than 2 or
    longer than 40 characters are skipped.
    """
    found = set()
    for span in nlp(text).ents:
        if span.label_ in EXCLUDES:
            continue
        if not (2 <= len(span.text) <= 40):
            continue
        found.add(span.text)
    return found
class EntityExtractor(Job):
    """EntityExtractor job.

    Drains the EntityProcessQueue in batches, extracting named entities from
    each post's title and description and persisting them as Entity rows.
    A lock prevents concurrent runs.
    """

    identifier = "PROCESS"
    lock_timeout = 8

    def get_enqueued_posts(self):
        """Get enqueued posts (newest first, capped at PER_RUN_LIMIT)."""
        posts = (
            EntityProcessQueue.query.order_by(desc(EntityProcessQueue.id))
            .limit(PER_RUN_LIMIT)
            .all()
        )
        return [eq.post for eq in posts]

    def process_batch(self, batch):
        """Process a batch of posts.

        Extracts entities for each post, removes the posts from the queue,
        and commits in a single transaction. Returns the number of new
        Entity rows added.
        """
        post_ids = []
        new_entities = 0
        for post in batch:
            if post is None:
                continue
            post_ids.append(post.id)
            # Posts without a description are dequeued but not processed.
            if not post.desc:
                continue
            post_doc = extract(clean("{}. {}".format(post.title, post.desc)))
            for word in post_doc:
                e = Entity(entity=word, post_id=post.id)
                db.session.add(e)
                new_entities += 1
        # Remove the whole batch from the queue regardless of whether any
        # entities were extracted.
        EntityProcessQueue.query.filter(
            EntityProcessQueue.post_id.in_(post_ids)
        ).delete(synchronize_session="fetch")
        try:
            db.session.commit()
        except Exception:
            # Roll back and release the lock before propagating, so the next
            # run is not blocked by a stale lock.
            db.session.rollback()
            self.lock.remove()
            raise
        return new_entities

    def run(self):
        """Process entities."""
        if self.lock.is_locked():
            if not self.lock.is_expired():
                current_app.logger.info("Processing still in progress. Skipping.")
                return
            else:
                # Stale lock from a dead run — clear it and proceed.
                self.lock.remove()
        enqueued_posts = self.get_enqueued_posts()
        if len(enqueued_posts) == 0:
            current_app.logger.info("No posts in entity processing queue. Skipping...")
            return
        self.lock.create()
        current_app.logger.info(
            "Processing {} posts in entity queue.".format(len(enqueued_posts))
        )
        new_entities = 0
        start = 0
        while start < len(enqueued_posts):
            # Abort mid-run if the lock disappears or times out.
            if not self.lock.is_locked() or self.lock.is_expired():
                break
            end = start + BATCH_SIZE
            batch = enqueued_posts[start:end]
            new_entities += self.process_batch(batch)
            start = end
        current_app.logger.info("Unlocking processor.")
        self.lock.remove()
        if new_entities > 0:
            current_app.logger.info("Added {} entities.".format(new_entities))
def process_entities():
    """Entry point: run a single entity-extraction pass."""
    EntityExtractor().run()
|
[
"aggrep.db.session.commit",
"html.unescape",
"aggrep.models.Entity",
"spacy.load",
"aggrep.models.EntityProcessQueue.post_id.in_",
"sqlalchemy.desc",
"aggrep.db.session.add",
"flask.current_app.logger.info",
"aggrep.db.session.rollback",
"re.compile"
] |
[((244, 264), 're.compile', 're.compile', (['"""(/\\\\n)"""'], {}), "('(/\\\\n)')\n", (254, 264), False, 'import re\n'), ((270, 288), 're.compile', 're.compile', (['"""\\\\s+"""'], {}), "('\\\\s+')\n", (280, 288), False, 'import re\n'), ((295, 323), 'spacy.load', 'spacy.load', (['"""en_core_web_md"""'], {}), "('en_core_web_md')\n", (305, 323), False, 'import spacy\n'), ((550, 569), 'html.unescape', 'html.unescape', (['text'], {}), '(text)\n', (563, 569), False, 'import html\n'), ((3257, 3304), 'flask.current_app.logger.info', 'current_app.logger.info', (['"""Unlocking processor."""'], {}), "('Unlocking processor.')\n", (3280, 3304), False, 'from flask import current_app\n'), ((2117, 2136), 'aggrep.db.session.commit', 'db.session.commit', ([], {}), '()\n', (2134, 2136), False, 'from aggrep import db\n'), ((2664, 2739), 'flask.current_app.logger.info', 'current_app.logger.info', (['"""No posts in entity processing queue. Skipping..."""'], {}), "('No posts in entity processing queue. Skipping...')\n", (2687, 2739), False, 'from flask import current_app\n'), ((1845, 1881), 'aggrep.models.Entity', 'Entity', ([], {'entity': 'word', 'post_id': 'post.id'}), '(entity=word, post_id=post.id)\n', (1851, 1881), False, 'from aggrep.models import Entity, EntityProcessQueue\n'), ((1898, 1915), 'aggrep.db.session.add', 'db.session.add', (['e'], {}), '(e)\n', (1912, 1915), False, 'from aggrep import db\n'), ((2175, 2196), 'aggrep.db.session.rollback', 'db.session.rollback', ([], {}), '()\n', (2194, 2196), False, 'from aggrep import db\n'), ((2420, 2486), 'flask.current_app.logger.info', 'current_app.logger.info', (['"""Processing still in progress. Skipping."""'], {}), "('Processing still in progress. 
Skipping.')\n", (2443, 2486), False, 'from flask import current_app\n'), ((2004, 2044), 'aggrep.models.EntityProcessQueue.post_id.in_', 'EntityProcessQueue.post_id.in_', (['post_ids'], {}), '(post_ids)\n', (2034, 2044), False, 'from aggrep.models import Entity, EntityProcessQueue\n'), ((1279, 1306), 'sqlalchemy.desc', 'desc', (['EntityProcessQueue.id'], {}), '(EntityProcessQueue.id)\n', (1283, 1306), False, 'from sqlalchemy import desc\n')]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Apr 12 08:54:32 2021
OK so far:
swoosh h2o: 1994-2019 30S to 30N mean, 82 hpa
regressors:
QBO_CDAS = +5 months lag correlated with h2o: 0.508
Anom_nino3p4 = no lags corr with h2o: -0.167
LR:
no CV does R2 of 0.2857
Cross validate 5 kfolds: mean R2: 0.1786 std R2: 0.245
SVM:
CV 5 kfolds: mean R2: 0.418, mean adj_R2: 0.408,
std R2: 0.047, std adj_R2: 0.0485
need to plot residuals with best model.
@author: shlomi
"""
from strat_paths import work_chaim
ml_path = work_chaim / 'ML'
def split_qbo_en_ln_neut_enso(qbo):
    """Mask the QBO series into El Nino, La Nina and neutral ENSO phases.

    Months outside each phase are zero-filled; each returned series keeps
    the full time axis.
    """
    from make_regressors import load_all_regressors
    la_nina = load_all_regressors()['LN'].dropna('time')
    el_nino = load_all_regressors()['EN'].dropna('time')
    neutral = load_all_regressors()['neutENSO'].dropna('time')
    qbo_en = qbo.where(el_nino >= 0.5).fillna(0)
    qbo_en.name = 'qbo_en'
    qbo_ln = qbo.where(la_nina <= -0.5).fillna(0)
    qbo_ln.name = 'qbo_ln'
    qbo_neut = qbo.where(neutral != 0).fillna(0)
    qbo_neut.name = 'qbo_neut'
    return qbo_en, qbo_ln, qbo_neut
# def CV_splitter_for_xarray_time_series(X_da, time_dim='time', grp='year'):
# groups = X_da.groupby('{}.{}'.format(time_dim, grp)).groups
# sorted_groups = [value for (key, value) in sorted(groups.items())]
# cv = [(sorted_groups[i] + sorted_groups[i+1], sorted_groups[i+2])
# for i in range(len(sorted_groups)-2)]
# return cv\
def ABS_SHAP(df_shap, df):
    """Bar-plot mean |SHAP| per predictor, colored by the sign of its
    correlation with the feature values (red = positive impact).

    Args:
        df_shap: SHAP value matrix aligned with df's columns.
        df: feature DataFrame indexed by 'time'.
    """
    import numpy as np
    import pandas as pd
    import seaborn as sns
    sns.set_theme(style='ticks', font_scale=1.5)
    #import matplotlib as plt
    # Make a copy of the input data
    shap_v = pd.DataFrame(df_shap)
    feature_list = df.columns
    shap_v.columns = feature_list
    df_v = df.copy().reset_index().drop('time', axis=1)
    # Determine the correlation in order to plot with different colors
    corr_list = list()
    for i in feature_list:
        b = np.corrcoef(shap_v[i], df_v[i])[1][0]
        corr_list.append(b)
    # fillna(0) guards against NaN correlations from constant columns.
    corr_df = pd.concat(
        [pd.Series(feature_list), pd.Series(corr_list)], axis=1).fillna(0)
    # Make a data frame. Column 1 is the feature, and Column 2 is the correlation coefficient
    corr_df.columns = ['Predictor', 'Corr']
    corr_df['Sign'] = np.where(corr_df['Corr'] > 0, 'red', 'blue')
    # Plot it
    shap_abs = np.abs(shap_v)
    k = pd.DataFrame(shap_abs.mean()).reset_index()
    k.columns = ['Predictor', 'SHAP_abs']
    k2 = k.merge(corr_df, left_on='Predictor', right_on='Predictor', how='inner')
    k2 = k2.sort_values(by='SHAP_abs', ascending=True)
    colorlist = k2['Sign']
    ax = k2.plot.barh(x='Predictor', y='SHAP_abs',
                      color=colorlist, figsize=(9, 3), legend=False)
    ax.set_xlabel("SHAP Value (Red = Positive Impact)")
    return
def plot_simplified_shap_tree_explainer(rf_model):
    """Fit rf_model on 1994-2019 data and plot mean |SHAP| bars via ABS_SHAP."""
    import shap
    from sklearn.model_selection import train_test_split
    import matplotlib.pyplot as plt
    # QBO lagged 5 months; no CO2 regressor in this simplified figure.
    X = produce_X(lag={'qbo_cdas': 5}, syear='1994',
                  eyear='2019', add_co2=False)
    y = produce_y(detrend='lowess',
                  lat_band_mean=[-15, 15], syear='1994', eyear='2019', standertize=True)
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1)
    rf_model.fit(X_train, y_train)
    dfX = X_test.to_dataset('regressor').to_dataframe()
    dfX = dfX.rename(
        {'qbo_cdas': 'QBO', 'anom_nino3p4': 'ENSO'}, axis=1)
    ex_rf = shap.Explainer(rf_model)
    shap_values_rf = ex_rf.shap_values(dfX)
    ABS_SHAP(shap_values_rf, dfX)
    ax = plt.gca()
    ax.set_xlabel(r'H$_{2}$O anomalies (STD) (Red is positive)')
    return
def plot_Tree_explainer_shap(rf_model):
    """Fit *rf_model* (1994-2019, QBO/ENSO/CO2 predictors) on a 90/10 split,
    print its feature importances (in %), and show the full SHAP summary
    (beeswarm) plot on the held-out set.

    NOTE(review): like the simplified variant, the split is unseeded.
    """
    import shap
    from sklearn.model_selection import train_test_split
    X = produce_X(lag={'qbo_cdas': 5})
    y = produce_y(detrend=None, lat_band_mean=[-15, 15])
    X = X.sel(time=slice('1994', '2019'))
    y = y.sel(time=slice('1994', '2019'))
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1)
    rf_model.fit(X_train, y_train)
    dfX = X_test.to_dataset('regressor').to_dataframe()
    dfX = dfX.rename(
        {'qbo_cdas': 'QBO', 'anom_nino3p4': 'ENSO', 'co2': r'CO$_2$'}, axis=1)
    # feature importances in percent, keyed by the renamed columns
    fi = dict(zip(dfX.columns, rf_model.feature_importances_ * 100))
    print(fi)
    ex_rf = shap.Explainer(rf_model)
    shap_values_rf = ex_rf.shap_values(dfX)
    shap.summary_plot(shap_values_rf, dfX, plot_size=1.1)
    return
def plot_model_prediction_fig_3():
    """Figure 3: MLR prediction vs. SWOOSH observations (top panel) and the
    residuals (bottom panel), 1994-2019.

    The MLR (with ENSO^2 and QBO*ENSO interaction terms) is trained on
    2005-2019 and evaluated over the full 1994-2019 record, so 1994-2004 is
    out-of-sample. Forecast-bust periods are marked with vertical lines.

    Returns
    -------
    matplotlib.figure.Figure
    """
    from matplotlib.ticker import (MultipleLocator, AutoMinorLocator)
    from sklearn.linear_model import LinearRegression
    import seaborn as sns
    import matplotlib.pyplot as plt
    X = produce_X()
    X = add_enso2_and_enso_qbo_to_X(X)
    y = produce_y(detrend=None, lat_band_mean=[-15, 15])
    # evaluation window (includes the pre-2005 out-of-sample years):
    X_test = X.sel(time=slice('1994', '2019'))
    y_test = y.sel(time=slice('1994', '2019'))
    # training window:
    X_train = X.sel(time=slice('2005', '2019'))
    y_train = y.sel(time=slice('2005', '2019'))
    lr = LinearRegression()
    rds = make_results_for_MLR(lr, X_train, y_train, X_test=X_test, y_test=y_test)
    df = rds['predict'].to_dataframe()
    df['y_true'] = y_test.to_dataframe()
    df['resid'] = df['predict'] - df['y_true']
    df = df.rename({'resid': 'Residuals', 'predict': 'MLR', 'y_true': 'SWOOSH'}, axis=1)
    sns.set_theme(style='ticks', font_scale=1.5)
    fig, ax = plt.subplots(2, 1, figsize=(18, 7))
    df[['SWOOSH', 'MLR']].plot(ax=ax[0], color=['tab:purple', 'tab:red'])
    df[['Residuals']].plot(ax=ax[1], color='k', legend=False)
    [x.grid(True) for x in ax]
    [x.set_xlabel('') for x in ax]
    ax[0].set_ylabel(r'H$_{2}$O anomalies [std]')
    ax[1].set_ylabel(r'H$_{2}$O residuals [std]')
    [x.xaxis.set_minor_locator(AutoMinorLocator()) for x in ax]
    [x.xaxis.grid(True, which='minor') for x in ax]
    # legend = ax.legend(prop={'size': 13}, ncol=5, loc='upper left')
    plot_forecast_busts_lines_datetime(ax[0], color='k')
    fig.tight_layout()
    # # get handles and labels of legend:
    # hands, labes = ax.get_legend_handles_labels()
    # colors = [x.get_color() for x in hands]
    # # change the text labels to the colors of the lines:
    # for i, text in enumerate(legend.get_texts()):
    #     text.set_color(colors[i])
    return fig
def plot_beta_coeffs(rds, col_wrap=3, figsize=(13, 6), extent=[-170, 170, -57.5, 57.5], drop_co2=True):
    """Lat-lon faceted contour maps of the MLR beta coefficients in *rds*.

    Parameters
    ----------
    rds : xr.Dataset
        MLR results with a 'params' variable dimensioned (regressor, lat, lon).
    col_wrap : int
        Facets per row (forced to 2 when drop_co2 is True).
    figsize : tuple
    extent : list
        [lon_min, lon_max, lat_min, lat_max] map extent.
    drop_co2 : bool
        Drop the 'co2' regressor panel and relax the colorbar limits.

    Returns
    -------
    xarray FacetGrid

    NOTE(review): gl_list[3] is indexed unconditionally a few lines before
    the try/except that guards IndexError — with fewer than 4 panels the
    earlier access would raise first. Confirm panel count assumptions.
    """
    import cartopy.crs as ccrs
    import seaborn as sns
    import matplotlib.ticker as mticker
    from cartopy.mpl.gridliner import LONGITUDE_FORMATTER, LATITUDE_FORMATTER
    from palettable.scientific import diverging as divsci
    from strato_figures import remove_regressors_and_set_title
    predict_cmap = divsci.Vik_20.mpl_colormap
    sns.set_theme(style='ticks', font_scale=1.5)
    proj = ccrs.PlateCarree(central_longitude=0)
    plt_kwargs = dict(add_colorbar=False,
                      col_wrap=col_wrap,
                      cmap=predict_cmap, center=0.0, extend='max', vmax=0.6,
                      levels=41, subplot_kws=dict(projection=proj),
                      transform=ccrs.PlateCarree(), figsize=figsize)
    label = r'$\beta$ coefficients'
    gl_list = []
    if drop_co2:
        rds = rds.drop_sel(regressor='co2')
        plt_kwargs.update(extend=None, vmax=None, col_wrap=2)
    fg = rds['params'].plot.contourf(col='regressor', **plt_kwargs)
    cbar_kws = {'label': '', 'format': '%0.2f'}
    cbar_ax = fg.fig.add_axes([0.1, 0.1, .8, .035])  # last num controls width
    fg.add_colorbar(cax=cbar_ax, orientation="horizontal", **cbar_kws)
    for ax in fg.axes.flatten():
        ax.coastlines()
        ax.set_extent(extent, crs=ccrs.PlateCarree())
        gl = ax.gridlines(
            crs=ccrs.PlateCarree(),
            linewidth=1,
            color='black',
            alpha=0.5,
            linestyle='--',
            draw_labels=True)
        gl.xlabels_top = False
        gl.xlabel_style = {'size': 9}
        gl.ylabel_style = {'size': 9}
        gl.xlines = True
        gl.xlocator = mticker.FixedLocator([-180, -120, -60, 0, 60, 120, 180])
        gl.ylocator = mticker.FixedLocator([-45, -30, -15, 0, 15, 30, 45])
        gl.xformatter = LONGITUDE_FORMATTER
        gl.yformatter = LATITUDE_FORMATTER
        gl_list.append(gl)
        ax = remove_regressors_and_set_title(ax)
    # tweak which facet edges carry latitude labels:
    gl_list[0].ylabels_right = False
    gl_list[1].ylabels_right = False
    gl_list[1].ylabels_left = True
    gl_list[2].ylabels_right = False
    gl_list[3].ylabels_left = True
    gl_list[3].ylabels_right = True
    try:
        gl_list[3].ylabels_right = False
    except IndexError:
        pass
    fg.fig.tight_layout()
    fg.fig.subplots_adjust(top=0.93,
                           bottom=0.2,
                           left=0.05,
                           right=0.979,
                           hspace=0.275,
                           wspace=0.044)
    # fg = rds['params'].plot.contourf(col='regressor', **plt_kwargs)
    # cbar_ax = fg.fig.add_axes([0.1, 0.1, .8, .025])
    # fg.add_colorbar(cax=cbar_ax, orientation="horizontal", label='',
    #                 format='%0.3f')
    # # fg.fig.suptitle(label, fontsize=12, fontweight=750)
    # [ax.coastlines() for ax in fg.axes.flatten()]
    # [ax.gridlines(
    #     crs=ccrs.PlateCarree(),
    #     linewidth=1,
    #     color='black',
    #     alpha=0.5,
    #     linestyle='--',
    #     draw_labels=False) for ax in fg.axes.flatten()]
    # fg.fig.subplots_adjust(bottom=0.2, top=0.9, left=0.05)
    return fg
def plot_r2_map_predictor_sets_with_co2(path=work_chaim, cpt_source='randel',
                                        save=True):
    """2x2 faceted lat-lon maps of adjusted R^2 at 82 hPa for four predictor
    sets (etas 3, 1, 4, 2 — see produce_rds_etas), 2005-2019.

    Parameters
    ----------
    path : Path
        Unused directly here (kept for signature compatibility; the
        commented-out open_dataset calls used it).
    cpt_source : {'randel', 'sean'}
        Cold-point-temperature source passed to produce_rds_etas(eta=3).
    save : bool
        Save the figure to savefig_path as PNG.

    Returns
    -------
    xarray FacetGrid
    """
    import xarray as xr
    import cartopy.crs as ccrs
    import matplotlib.pyplot as plt
    import matplotlib.ticker as mticker
    import seaborn as sns
    from strato_figures import remove_regressors_and_set_title
    from cartopy.mpl.gridliner import LONGITUDE_FORMATTER, LATITUDE_FORMATTER
    from palettable.colorbrewer import sequential as seqbr
    # from palettable.scientific import diverging as divsci
    # from palettable.colorbrewer import diverging as divbr
    from strat_paths import savefig_path
    error_cmap = seqbr.YlGnBu_9.mpl_colormap
    sns.set_theme(style='ticks', font_scale=1.5)
    titles =[r'(a) $\sum_{i=0}^{5}$CPT(t-$i$)',
             r'(b) $\eta_1$ = QBO + ENSO + CO$_2$',
             r'(c) $\eta_1$ + QBO $\times$ ENSO + ENSO$^2$',
             r'(d) $\eta_1$ + T500 + BDC']
    # rds1 = xr.open_dataset(
    #     path /
    #     'MLR_H2O_latlon_cdas-plags_ch4_enso_2004-2019.nc')
    # rds2 = xr.open_dataset(
    #     path /
    #     'MLR_H2O_latlon_cdas-plags_ch4_enso_bdc_t500_2004-2019.nc')
    # rds3 = xr.open_dataset(
    #     path /
    #     'MLR_H2O_latlon_cdas-plags_ch4_enso_radio_cold_lags6_2004-2019.nc')
    # rds4 = xr.open_dataset(
    #     path /
    #     'MLR_H2O_latlon_cdas-plags_ch4_enso_poly_2_no_qbo^2_no_ch4_extra_2004-2019.nc')
    # panel order (a)-(d) corresponds to etas 3, 1, 4, 2:
    rds1 = produce_rds_etas(eta=3, cpt_source=cpt_source)
    rds2 = produce_rds_etas(eta=1)
    rds3 = produce_rds_etas(eta=4)
    rds4 = produce_rds_etas(eta=2)
    rds = xr.concat([x['r2'] for x in [rds1, rds2, rds3, rds4]], 'eta')
    rds['eta'] = range(1, 5)
    rds = rds.sortby('eta')
    # fig = plt.figure(figsize=(11, 5))
    # ax = fig.add_subplot(1, 1, 1,
    #                      projection=ccrs.PlateCarree(central_longitude=0))
    # ax.coastlines()
    proj = ccrs.PlateCarree(central_longitude=0)
    fg = rds.plot.contourf(col='eta', add_colorbar=False, cmap=error_cmap,
                           vmin=0.0, extend=None, levels=41, col_wrap=2,
                           subplot_kws=dict(projection=proj),
                           transform=ccrs.PlateCarree(), figsize=(13, 6))
    # lons = rds.lon.values[0:int(len(rds.lon.values) / 2)][::2]
    # lons_mirror = abs(lons[::-1])
    # lons = np.concatenate([lons, lons_mirror])
    # lats = rds.lat.values[0:int(len(rds.lat.values) / 2)][::2]
    # lats_mirror = abs(lats[::-1])
    # lats = np.concatenate([lats, lats_mirror])
    # ax.set_xticks(lons, crs=ccrs.PlateCarree())
    # ax.set_yticks(lats, crs=ccrs.PlateCarree())
    # lon_formatter = LongitudeFormatter(zero_direction_label=True)
    # lat_formatter = LatitudeFormatter()
    # ax.xaxis.set_major_formatter(lon_formatter)
    # ax.yaxis.set_major_formatter(lat_formatter)
    cbar_kws = {'label': '', 'format': '%0.2f', 'aspect': 20}
    cbar_ax = fg.fig.add_axes([0.1, 0.1, .8, .025])  # last num controls width
    fg.add_colorbar(cax=cbar_ax, orientation="horizontal", **cbar_kws)
    gl_list = []
    for i, ax in enumerate(fg.axes.flatten()):
        ax.coastlines()
        gl = ax.gridlines(
            crs=ccrs.PlateCarree(),
            linewidth=1,
            color='black',
            alpha=0.5,
            linestyle='--',
            draw_labels=True)
        gl.xlabels_top = False
        gl.xlabel_style = {'size': 9}
        gl.ylabel_style = {'size': 9}
        gl.xlines = True
        gl.xlocator = mticker.FixedLocator([-180, -120, -60, 0, 60, 120, 180])
        gl.ylocator = mticker.FixedLocator([-45, -30, -15, 0, 15, 30, 45])
        gl.xformatter = LONGITUDE_FORMATTER
        gl.yformatter = LATITUDE_FORMATTER
        gl_list.append(gl)
        # the first (a) title carries extra math markup, so pad it more
        if i == 0:
            plt.rcParams['axes.titlepad'] = 16
        else:
            plt.rcParams['axes.titlepad'] = 6
        ax.set_title(titles[i])
    # ax = remove_regressors_and_set_title(ax)
    # gl_list[0].ylabels_right = False
    # gl_list[2].ylabels_left = False
    # try:
    #     gl_list[3].ylabels_right = False
    # except IndexError:
    #     pass
    fg.fig.tight_layout()
    fg.fig.subplots_adjust(top=0.92,
                           bottom=0.16,
                           left=0.065,
                           right=0.935,
                           hspace=0.0,
                           wspace=0.208)
    print('Caption: ')
    print('The adjusted R^2 for the water vapor anomalies MLR analysis in the 82 hPa level with CH4 ,ENSO, and pressure level lag varied QBO as predictors. This MLR spans from 2004 to 2018')
    filename = 'MLR_H2O_r2_map_82_eta_with_co2.png'
    if save:
        plt.savefig(savefig_path / filename, bbox_inches='tight')
    return fg
def produce_rds_etas(eta=1, cpt_source='randel'):
    """Run produce_MLR_2D_for_figs_6_and_7 with one of four predictor sets:
    eta=1 : co2, anom_nino3p4, qbo_lagged
    eta=2 : co2, anom_nino3p4, qbo_lagged, T500, BDC
    eta=3 : co2, anom_nino3p4, qbo_lagged + 6XCPT_lagged
    eta=4 : co2, anom_nino3p4, qbo_lagged, anom_nino3p4^2, qbo_laggedXanom_nino3p4
    co2 is automatically added.

    cpt_source selects the cold-point-temperature regressor for eta=3:
    'randel' -> 'radio_cold_no_qbo', 'sean' -> 'cpt_ERA5'.

    Returns the MLR results xr.Dataset from make_results_for_MLR.
    """
    pred = ['qbo_cdas', 'anom_nino3p4']
    if eta == 1:
        print('producing eta {} with {}'.format(eta, pred))
        rds = produce_MLR_2D_for_figs_6_and_7(pred, add_enso2=False)
    elif eta == 2:
        pred = pred + ['era5_bdc70', 'era5_t500']
        print('producing eta {} with {}'.format(eta, pred))
        rds = produce_MLR_2D_for_figs_6_and_7(pred, add_enso2=False)
    elif eta == 3:
        # eta=3 replaces QBO/ENSO with 6 months of lagged cold-point temps
        if cpt_source == 'randel':
            pred = ['radio_cold_no_qbo']
            rds = produce_MLR_2D_for_figs_6_and_7(pred, add_enso2=False, reg_shift=['radio_cold_no_qbo', 6])
        elif cpt_source == 'sean':
            pred = ['cpt_ERA5']
            rds = produce_MLR_2D_for_figs_6_and_7(pred, add_enso2=False, reg_shift=['cpt_ERA5', 6])
        print('producing eta {} with {}'.format(eta, pred))
    elif eta == 4:
        print('producing eta {} with {} and enso^2'.format(eta, pred))
        rds = produce_MLR_2D_for_figs_6_and_7(pred, add_enso2=True)
    return rds
def produce_MLR_2D_for_figs_6_and_7(predictors=['qbo_cdas', 'anom_nino3p4'],
                                    lag={'qbo_cdas': 5}, add_enso2=True,
                                    reg_shift=None):
    """Fit a per-gridpoint (lat-lon, 82 hPa) MLR of SWOOSH H2O anomalies on
    *predictors* over 2005-2019 and return the results Dataset
    (params / predict / r2 / original) from make_results_for_MLR.

    Parameters
    ----------
    predictors : list of regressor names for produce_X.
    lag : dict {regressor: months} — applied only if its key is in predictors.
    add_enso2 : bool — add ENSO^2 and QBO*ENSO interaction terms.
    reg_shift : [regressor, n] — add 1..n monthly-shifted copies (see produce_X).
    """
    from sklearn.linear_model import LinearRegression
    # drop the lag spec if it refers to a regressor we are not using:
    if [x for x in lag.keys()][0] not in predictors:
        lag = None
    X = produce_X(lag=lag, regressors=predictors, add_co2=True,
                  reg_shift=reg_shift, standertize=False)
    if add_enso2:
        X = add_enso2_and_enso_qbo_to_X(X)
    X = X.sel(time=slice('2005', '2019'))
    y = produce_y(detrend=None, lat_band_mean=None, plevel=82, deseason='std',
                  filename='swoosh_lonlatpress-20deg-5deg.nc', sw_var='combinedanomh2oq')
    y = y.sel(lat=slice(-60, 60))
    y = y.sel(time=X.time)
    lr = LinearRegression()
    rds = make_results_for_MLR(lr, X, y)
    return rds
def make_results_for_MLR(lr, X_train, y_train, X_test=None, y_test=None):
    """Fit linear estimator *lr* on (X_train, y_train) and package the results
    as an xr.Dataset with 'params' (betas), 'predict', 'r2' and 'original'.

    Supports 1-D targets (time series) and N-D targets (e.g. time x lat x lon,
    which are stacked into a 'targets' dim and unstacked on output). When
    X_test/y_test are given, 'predict' and 'r2' are evaluated on them instead
    of the training data.

    NOTE(review): in the multi-target branch, 'time' is always taken from
    ys_train even when predicting on X_test — if train and test windows have
    different lengths this looks inconsistent; confirm (the 1-D branch handles
    it correctly).
    """
    import xarray as xr
    from sklearn.metrics import r2_score
    if len(y_train.dims) > 1:
        # assume sample dim is time:
        target_dims = [x for x in y_train.dims if x != 'time']
        # infer reg_dim from X:
        reg_dim = [x for x in X_train.dims if x != 'time'][0]
        ys_train = y_train.stack(targets=target_dims)
        # fit the model:
        lr.fit(X_train, ys_train)
        rds = xr.Dataset()
        # produce beta:
        rds['params'] = xr.DataArray(lr.coef_, dims=['targets', reg_dim])
        # produce predict:
        if X_test is not None:
            rds['predict'] = xr.DataArray(lr.predict(X_test), dims=['time', 'targets'])
        else:
            rds['predict'] = xr.DataArray(lr.predict(X_train), dims=['time', 'targets'])
        # produce R^2:
        if y_test is not None:
            ys_test = y_test.stack(targets=target_dims)
            r2 = r2_score(ys_test, rds['predict'], multioutput='raw_values')
        else:
            r2 = r2_score(ys_train, rds['predict'], multioutput='raw_values')
        rds['r2'] = xr.DataArray(r2, dims='targets')
        # dims:
        rds[reg_dim] = X_train[reg_dim]
        rds['time'] = ys_train['time']
        rds['targets'] = ys_train['targets']
        # unstack:
        rds = rds.unstack('targets')
        rds['original'] = y_train
        rds.attrs['sample_dim'] = 'time'
        rds.attrs['feature_dim'] = 'regressor'
    elif len(y_train.dims) == 1:
        reg_dim = [x for x in X_train.dims if x != 'time'][0]
        # fit the model:
        lr.fit(X_train, y_train)
        rds = xr.Dataset()
        # produce beta:
        rds['params'] = xr.DataArray(lr.coef_, dims=[reg_dim])
        # produce predict:
        if X_test is not None:
            rds['predict'] = xr.DataArray(lr.predict(X_test), dims=['time'])
            rds['time'] = y_test['time']
        else:
            rds['predict'] = xr.DataArray(lr.predict(X_train), dims=['time'])
            rds['time'] = y_train['time']
        # produce R^2:
        if y_test is not None:
            r2 = r2_score(y_test, rds['predict'])
        else:
            r2 = r2_score(y_train, rds['predict'])
        rds['r2'] = xr.DataArray(r2)
        # dims:
        rds[reg_dim] = X_train[reg_dim]
        rds['original'] = y_train
        rds.attrs['sample_dim'] = 'time'
        rds.attrs['feature_dim'] = 'regressor'
    return rds
def plot_forecast_busts_lines_datetime(ax, color='r', style='--'):
    """Mark the three forecast-bust periods with vertical lines on *ax*.

    The three busts are 2010-Dec..2011-JFM, 2015-OND and 2016-OND; each is
    bounded by a start/end line, giving six lines in total.

    Parameters
    ----------
    ax : matplotlib Axes with a datetime x-axis.
    color, style : passed to axvline as c/ls.

    Returns
    -------
    The same *ax*, for chaining.
    """
    import pandas as pd
    dts = ['2010-11', '2011-04', '2015-09', '2016-01', '2016-09', '2017-01']
    # plain loops instead of side-effect list comprehensions:
    for dt in dts:
        ax.axvline(pd.to_datetime(dt), c=color, ls=style)
    return ax
def plot_model_predictions(da):
    """Time-series plot of all model predictions vs. SWOOSH observations.

    Run produce_CV_predictions_for_all_HP_optimized_models first; *da* is its
    output DataArray with a 'model/obs.' dimension. Returns the Figure.
    """
    import seaborn as sns
    import matplotlib.pyplot as plt
    from aux_functions_strat import convert_da_to_long_form_df
    from matplotlib.ticker import (MultipleLocator, AutoMinorLocator)
    sns.set_theme(style='ticks', font_scale=1.5)
    df = convert_da_to_long_form_df(da)
    fig, ax = plt.subplots(figsize=(18, 5))
    ax = sns.lineplot(data=df, x='time', y='value', hue='model/obs.',
                      legend=True)
    # thicken the 5th line (the SWOOSH observations, appended last):
    lw = ax.lines[4].get_linewidth() # lw of first line
    plt.setp(ax.lines[4], linewidth=2.5)
    ax.grid(True)
    ax.set_xlabel('')
    ax.set_ylabel(r'H$_{2}$O anomalies [std]')
    ax.xaxis.set_minor_locator(AutoMinorLocator())
    ax.xaxis.grid(True, which='minor')
    legend = ax.legend(prop={'size': 13}, ncol=5, loc='upper left')
    plot_forecast_busts_lines_datetime(ax, color='k')
    fig.tight_layout()
    # get handles and labels of legend:
    hands, labes = ax.get_legend_handles_labels()
    colors = [x.get_color() for x in hands]
    # change the text labels to the colors of the lines:
    for i, text in enumerate(legend.get_texts()):
        text.set_color(colors[i])
    return fig
def add_enso2_and_enso_qbo_to_X(X):
    """Augment predictor matrix *X* with degree-2 polynomial terms of the QBO
    and ENSO regressors (ENSO^2 and QBO*ENSO), excluding QBO^2.

    Regressors whose names contain 'qbo' or 'nino' are polynomially expanded;
    all other regressors are passed through unchanged.
    """
    import xarray as xr
    from ML_OOP_stratosphere_gases import poly_features
    feats = [x for x in X.regressor.values if 'qbo' in x or 'nino' in x]
    other_feats = [x for x in X.regressor.values if 'qbo' not in x and 'nino' not in x]
    X1 = poly_features(X.sel(regressor=feats), feature_dim='regressor')
    # keep the interaction and ENSO^2 terms, but drop QBO^2:
    X1 = X1.drop_sel(regressor='qbo_cdas^2')
    X = xr.concat([X.sel(regressor=other_feats), X1], 'regressor')
    return X
def produce_CV_predictions_for_all_HP_optimized_models(path=ml_path,
                                                       cv='kfold'):
    """Cross-validated predictions (1994-2019) for the RF, SVM, MLP and MLR
    models, each with its saved optimized hyper-parameters, plus the SWOOSH
    observations, returned as one DataArray with a 'model/obs.' dimension.

    cv : 'kfold' or 'logo' (see cross_val_predict_da).
    """
    import xarray as xr
    X = produce_X(syear='1994', eyear='2019', add_co2=False)
    y = produce_y(detrend='lowess', lat_band_mean=[-15, 15], syear='1994', eyear='2019')
    ml = ML_Classifier_Switcher()
    das = []
    for model_name in ['RF', 'SVM', 'MLP', 'MLR']:
        print('preforming LOO with yearly group for {}.'.format(model_name))
        model = ml.pick_model(model_name)
        # MLR has no tuned hyper-parameters to restore:
        if model_name != 'MLR':
            model.set_params(**get_HP_params_from_optimized_model(path=path, model=model_name))
        da = cross_val_predict_da(model, X, y, cv=cv)
        da.name = model_name + ' model'
        das.append(da)
    ds = xr.merge(das)
    ds['SWOOSH'] = y
    da = ds.to_array('model/obs.')
    da.name = 'h2o'
    return da
def cross_val_predict_da(estimator, X, y, cv='kfold'):
    """Out-of-sample cross-validated prediction of *y*, as an xr.DataArray.

    Parameters
    ----------
    estimator : sklearn estimator (unfitted).
    X : xr.DataArray (time, regressor) predictor matrix.
    y : xr.DataArray (time,) target series.
    cv : {'kfold', 'logo'}
        'kfold' : 5-fold shuffled KFold (random_state=1).
        'logo'  : leave-one-group-out, grouped by calendar year.

    Returns
    -------
    xr.DataArray shaped like *y* holding the CV predictions; the estimator
    name and its hyper-parameters are recorded in the attrs.

    Raises
    ------
    ValueError
        If *cv* is not a supported strategy (previously an unknown value
        silently fell through and crashed later with NameError on ``cvr``).
    """
    from sklearn.model_selection import LeaveOneGroupOut
    from sklearn.model_selection import KFold
    from sklearn.model_selection import cross_val_predict
    if cv == 'logo':
        logo = LeaveOneGroupOut()
        groups = X['time'].dt.year
        cvr = cross_val_predict(estimator, X, y, groups=groups, cv=logo)
    elif cv == 'kfold':
        kfold = KFold(n_splits=5, shuffle=True, random_state=1)
        cvr = cross_val_predict(estimator, X, y, cv=kfold)
    else:
        raise ValueError("cv must be 'kfold' or 'logo', got {!r}".format(cv))
    da_ts = y.copy(data=cvr)
    da_ts.attrs['estimator'] = estimator.__repr__().split('(')[0]
    da_ts.name = da_ts.name + '_' + da_ts.attrs['estimator']
    # record the estimator hyper-parameters alongside the predictions:
    for key, value in estimator.get_params().items():
        da_ts.attrs[key] = value
    return da_ts
import seaborn as sns
import numpy as np
import matplotlib.pyplot as plt
def change_width(ax, new_value):
    """Set every bar patch on *ax* to width *new_value*, keeping each bar
    centered on its original midpoint."""
    for bar in ax.patches:
        shift = bar.get_width() - new_value
        bar.set_width(new_value)
        # move x by half the width delta so the bar center is unchanged
        bar.set_x(bar.get_x() + shift * 0.5)
def show_values_on_bars(axs, fs=12, fw='bold', exclude_bar_num=None):
    """Annotate each bar on *axs* (Axes or ndarray of Axes) with its height.

    NOTE(review): the second half of this function (from sns.set_theme on)
    references ``fi_da`` which is never defined here, and it calls
    show_values_on_bars recursively — this looks like the body of a separate
    feature-importance plotting function (e.g. plot_feature_importances(fi_da))
    that was accidentally merged into this one. Calling it as-is raises
    NameError; confirm against version control.

    NOTE(review): with exclude_bar_num=None (the default) the condition below
    is always False, so no labels are drawn at all — confirm intended.
    """
    import numpy as np
    def _show_on_single_plot(ax, exclude_bar_num=3):
        # draw each bar's height just above it, skipping bar #exclude_bar_num
        for i, p in enumerate(ax.patches):
            if i != exclude_bar_num and exclude_bar_num is not None:
                _x = p.get_x() + p.get_width() / 2
                _y = p.get_y() + p.get_height()
                value = '{:.1f}'.format(p.get_height())
                ax.text(_x, _y, value, ha="right",
                        fontsize=fs, fontweight=fw, zorder=20)
    if isinstance(axs, np.ndarray):
        for idx, ax in np.ndenumerate(axs):
            _show_on_single_plot(ax, exclude_bar_num)
    else:
        _show_on_single_plot(axs, exclude_bar_num)
    # --- everything below belongs to the suspected merged-in function ---
    sns.set_theme(style='ticks', font_scale=1.5)
    fi_da['regressor'] = ['QBO', 'ENSO']
    df = fi_da.to_dataframe('feature_importance') * 100.0
    df = df.unstack().melt()
    fig, ax = plt.subplots(figsize=(6, 8))
    sns.barplot(data=df, x='regressor', y='value', orient='v', ci='sd',
                ax=ax, hue='regressor', estimator=np.mean, dodge=False)
    ax.set_xlabel('')
    ax.set_ylabel('Feature Importance [%]')
    show_values_on_bars(ax, fs=16, exclude_bar_num=1)
    change_width(ax, 0.31)
    ax.legend(loc='upper right')
    fig.tight_layout()
    return fig
def plot_repeated_kfold_dist(df, model_dict, X, y):
    """Faceted histograms (with KDE) of the repeated-k-fold R^2 distribution
    per model, annotated with mean/median/std and the in-sample R^2.

    Run assemble_cvr_dataframe first (with strategy=None and add_MLR2) to get
    *df* (models x folds scores) and *model_dict* (name -> fitted estimator).
    X, y are the full design matrix and target used for the in-sample scores.

    Returns the seaborn FacetGrid.
    """
    import seaborn as sns
    sns.set_theme(style='ticks', font_scale=1.5)
    in_sample_r2 = {}
    # MLR2 is the MLR with added ENSO^2 / QBO*ENSO terms, so it needs X2:
    X2 = add_enso2_and_enso_qbo_to_X(X)
    for model_name, model in model_dict.items():
        if model_name == 'MLR2':
            model.fit(X2, y)
            in_sample_r2[model_name] = model.score(X2, y)
        else:
            model.fit(X, y)
            in_sample_r2[model_name] = model.score(X, y)
    print(in_sample_r2)
    df_melted = df.T.melt(var_name='model', value_name=r'R$^2$')
    pal = ['tab:blue', 'tab:orange', 'tab:green', 'tab:red', 'tab:pink']
    fg = sns.displot(data=df_melted, x=r'R$^2$', col="model",
                     kind="hist", col_wrap=2, hue='model', stat='density',
                     kde=True, palette=pal)
    letter = ['a', 'b', 'c', 'd', 'e']
    for i, ax in enumerate(fg.axes):
        # retitle each facet as "(a) model = NAME" and annotate the stats box
        label = ax.title.get_text()
        model = label.split('=')[-1].strip()
        title = '({}) model = {}'.format(letter[i], model)
        ax.set_title(title)
        mean = df.T.mean().loc[model]
        std = df.T.std().loc[model]
        median = df.T.median().loc[model]
        in_sample = in_sample_r2[model]
        textstr = '\n'.join((
            r'$\mathrm{mean}=%.2f$' % (mean, ),
            r'$\mathrm{median}=%.2f$' % (median, ),
            r'$\mathrm{std}=%.2f$' % (std, ),
            r'in sample result$=%.2f$' % (in_sample, )))
        props = dict(boxstyle='round', facecolor='wheat', alpha=0.5)
        ax.text(0.05, 0.95, textstr, transform=ax.transAxes, fontsize=14,
                verticalalignment='top', bbox=props)
    # fg.fig.suptitle('Out of sample testing models comparison')
    # fg.fig.subplots_adjust(top=0.916)
    # fg.fig.tight_layout()
    return fg
def assemble_cvr_dataframe(path=ml_path, score='test_r2', n_splits=5,
                           strategy='LOGO-year', add_MLR2=False):
    """Cross-validate the RF, SVM, MLP and MLR models (each with its saved
    optimized hyper-parameters) and collect the per-fold *score* values.

    Parameters
    ----------
    path : Path to the saved GRSRCHCV pickles.
    score : which cross_validate score column to collect (e.g. 'test_r2').
    n_splits, strategy : forwarded to cross_validate_using_optimized_HP.
    add_MLR2 : bool
        Also run an 'MLR2' variant (MLR with ENSO^2 and QBO*ENSO terms).
        Previously this extra (expensive) cross-validation was executed even
        when add_MLR2 was False and its result discarded; it is now run only
        on demand, and the frame-assembly code is no longer duplicated.

    Returns
    -------
    (df, model_dict) : scores DataFrame (index = model names, columns =
    'kfold_1'..'kfold_k') and dict of model name -> estimator.
    """
    import pandas as pd
    model_dict = {}
    score_rows = []
    for name in ['RF', 'SVM', 'MLP', 'MLR']:
        cvr, fitted = cross_validate_using_optimized_HP(
            path, model=name, n_splits=n_splits, strategy=strategy)
        model_dict[name] = fitted
        score_rows.append(cvr[score])
    if add_MLR2:
        cvr2, fitted2 = cross_validate_using_optimized_HP(
            path, model='MLR', n_splits=n_splits, strategy=strategy,
            add_MLR2=True)
        model_dict['MLR2'] = fitted2
        score_rows.append(cvr2[score])
    df = pd.DataFrame(score_rows)
    df.index = list(model_dict.keys())
    df.columns = ['kfold_{}'.format(x + 1) for x in range(len(df.columns))]
    return df, model_dict
def cross_validate_using_optimized_HP(path=ml_path, model='SVM', n_splits=5,
                                      n_repeats=20, strategy='LOGO-year',
                                      scorers=['r2', 'r2_adj',
                                               'neg_mean_squared_error',
                                               'explained_variance'],
                                      add_MLR2=False):
    """Cross-validate one model (with its saved optimized hyper-parameters)
    on the 1994-2019 H2O record and return (cvr, estimator).

    strategy : 'LOGO-year' (leave one calendar year out), 'GSS-year'
    (grouped shuffle split by year), or anything else for a seeded
    RepeatedKFold(n_splits, n_repeats).
    add_MLR2 : add ENSO^2 / QBO*ENSO terms to X (meaningful for MLR).
    MLR models skip hyper-parameter loading (nothing to tune).
    """
    from sklearn.model_selection import cross_validate
    from sklearn.model_selection import TimeSeriesSplit
    from sklearn.model_selection import KFold
    from sklearn.model_selection import RepeatedKFold
    from sklearn.model_selection import LeaveOneGroupOut
    from sklearn.model_selection import GroupShuffleSplit
    logo = LeaveOneGroupOut()
    gss = GroupShuffleSplit(n_splits=20, test_size=0.1, random_state=1)
    from sklearn.metrics import make_scorer
    X = produce_X(syear='1994', eyear='2019', add_co2=False)
    if add_MLR2:
        X = add_enso2_and_enso_qbo_to_X(X)
        print('adding ENSO^2 and ENSO*QBO')
    y = produce_y(detrend='lowess', lat_band_mean=[-15, 15], syear='1994', eyear='2019')
    # group label per sample = calendar year (for LOGO / GSS):
    groups = X['time'].dt.year
    scores_dict = {s: s for s in scorers}
    if 'r2_adj' in scorers:
        scores_dict['r2_adj'] = make_scorer(r2_adj_score)
    if 'MLR' not in model:
        hp_params = get_HP_params_from_optimized_model(path, model)
    ml = ML_Classifier_Switcher()
    ml_model = ml.pick_model(model_name=model)
    if 'MLR' not in model:
        ml_model.set_params(**hp_params)
    print(ml_model)
    # cv = TimeSeriesSplit(5)
    # cv = KFold(10, shuffle=True, random_state=1)
    cv = RepeatedKFold(n_splits=n_splits, n_repeats=n_repeats,
                       random_state=1)
    if strategy == 'LOGO-year':
        print('using LeaveOneGroupOut strategy.')
        cvr = cross_validate(ml_model, X, y, scoring=scores_dict, cv=logo,
                             groups=groups)
    elif strategy == 'GSS-year':
        print('using GroupShuffleSplit strategy.')
        cvr = cross_validate(ml_model, X, y, scoring=scores_dict, cv=gss,
                             groups=groups)
    else:
        cvr = cross_validate(ml_model, X, y, scoring=scores_dict, cv=cv)
    return cvr, ml_model
def manual_cross_validation_for_RF_feature_importances(rf_model, n_splits=5, n_repeats=20, scorers=['r2', 'r2_adj',
                                                                                                    'neg_mean_squared_error',
                                                                                                    'explained_variance']):
    """Repeated-k-fold refitting of *rf_model* to collect the distribution of
    random-forest feature importances.

    Fits the model on each of n_splits x n_repeats train folds (seeded
    RepeatedKFold) and stacks feature_importances_ into an xr.DataArray with
    dims (repeats, regressor).

    NOTE(review): the scorers/make_scorer setup below is built but never
    used (no cross_validate call here) — it appears to be leftover from the
    scoring variant of this routine.
    """
    from sklearn.model_selection import KFold
    import xarray as xr
    import numpy as np
    from sklearn.model_selection import RepeatedKFold
    from sklearn.metrics import make_scorer
    scores_dict = {s: s for s in scorers}
    if 'r2_adj' in scorers:
        scores_dict['r2_adj'] = make_scorer(r2_adj_score)
    print(rf_model)
    X = produce_X(syear='1994', eyear='2019')
    y = produce_y(syear='1994', eyear='2019')
    # cv = TimeSeriesSplit(5)
    # cv = KFold(10, shuffle=True, random_state=1)
    cv = RepeatedKFold(n_splits=n_splits, n_repeats=n_repeats,
                       random_state=1)
    fis = []
    for train_index, test_index in cv.split(X):
        # print("TRAIN:", train_index, "TEST:", test_index)
        X_train, X_test = X[train_index], X[test_index]
        y_train, y_test = y[train_index], y[test_index]
        rf_model.fit(X_train, y_train)
        fis.append(rf_model.feature_importances_)
    fi = xr.DataArray(fis, dims=['repeats', 'regressor'])
    fi['repeats'] = np.arange(1, len(fis)+1)
    fi['regressor'] = X['regressor']
    return fi
def get_HP_params_from_optimized_model(path=ml_path, model='SVM'):
    """Load the saved GridSearchCV pickle for *model* and return its best
    hyper-parameters (the highest-mean-score row) as a plain dict.

    The first GRSRCHCV_*.pkl whose filename contains *model* is used; the
    trailing two columns of the best-results frame (mean_score, std_score)
    are stripped before converting to a dict.
    """
    import joblib
    from aux_functions_strat import path_glob
    files = path_glob(path, 'GRSRCHCV_*.pkl')
    # first pickle whose name contains the model string:
    file = [x for x in files if model in x.as_posix()][0]
    gr = joblib.load(file)
    df = read_one_gridsearchcv_object(gr)
    return df.iloc[0][:-2].to_dict()
def produce_X(regressors=['qbo_cdas', 'anom_nino3p4'],
              lag={'qbo_cdas': 5}, add_co2=True, standertize=False,
              reg_shift=None, syear=None, eyear=None):
    """Build the (time, regressor) predictor DataArray.

    reg_shift is dict = {regressor: n} where n is the number of times to
    shift backwards one month.

    Parameters
    ----------
    regressors : regressor names to pull from load_all_regressors.
    lag : dict {regressor: months} — shift those regressors back in time.
        Pass None to disable lagging entirely.
    add_co2 : add the loess CO2 trend as an extra regressor (re-standardized
        after any year slicing).
    standertize : z-score all regressors before adding CO2.
    reg_shift : [regressor_name, n] (list, despite the docstring above) —
        adds 1..n monthly-shifted copies via regressor_shift.
    syear, eyear : optional start/end years (strings) to slice by.

    NOTE(review): the list/dict defaults are mutable but never mutated here;
    they cannot be swapped for None sentinels because lag=None is itself a
    meaningful explicit argument for callers.
    """
    from make_regressors import load_all_regressors
    from ML_OOP_stratosphere_gases import regressor_shift
    import xarray as xr
    ds = load_all_regressors()
    ds = ds[regressors].dropna('time')
    if lag is not None:
        for key, value in lag.items():
            print(key, value)
            ds[key] = ds[key].shift(time=value)
    if standertize:
        ds = (ds - ds.mean('time')) / ds.std('time')
    if add_co2:
        ds['co2'] = produce_co2_trend()
    if reg_shift is not None:
        dss = regressor_shift(ds[reg_shift[0]].dropna('time'), shifts=[1,reg_shift[-1]])
        ds = xr.merge([ds, dss])
    if syear is not None:
        ds = ds.sel(time=slice(syear, None))
    if eyear is not None:
        ds = ds.sel(time=slice(None, eyear))
    # re-standardize CO2 over the (possibly sliced) time window:
    if ((syear is not None) or (eyear is not None)) and add_co2:
        ds['co2'] = (ds['co2'] - ds['co2'].mean('time')) / ds['co2'].std('time')
    X = ds.dropna('time').to_array('regressor')
    X = X.transpose('time', 'regressor')
    return X
def produce_y(path=work_chaim, detrend=None,
              sw_var='combinedeqfillanomfillh2oq', filename='swoosh_latpress-2.5deg.nc',
              lat_band_mean=[-5, 5], plevel=82, deseason='mean', standertize=True,
              syear=None, eyear=None):
    """Build the SWOOSH H2O anomaly target series/field.

    Parameters
    ----------
    path : directory holding the SWOOSH netCDF files.
    detrend : 'lowess' to remove a loess trend, None to keep the trend.
    sw_var, filename : SWOOSH variable and file to read.
    lat_band_mean : [south, north] band to (cos-lat weighted, via lat_mean)
        average over, or None to keep the full lat(/lon) field.
    plevel : pressure level (hPa) to select (nearest), or None for all.
    deseason : units arg for anomalize_xr ('mean'/'std'), or None to skip.
    standertize : z-score the result over time.
    syear, eyear : optional start/end years (strings).

    Returns
    -------
    xr.DataArray (time,) or (time, lat[, lon]) depending on the options.
    """
    import xarray as xr
    from aux_functions_strat import lat_mean
    from aux_functions_strat import detrend_ts
    from aux_functions_strat import anomalize_xr
    file = path / filename
    da = xr.open_dataset(file)[sw_var]
    if plevel is not None:
        da = da.sel(level=plevel, method='nearest')
    if lat_band_mean is not None:
        da = lat_mean(da.sel(lat=slice(lat_band_mean[0], lat_band_mean[1])))
    if detrend is not None:
        if detrend == 'lowess':
            print('lowess detrend for h2o')
            da = detrend_ts(da)
    if deseason is not None:
        print('deseasonlizing h2o...')
        da = anomalize_xr(da, freq='MS', units=deseason, time_dim='time')
    if standertize is not None:
        print('standertzing h2o')
        da = (da - da.mean('time')) / da.std('time')
    if syear is not None:
        print('picking {} as start year'.format(syear))
        da = da.sel(time=slice(syear, None))
    if eyear is not None:
        print('picking {} as end year'.format(eyear))
        da = da.sel(time=slice(None, eyear))
    y = da
    return y
def produce_co2_trend(standertize=True):
    """Smoothed (loess) CO2 trend regressor.

    Returns the loess-smoothed mean curve of the CO2 regressor, z-scored
    over time when standertize is True, otherwise the raw smoothed curve.
    """
    from make_regressors import load_all_regressors
    from aux_functions_strat import loess_curve
    ds = load_all_regressors()
    co2 = ds['co2'].dropna('time')
    trend = loess_curve(co2, plot=False)
    if standertize:
        # NOTE: the name co2 is rebound to the standardized *trend* here
        co2 = (trend['mean']-trend['mean'].mean('time')) / \
            trend['mean'].std('time')
        return co2
    else:
        return trend['mean']
def r2_adj_score(y_true, y_pred, **kwargs):
    """Adjusted R^2 score: 1 - (1 - R^2) * (n - 1) / (n - p).

    Parameters
    ----------
    y_true, y_pred : 1-D array-likes of equal length.
    **kwargs : optional 'p' — number of predictors (defaults to 2, matching
        the two-regressor QBO/ENSO setup used throughout this module).

    Returns
    -------
    float

    Notes
    -----
    R^2 is computed directly as 1 - SS_res/SS_tot, which equals
    sklearn.metrics.r2_score for 1-D inputs; the previous per-call sklearn
    import is avoided since this function is used as a scorer on a hot path.
    """
    import numpy as np
    y_true = np.asarray(y_true, dtype=float)
    y_pred = np.asarray(y_pred, dtype=float)
    ss_res = np.sum((y_true - y_pred) ** 2)
    ss_tot = np.sum((y_true - y_true.mean()) ** 2)
    r2 = 1.0 - ss_res / ss_tot
    n = len(y_true)
    p = kwargs.get('p', 2)
    r2_adj = 1.0 - (1.0 - r2) * (n - 1.0) / (n - p)
    # r2_adj = 1-(1-r2)*(n-1)/(n-p-1)
    return r2_adj
def Optimize_HP_per_model(test_size=0.1, model_name='SVM',
                          n_splits=5, savepath=None):
    """Grid-search the hyper-parameters of *model_name* on the 1994-2019
    H2O record and return the fitted GridSearchCV object.

    test_size : fraction held out before the search (unseeded split), or
    None to search on the full record. savepath : if given, the search
    object is pickled there by single_cross_validation.
    """
    from sklearn.model_selection import train_test_split
    X = produce_X(syear='1994', eyear='2019', add_co2=False)
    y = produce_y(detrend='lowess', lat_band_mean=[-15, 15], syear='1994', eyear='2019')
    if test_size is None:
        X_val = X
        y_val = y
    else:
        X_val, X_test, y_val, y_test = train_test_split(X, y, test_size=test_size)
    gr = single_cross_validation(X_val, y_val, model_name=model_name,
                                 n_splits=n_splits,
                                 savepath=savepath)
    return gr
def single_cross_validation(X_val, y_val, model_name='SVM',
                            n_splits=5, scorers=['r2', 'r2_adj',
                                                 'neg_mean_squared_error',
                                                 'explained_variance'],
                            seed=42, savepath=None, verbose=0,
                            param_grid='dense', n_jobs=-1):
    """Multi-metric GridSearchCV of *model_name* over its param grid.

    Parameters
    ----------
    X_val, y_val : validation design matrix / target (xarray, time-indexed).
    model_name : name dispatched by ML_Classifier_Switcher ('SVM', 'MLP', ...).
    n_splits : folds of the (shuffled, seeded) KFold.
    scorers : scoring names; 'r2_adj' is wired to the local r2_adj_score.
    seed : KFold random_state.
    savepath : if given, pickle the search object there (refit=False, so
        only cv_results_ are useful afterwards).
    param_grid : grid flavor passed to pick_model ('light'/'dense').

    Returns
    -------
    The fitted sklearn GridSearchCV object.
    """
    # from sklearn.model_selection import cross_validate
    from sklearn.model_selection import StratifiedKFold
    from sklearn.model_selection import KFold
    from sklearn.model_selection import TimeSeriesSplit
    from sklearn.model_selection import GridSearchCV
    # from sklearn.model_selection import train_test_split
    from sklearn.metrics import make_scorer
    # from string import digits
    # import numpy as np
    # import xarray as xr
    scores_dict = {s: s for s in scorers}
    if 'r2_adj' in scorers:
        scores_dict['r2_adj'] = make_scorer(r2_adj_score)
    # align X to y's time axis:
    X = X_val.dropna('time').sel(time=y_val['time'])
    y = y_val
    # if param_grid == 'light':
    #     print(np.unique(X.feature.values))
    # configure the cross-validation procedure
    # cv = TimeSeriesSplit(n_splits=n_splits)
    cv = KFold(n_splits=n_splits, random_state=seed, shuffle=True)
    # print('CV TimeSeriesKfolds of {}.'.format(n_splits))
    print('CV KFold of {}.'.format(n_splits))
    # define the model and search space:
    ml = ML_Classifier_Switcher()
    print('param grid group is set to {}.'.format(param_grid))
    # if outer_split == '1-1':
    #     cv_type = 'holdout'
    #     print('holdout cv is selected.')
    # else:
    #     cv_type = 'nested'
    #     print('nested cv {} out of {}.'.format(
    #         outer_split.split('-')[0], outer_split.split('-')[1]))
    sk_model = ml.pick_model(model_name, pgrid=param_grid)
    search_space = ml.param_grid
    # define search
    gr_search = GridSearchCV(estimator=sk_model, param_grid=search_space,
                             cv=cv, n_jobs=n_jobs,
                             scoring=scores_dict,
                             verbose=verbose,
                             refit=False, return_train_score=True)
    gr_search.fit(X, y)
    features = [x for x in X['regressor'].values]
    if savepath is not None:
        # filename encodes model, features, scorers, folds, grid and seed:
        filename = 'GRSRCHCV_{}_{}_{}_{}_{}_{}.pkl'.format(model_name, '+'.join(features), '+'.join(
            scorers), n_splits,
            param_grid, seed)
        save_gridsearchcv_object(gr_search, savepath, filename)
    return gr_search
def save_gridsearchcv_object(GridSearchCV, savepath, filename):
    """Persist a fitted grid-search object to savepath/filename via joblib.

    NOTE(review): the first parameter is named ``GridSearchCV``, shadowing
    the sklearn class name — it holds the fitted search *instance*.
    """
    import joblib
    print('{} was saved to {}'.format(filename, savepath))
    joblib.dump(GridSearchCV, savepath / filename)
    return
def load_one_gridsearchcv_object(path=ml_path, model_name='SVM', verbose=True):
    """Load one saved gridsearchcv pickle matching *model_name* and return
    its best-results DataFrame (via read_one_gridsearchcv_object).

    The first GRSRCHCV_*.pkl whose filename contains *model_name* is used.
    """
    from aux_functions_strat import path_glob
    import joblib
    # first filter for model name:
    if verbose:
        print('loading GridsearchCVs results for {} model'.format(model_name))
    model_files = path_glob(path, 'GRSRCHCV_*.pkl')
    model_files = [x for x in model_files if model_name in x.as_posix()]
    # now select features:
    # if verbose:
    #     print('loading GridsearchCVs results with {} features'.format(features))
    # model_features = [x.as_posix().split('/')[-1].split('_')[3] for x in model_files]
    # feat_ind = get_feature_set_from_list(model_features, features)
    # also get the test ratio and seed number:
    # if len(feat_ind) > 1:
    #     if verbose:
    #         print('found {} GR objects.'.format(len(feat_ind)))
    #     files = sorted([model_files[x] for x in feat_ind])
    #     outer_splits = [x.as_posix().split('/')[-1].split('.')[0].split('_')[-3] for x in files]
    #     grs = [joblib.load(x) for x in files]
    #     best_dfs = [read_one_gridsearchcv_object(x) for x in grs]
    #     di = dict(zip(outer_splits, best_dfs))
    #     return di
    # else:
    #     file = model_files[feat_ind]
    #     seed = file.as_posix().split('/')[-1].split('.')[0].split('_')[-1]
    #     outer_splits = file.as_posix().split('/')[-1].split('.')[0].split('_')[-3]
    # load and produce best_df:
    gr = joblib.load(model_files[0])
    best_df = read_one_gridsearchcv_object(gr)
    return best_df
def read_one_gridsearchcv_object(gr):
    """Read one gridsearchcv multimetric object and get the best params and
    best mean/std scores.

    Returns a DataFrame indexed by scorer name: each row holds the parameter
    combination with the highest mean test score for that scorer, plus
    'mean_score' and 'std_score' columns.
    """
    import pandas as pd
    # first get all the scorers used:
    scorers = [x for x in gr.scorer_.keys()]
    # now loop over the scorers:
    best_params = []
    best_mean_scores = []
    best_std_scores = []
    for scorer in scorers:
        # params table joined with the mean (resp. std) test score column:
        df_mean = pd.concat([pd.DataFrame(gr.cv_results_["params"]), pd.DataFrame(
            gr.cv_results_["mean_test_{}".format(scorer)], columns=[scorer])], axis=1)
        df_std = pd.concat([pd.DataFrame(gr.cv_results_["params"]), pd.DataFrame(
            gr.cv_results_["std_test_{}".format(scorer)], columns=[scorer])], axis=1)
        # best index = highest score:
        best_ind = df_mean[scorer].idxmax()
        best_mean_scores.append(df_mean.iloc[best_ind][scorer])
        best_std_scores.append(df_std.iloc[best_ind][scorer])
        # keep the params only (drop the trailing score column):
        best_params.append(df_mean.iloc[best_ind].to_frame().T.iloc[:, :-1])
    best_df = pd.concat(best_params)
    best_df['mean_score'] = best_mean_scores
    best_df['std_score'] = best_std_scores
    best_df.index = scorers
    return best_df
def order_of_mag(minimal=-5, maximal=1):
    """Return powers of ten from 10**minimal to 10**maximal (inclusive).

    Parameters
    ----------
    minimal : int
        Smallest exponent (default -5).
    maximal : int
        Largest exponent (default 1).

    Returns
    -------
    list of float
        [10.0**minimal, ..., 10.0**maximal].
    """
    # The exponents are plain consecutive integers, so the built-in range()
    # does the job; importing numpy for np.arange here was unnecessary.
    return [10.0 ** x for x in range(minimal, maximal + 1)]
class ML_Classifier_Switcher(object):
    """Dispatch an unfitted scikit-learn regressor by model name.

    ``pick_model(name, pgrid)`` stores the matching hyper-parameter grid
    (for the requested ``pgrid`` flavour) in ``self.param_grid`` and
    returns the estimator.  Note: for 'SVM' and 'MLP' only the 'light'
    and 'dense' flavours populate a grid; 'normal' leaves
    ``self.param_grid`` as None.
    """

    def pick_model(self, model_name, pgrid='normal'):
        """Return the estimator named ``model_name`` ('SVM', 'MLP', 'RF', 'MLR').

        ``pgrid`` selects which hyper-parameter grid flavour the builder
        method stores in ``self.param_grid``.
        """
        self.param_grid = None
        # Look up the builder method; unknown names fall back to a stub.
        builder = getattr(self, str(model_name), lambda: "Invalid ML Model")
        # Record the requested grid flavour for the builder to consult.
        self.pgrid = pgrid
        return builder()

    def SVM(self):
        """Support-vector regressor plus its parameter grid."""
        from sklearn.svm import SVR
        import numpy as np
        if self.pgrid == 'light':
            self.param_grid = {
                'kernel': ['poly'],
                'C': [0.1],
                'gamma': [0.0001],
                'degree': [1, 2],
                'coef0': [1, 4],
            }
        elif self.pgrid == 'dense':
            self.param_grid = {
                'kernel': ['rbf', 'sigmoid', 'linear'],
                'C': np.logspace(-2, 2, 10),
                'gamma': np.logspace(-5, 1, 14),
            }
        return SVR()

    def MLP(self):
        """Multi-layer perceptron regressor plus its parameter grid."""
        import numpy as np
        from sklearn.neural_network import MLPRegressor
        if self.pgrid == 'light':
            self.param_grid = {
                'activation': ['identity', 'relu'],
                'hidden_layer_sizes': [(50, 50, 50), (50, 100, 50)],
            }
        elif self.pgrid == 'dense':
            self.param_grid = {
                'alpha': np.logspace(-5, 1, 7),
                'activation': ['identity', 'logistic', 'tanh', 'relu'],
                'hidden_layer_sizes': [(10, 10, 10), (10, 20, 10), (10,), (5,), (1,)],
                'learning_rate': ['constant'],
                'solver': ['adam', 'sgd'],
            }
        return MLPRegressor(random_state=42, max_iter=500, learning_rate_init=0.1)

    def RF(self):
        """Random-forest regressor plus its parameter grid."""
        from sklearn.ensemble import RandomForestRegressor
        if self.pgrid == 'light':
            self.param_grid = {'max_features': ['auto', 'sqrt']}
        elif self.pgrid == 'normal':
            self.param_grid = {
                'max_depth': [5, 10, 25, 50, 100],
                'max_features': ['auto', 'sqrt'],
                'min_samples_leaf': [1, 2, 5, 10],
                'min_samples_split': [2, 5, 15, 50],
                'n_estimators': [100, 300, 700, 1200],
            }
        elif self.pgrid == 'dense':
            self.param_grid = {
                'max_depth': [2, 5, 10],
                'max_features': ['auto', 'sqrt'],
                'min_samples_leaf': [1, 2],
                'min_samples_split': [2, 5],
                'n_estimators': [50, 100, 400],
            }
        return RandomForestRegressor(random_state=42, n_jobs=-1)

    def MLR(self):
        """Plain multiple linear regression (no hyper-parameter grid)."""
        from sklearn.linear_model import LinearRegression
        return LinearRegression(n_jobs=-1)
|
[
"seaborn.lineplot",
"sklearn.model_selection.GridSearchCV",
"numpy.abs",
"sklearn.model_selection.cross_validate",
"sklearn.model_selection.train_test_split",
"aux_functions_strat.anomalize_xr",
"sklearn.metrics.r2_score",
"joblib.dump",
"numpy.logspace",
"aux_functions_strat.path_glob",
"numpy.arange",
"matplotlib.pyplot.gca",
"sklearn.model_selection.RepeatedKFold",
"pandas.DataFrame",
"matplotlib.pyplot.setp",
"matplotlib.ticker.FixedLocator",
"aux_functions_strat.convert_da_to_long_form_df",
"xarray.merge",
"sklearn.metrics.make_scorer",
"aux_functions_strat.loess_curve",
"sklearn.model_selection.LeaveOneGroupOut",
"matplotlib.pyplot.subplots",
"pandas.concat",
"seaborn.set_theme",
"numpy.ndenumerate",
"numpy.corrcoef",
"seaborn.barplot",
"xarray.concat",
"sklearn.linear_model.LinearRegression",
"xarray.Dataset",
"make_regressors.load_all_regressors",
"matplotlib.ticker.AutoMinorLocator",
"pandas.to_datetime",
"sklearn.ensemble.RandomForestRegressor",
"pandas.Series",
"strato_figures.remove_regressors_and_set_title",
"shap.summary_plot",
"sklearn.model_selection.GroupShuffleSplit",
"sklearn.svm.SVR",
"seaborn.displot",
"aux_functions_strat.detrend_ts",
"xarray.open_dataset",
"shap.Explainer",
"sklearn.model_selection.KFold",
"sklearn.model_selection.cross_val_predict",
"sklearn.neural_network.MLPRegressor",
"numpy.where",
"xarray.DataArray",
"cartopy.crs.PlateCarree",
"joblib.load",
"matplotlib.pyplot.savefig"
] |
[((1556, 1600), 'seaborn.set_theme', 'sns.set_theme', ([], {'style': '"""ticks"""', 'font_scale': '(1.5)'}), "(style='ticks', font_scale=1.5)\n", (1569, 1600), True, 'import seaborn as sns\n'), ((1680, 1701), 'pandas.DataFrame', 'pd.DataFrame', (['df_shap'], {}), '(df_shap)\n', (1692, 1701), True, 'import pandas as pd\n'), ((2282, 2326), 'numpy.where', 'np.where', (["(corr_df['Corr'] > 0)", '"""red"""', '"""blue"""'], {}), "(corr_df['Corr'] > 0, 'red', 'blue')\n", (2290, 2326), True, 'import numpy as np\n'), ((2357, 2371), 'numpy.abs', 'np.abs', (['shap_v'], {}), '(shap_v)\n', (2363, 2371), True, 'import numpy as np\n'), ((3243, 3280), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y'], {'test_size': '(0.1)'}), '(X, y, test_size=0.1)\n', (3259, 3280), False, 'from sklearn.model_selection import train_test_split\n'), ((3467, 3491), 'shap.Explainer', 'shap.Explainer', (['rf_model'], {}), '(rf_model)\n', (3481, 3491), False, 'import shap\n'), ((3579, 3588), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (3586, 3588), True, 'import matplotlib.pyplot as plt\n'), ((3999, 4036), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y'], {'test_size': '(0.1)'}), '(X, y, test_size=0.1)\n', (4015, 4036), False, 'from sklearn.model_selection import train_test_split\n'), ((4324, 4348), 'shap.Explainer', 'shap.Explainer', (['rf_model'], {}), '(rf_model)\n', (4338, 4348), False, 'import shap\n'), ((4397, 4450), 'shap.summary_plot', 'shap.summary_plot', (['shap_values_rf', 'dfX'], {'plot_size': '(1.1)'}), '(shap_values_rf, dfX, plot_size=1.1)\n', (4414, 4450), False, 'import shap\n'), ((5000, 5018), 'sklearn.linear_model.LinearRegression', 'LinearRegression', ([], {}), '()\n', (5016, 5018), False, 'from sklearn.linear_model import LinearRegression\n'), ((5322, 5366), 'seaborn.set_theme', 'sns.set_theme', ([], {'style': '"""ticks"""', 'font_scale': '(1.5)'}), "(style='ticks', font_scale=1.5)\n", (5335, 5366), True, 'import seaborn 
as sns\n'), ((5381, 5416), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(2)', '(1)'], {'figsize': '(18, 7)'}), '(2, 1, figsize=(18, 7))\n', (5393, 5416), True, 'import matplotlib.pyplot as plt\n'), ((6739, 6783), 'seaborn.set_theme', 'sns.set_theme', ([], {'style': '"""ticks"""', 'font_scale': '(1.5)'}), "(style='ticks', font_scale=1.5)\n", (6752, 6783), True, 'import seaborn as sns\n'), ((6795, 6832), 'cartopy.crs.PlateCarree', 'ccrs.PlateCarree', ([], {'central_longitude': '(0)'}), '(central_longitude=0)\n', (6811, 6832), True, 'import cartopy.crs as ccrs\n'), ((10276, 10320), 'seaborn.set_theme', 'sns.set_theme', ([], {'style': '"""ticks"""', 'font_scale': '(1.5)'}), "(style='ticks', font_scale=1.5)\n", (10289, 10320), True, 'import seaborn as sns\n'), ((11217, 11278), 'xarray.concat', 'xr.concat', (["[x['r2'] for x in [rds1, rds2, rds3, rds4]]", '"""eta"""'], {}), "([x['r2'] for x in [rds1, rds2, rds3, rds4]], 'eta')\n", (11226, 11278), True, 'import xarray as xr\n'), ((11518, 11555), 'cartopy.crs.PlateCarree', 'ccrs.PlateCarree', ([], {'central_longitude': '(0)'}), '(central_longitude=0)\n', (11534, 11555), True, 'import cartopy.crs as ccrs\n'), ((16522, 16540), 'sklearn.linear_model.LinearRegression', 'LinearRegression', ([], {}), '()\n', (16538, 16540), False, 'from sklearn.linear_model import LinearRegression\n'), ((19820, 19864), 'seaborn.set_theme', 'sns.set_theme', ([], {'style': '"""ticks"""', 'font_scale': '(1.5)'}), "(style='ticks', font_scale=1.5)\n", (19833, 19864), True, 'import seaborn as sns\n'), ((19874, 19904), 'aux_functions_strat.convert_da_to_long_form_df', 'convert_da_to_long_form_df', (['da'], {}), '(da)\n', (19900, 19904), False, 'from aux_functions_strat import convert_da_to_long_form_df\n'), ((19919, 19948), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(18, 5)'}), '(figsize=(18, 5))\n', (19931, 19948), True, 'import matplotlib.pyplot as plt\n'), ((19958, 20031), 'seaborn.lineplot', 'sns.lineplot', ([], {'data': 
'df', 'x': '"""time"""', 'y': '"""value"""', 'hue': '"""model/obs."""', 'legend': '(True)'}), "(data=df, x='time', y='value', hue='model/obs.', legend=True)\n", (19970, 20031), True, 'import seaborn as sns\n'), ((20115, 20151), 'matplotlib.pyplot.setp', 'plt.setp', (['ax.lines[4]'], {'linewidth': '(2.5)'}), '(ax.lines[4], linewidth=2.5)\n', (20123, 20151), True, 'import matplotlib.pyplot as plt\n'), ((22025, 22038), 'xarray.merge', 'xr.merge', (['das'], {}), '(das)\n', (22033, 22038), True, 'import xarray as xr\n'), ((24120, 24164), 'seaborn.set_theme', 'sns.set_theme', ([], {'style': '"""ticks"""', 'font_scale': '(1.5)'}), "(style='ticks', font_scale=1.5)\n", (24133, 24164), True, 'import seaborn as sns\n'), ((24307, 24335), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(6, 8)'}), '(figsize=(6, 8))\n', (24319, 24335), True, 'import matplotlib.pyplot as plt\n'), ((24340, 24467), 'seaborn.barplot', 'sns.barplot', ([], {'data': 'df', 'x': '"""regressor"""', 'y': '"""value"""', 'orient': '"""v"""', 'ci': '"""sd"""', 'ax': 'ax', 'hue': '"""regressor"""', 'estimator': 'np.mean', 'dodge': '(False)'}), "(data=df, x='regressor', y='value', orient='v', ci='sd', ax=ax,\n hue='regressor', estimator=np.mean, dodge=False)\n", (24351, 24467), True, 'import seaborn as sns\n'), ((24858, 24902), 'seaborn.set_theme', 'sns.set_theme', ([], {'style': '"""ticks"""', 'font_scale': '(1.5)'}), "(style='ticks', font_scale=1.5)\n", (24871, 24902), True, 'import seaborn as sns\n'), ((25404, 25536), 'seaborn.displot', 'sns.displot', ([], {'data': 'df_melted', 'x': '"""R$^2$"""', 'col': '"""model"""', 'kind': '"""hist"""', 'col_wrap': '(2)', 'hue': '"""model"""', 'stat': '"""density"""', 'kde': '(True)', 'palette': 'pal'}), "(data=df_melted, x='R$^2$', col='model', kind='hist', col_wrap=2,\n hue='model', stat='density', kde=True, palette=pal)\n", (25415, 25536), True, 'import seaborn as sns\n'), ((28864, 28882), 'sklearn.model_selection.LeaveOneGroupOut', 'LeaveOneGroupOut', 
([], {}), '()\n', (28880, 28882), False, 'from sklearn.model_selection import LeaveOneGroupOut\n'), ((28893, 28954), 'sklearn.model_selection.GroupShuffleSplit', 'GroupShuffleSplit', ([], {'n_splits': '(20)', 'test_size': '(0.1)', 'random_state': '(1)'}), '(n_splits=20, test_size=0.1, random_state=1)\n', (28910, 28954), False, 'from sklearn.model_selection import GroupShuffleSplit\n'), ((29766, 29835), 'sklearn.model_selection.RepeatedKFold', 'RepeatedKFold', ([], {'n_splits': 'n_splits', 'n_repeats': 'n_repeats', 'random_state': '(1)'}), '(n_splits=n_splits, n_repeats=n_repeats, random_state=1)\n', (29779, 29835), False, 'from sklearn.model_selection import RepeatedKFold\n'), ((31259, 31328), 'sklearn.model_selection.RepeatedKFold', 'RepeatedKFold', ([], {'n_splits': 'n_splits', 'n_repeats': 'n_repeats', 'random_state': '(1)'}), '(n_splits=n_splits, n_repeats=n_repeats, random_state=1)\n', (31272, 31328), False, 'from sklearn.model_selection import RepeatedKFold\n'), ((31683, 31731), 'xarray.DataArray', 'xr.DataArray', (['fis'], {'dims': "['repeats', 'regressor']"}), "(fis, dims=['repeats', 'regressor'])\n", (31695, 31731), True, 'import xarray as xr\n'), ((31973, 32006), 'aux_functions_strat.path_glob', 'path_glob', (['path', '"""GRSRCHCV_*.pkl"""'], {}), "(path, 'GRSRCHCV_*.pkl')\n", (31982, 32006), False, 'from aux_functions_strat import path_glob\n'), ((32074, 32091), 'joblib.load', 'joblib.load', (['file'], {}), '(file)\n', (32085, 32091), False, 'import joblib\n'), ((32603, 32624), 'make_regressors.load_all_regressors', 'load_all_regressors', ([], {}), '()\n', (32622, 32624), False, 'from make_regressors import load_all_regressors\n'), ((34980, 35001), 'make_regressors.load_all_regressors', 'load_all_regressors', ([], {}), '()\n', (34999, 35001), False, 'from make_regressors import load_all_regressors\n'), ((35049, 35077), 'aux_functions_strat.loess_curve', 'loess_curve', (['co2'], {'plot': '(False)'}), '(co2, plot=False)\n', (35060, 35077), False, 'from 
aux_functions_strat import loess_curve\n'), ((35351, 35375), 'sklearn.metrics.r2_score', 'r2_score', (['y_true', 'y_pred'], {}), '(y_true, y_pred)\n', (35359, 35375), False, 'from sklearn.metrics import r2_score\n'), ((37467, 37524), 'sklearn.model_selection.KFold', 'KFold', ([], {'n_splits': 'n_splits', 'random_state': 'seed', 'shuffle': '(True)'}), '(n_splits=n_splits, random_state=seed, shuffle=True)\n', (37472, 37524), False, 'from sklearn.model_selection import KFold\n'), ((38161, 38325), 'sklearn.model_selection.GridSearchCV', 'GridSearchCV', ([], {'estimator': 'sk_model', 'param_grid': 'search_space', 'cv': 'cv', 'n_jobs': 'n_jobs', 'scoring': 'scores_dict', 'verbose': 'verbose', 'refit': '(False)', 'return_train_score': '(True)'}), '(estimator=sk_model, param_grid=search_space, cv=cv, n_jobs=\n n_jobs, scoring=scores_dict, verbose=verbose, refit=False,\n return_train_score=True)\n', (38173, 38325), False, 'from sklearn.model_selection import GridSearchCV\n'), ((38932, 38978), 'joblib.dump', 'joblib.dump', (['GridSearchCV', '(savepath / filename)'], {}), '(GridSearchCV, savepath / filename)\n', (38943, 38978), False, 'import joblib\n'), ((39386, 39419), 'aux_functions_strat.path_glob', 'path_glob', (['path', '"""GRSRCHCV_*.pkl"""'], {}), "(path, 'GRSRCHCV_*.pkl')\n", (39395, 39419), False, 'from aux_functions_strat import path_glob\n'), ((40540, 40567), 'joblib.load', 'joblib.load', (['model_files[0]'], {}), '(model_files[0])\n', (40551, 40567), False, 'import joblib\n'), ((41651, 41673), 'pandas.concat', 'pd.concat', (['best_params'], {}), '(best_params)\n', (41660, 41673), True, 'import pandas as pd\n'), ((8034, 8090), 'matplotlib.ticker.FixedLocator', 'mticker.FixedLocator', (['[-180, -120, -60, 0, 60, 120, 180]'], {}), '([-180, -120, -60, 0, 60, 120, 180])\n', (8054, 8090), True, 'import matplotlib.ticker as mticker\n'), ((8113, 8165), 'matplotlib.ticker.FixedLocator', 'mticker.FixedLocator', (['[-45, -30, -15, 0, 15, 30, 45]'], {}), '([-45, -30, -15, 0, 
15, 30, 45])\n', (8133, 8165), True, 'import matplotlib.ticker as mticker\n'), ((8293, 8328), 'strato_figures.remove_regressors_and_set_title', 'remove_regressors_and_set_title', (['ax'], {}), '(ax)\n', (8324, 8328), False, 'from strato_figures import remove_regressors_and_set_title\n'), ((13094, 13150), 'matplotlib.ticker.FixedLocator', 'mticker.FixedLocator', (['[-180, -120, -60, 0, 60, 120, 180]'], {}), '([-180, -120, -60, 0, 60, 120, 180])\n', (13114, 13150), True, 'import matplotlib.ticker as mticker\n'), ((13173, 13225), 'matplotlib.ticker.FixedLocator', 'mticker.FixedLocator', (['[-45, -30, -15, 0, 15, 30, 45]'], {}), '([-45, -30, -15, 0, 15, 30, 45])\n', (13193, 13225), True, 'import matplotlib.ticker as mticker\n'), ((14265, 14322), 'matplotlib.pyplot.savefig', 'plt.savefig', (['(savefig_path / filename)'], {'bbox_inches': '"""tight"""'}), "(savefig_path / filename, bbox_inches='tight')\n", (14276, 14322), True, 'import matplotlib.pyplot as plt\n'), ((17089, 17101), 'xarray.Dataset', 'xr.Dataset', ([], {}), '()\n', (17099, 17101), True, 'import xarray as xr\n'), ((17150, 17199), 'xarray.DataArray', 'xr.DataArray', (['lr.coef_'], {'dims': "['targets', reg_dim]"}), "(lr.coef_, dims=['targets', reg_dim])\n", (17162, 17199), True, 'import xarray as xr\n'), ((17748, 17780), 'xarray.DataArray', 'xr.DataArray', (['r2'], {'dims': '"""targets"""'}), "(r2, dims='targets')\n", (17760, 17780), True, 'import xarray as xr\n'), ((19256, 19273), 'pandas.to_datetime', 'pd.to_datetime', (['x'], {}), '(x)\n', (19270, 19273), True, 'import pandas as pd\n'), ((20270, 20288), 'matplotlib.ticker.AutoMinorLocator', 'AutoMinorLocator', ([], {}), '()\n', (20286, 20288), False, 'from matplotlib.ticker import MultipleLocator, AutoMinorLocator\n'), ((22383, 22401), 'sklearn.model_selection.LeaveOneGroupOut', 'LeaveOneGroupOut', ([], {}), '()\n', (22399, 22401), False, 'from sklearn.model_selection import LeaveOneGroupOut\n'), ((22451, 22509), 
'sklearn.model_selection.cross_val_predict', 'cross_val_predict', (['estimator', 'X', 'y'], {'groups': 'groups', 'cv': 'logo'}), '(estimator, X, y, groups=groups, cv=logo)\n', (22468, 22509), False, 'from sklearn.model_selection import cross_val_predict\n'), ((27371, 27443), 'pandas.DataFrame', 'pd.DataFrame', (['[rf[score], svm[score], mlp[score], lr[score], lr2[score]]'], {}), '([rf[score], svm[score], mlp[score], lr[score], lr2[score]])\n', (27383, 27443), True, 'import pandas as pd\n'), ((27760, 27820), 'pandas.DataFrame', 'pd.DataFrame', (['[rf[score], svm[score], mlp[score], lr[score]]'], {}), '([rf[score], svm[score], mlp[score], lr[score]])\n', (27772, 27820), True, 'import pandas as pd\n'), ((29386, 29411), 'sklearn.metrics.make_scorer', 'make_scorer', (['r2_adj_score'], {}), '(r2_adj_score)\n', (29397, 29411), False, 'from sklearn.metrics import make_scorer\n'), ((29955, 30030), 'sklearn.model_selection.cross_validate', 'cross_validate', (['ml_model', 'X', 'y'], {'scoring': 'scores_dict', 'cv': 'logo', 'groups': 'groups'}), '(ml_model, X, y, scoring=scores_dict, cv=logo, groups=groups)\n', (29969, 30030), False, 'from sklearn.model_selection import cross_validate\n'), ((31031, 31056), 'sklearn.metrics.make_scorer', 'make_scorer', (['r2_adj_score'], {}), '(r2_adj_score)\n', (31042, 31056), False, 'from sklearn.metrics import make_scorer\n'), ((33066, 33085), 'xarray.merge', 'xr.merge', (['[ds, dss]'], {}), '([ds, dss])\n', (33074, 33085), True, 'import xarray as xr\n'), ((33935, 33956), 'xarray.open_dataset', 'xr.open_dataset', (['file'], {}), '(file)\n', (33950, 33956), True, 'import xarray as xr\n'), ((34372, 34432), 'aux_functions_strat.anomalize_xr', 'anomalize_xr', (['da'], {'freq': '"""MS"""', 'units': 'deseason', 'time_dim': '"""time"""'}), "(da, freq='MS', units=deseason, time_dim='time')\n", (34384, 34432), False, 'from aux_functions_strat import anomalize_xr\n'), ((36007, 36050), 'sklearn.model_selection.train_test_split', 'train_test_split', 
(['X', 'y'], {'test_size': 'test_size'}), '(X, y, test_size=test_size)\n', (36023, 36050), False, 'from sklearn.model_selection import train_test_split\n'), ((37192, 37217), 'sklearn.metrics.make_scorer', 'make_scorer', (['r2_adj_score'], {}), '(r2_adj_score)\n', (37203, 37217), False, 'from sklearn.metrics import make_scorer\n'), ((44171, 44176), 'sklearn.svm.SVR', 'SVR', ([], {}), '()\n', (44174, 44176), False, 'from sklearn.svm import SVR\n'), ((45492, 45559), 'sklearn.neural_network.MLPRegressor', 'MLPRegressor', ([], {'random_state': '(42)', 'max_iter': '(500)', 'learning_rate_init': '(0.1)'}), '(random_state=42, max_iter=500, learning_rate_init=0.1)\n', (45504, 45559), False, 'from sklearn.neural_network import MLPRegressor\n'), ((46556, 46605), 'sklearn.ensemble.RandomForestRegressor', 'RandomForestRegressor', ([], {'random_state': '(42)', 'n_jobs': '(-1)'}), '(random_state=42, n_jobs=-1)\n', (46577, 46605), False, 'from sklearn.ensemble import RandomForestRegressor\n'), ((46699, 46726), 'sklearn.linear_model.LinearRegression', 'LinearRegression', ([], {'n_jobs': '(-1)'}), '(n_jobs=-1)\n', (46715, 46726), False, 'from sklearn.linear_model import LinearRegression\n'), ((5750, 5768), 'matplotlib.ticker.AutoMinorLocator', 'AutoMinorLocator', ([], {}), '()\n', (5766, 5768), False, 'from matplotlib.ticker import MultipleLocator, AutoMinorLocator\n'), ((7093, 7111), 'cartopy.crs.PlateCarree', 'ccrs.PlateCarree', ([], {}), '()\n', (7109, 7111), True, 'import cartopy.crs as ccrs\n'), ((11803, 11821), 'cartopy.crs.PlateCarree', 'ccrs.PlateCarree', ([], {}), '()\n', (11819, 11821), True, 'import cartopy.crs as ccrs\n'), ((17576, 17635), 'sklearn.metrics.r2_score', 'r2_score', (['ys_test', "rds['predict']"], {'multioutput': '"""raw_values"""'}), "(ys_test, rds['predict'], multioutput='raw_values')\n", (17584, 17635), False, 'from sklearn.metrics import r2_score\n'), ((17667, 17727), 'sklearn.metrics.r2_score', 'r2_score', (['ys_train', "rds['predict']"], 
{'multioutput': '"""raw_values"""'}), "(ys_train, rds['predict'], multioutput='raw_values')\n", (17675, 17727), False, 'from sklearn.metrics import r2_score\n'), ((18266, 18278), 'xarray.Dataset', 'xr.Dataset', ([], {}), '()\n', (18276, 18278), True, 'import xarray as xr\n'), ((18327, 18365), 'xarray.DataArray', 'xr.DataArray', (['lr.coef_'], {'dims': '[reg_dim]'}), '(lr.coef_, dims=[reg_dim])\n', (18339, 18365), True, 'import xarray as xr\n'), ((18865, 18881), 'xarray.DataArray', 'xr.DataArray', (['r2'], {}), '(r2)\n', (18877, 18881), True, 'import xarray as xr\n'), ((22550, 22597), 'sklearn.model_selection.KFold', 'KFold', ([], {'n_splits': '(5)', 'shuffle': '(True)', 'random_state': '(1)'}), '(n_splits=5, shuffle=True, random_state=1)\n', (22555, 22597), False, 'from sklearn.model_selection import KFold\n'), ((22612, 22656), 'sklearn.model_selection.cross_val_predict', 'cross_val_predict', (['estimator', 'X', 'y'], {'cv': 'kfold'}), '(estimator, X, y, cv=kfold)\n', (22629, 22656), False, 'from sklearn.model_selection import cross_val_predict\n'), ((23968, 23987), 'numpy.ndenumerate', 'np.ndenumerate', (['axs'], {}), '(axs)\n', (23982, 23987), True, 'import numpy as np\n'), ((30158, 30232), 'sklearn.model_selection.cross_validate', 'cross_validate', (['ml_model', 'X', 'y'], {'scoring': 'scores_dict', 'cv': 'gss', 'groups': 'groups'}), '(ml_model, X, y, scoring=scores_dict, cv=gss, groups=groups)\n', (30172, 30232), False, 'from sklearn.model_selection import cross_validate\n'), ((30286, 30344), 'sklearn.model_selection.cross_validate', 'cross_validate', (['ml_model', 'X', 'y'], {'scoring': 'scores_dict', 'cv': 'cv'}), '(ml_model, X, y, scoring=scores_dict, cv=cv)\n', (30300, 30344), False, 'from sklearn.model_selection import cross_validate\n'), ((34276, 34290), 'aux_functions_strat.detrend_ts', 'detrend_ts', (['da'], {}), '(da)\n', (34286, 34290), False, 'from aux_functions_strat import detrend_ts\n'), ((41909, 41940), 'numpy.arange', 'np.arange', (['minimal', 
'(maximal + 1)'], {}), '(minimal, maximal + 1)\n', (41918, 41940), True, 'import numpy as np\n'), ((690, 711), 'make_regressors.load_all_regressors', 'load_all_regressors', ([], {}), '()\n', (709, 711), False, 'from make_regressors import load_all_regressors\n'), ((742, 763), 'make_regressors.load_all_regressors', 'load_all_regressors', ([], {}), '()\n', (761, 763), False, 'from make_regressors import load_all_regressors\n'), ((796, 817), 'make_regressors.load_all_regressors', 'load_all_regressors', ([], {}), '()\n', (815, 817), False, 'from make_regressors import load_all_regressors\n'), ((1956, 1987), 'numpy.corrcoef', 'np.corrcoef', (['shap_v[i]', 'df_v[i]'], {}), '(shap_v[i], df_v[i])\n', (1967, 1987), True, 'import numpy as np\n'), ((7664, 7682), 'cartopy.crs.PlateCarree', 'ccrs.PlateCarree', ([], {}), '()\n', (7680, 7682), True, 'import cartopy.crs as ccrs\n'), ((7727, 7745), 'cartopy.crs.PlateCarree', 'ccrs.PlateCarree', ([], {}), '()\n', (7743, 7745), True, 'import cartopy.crs as ccrs\n'), ((12787, 12805), 'cartopy.crs.PlateCarree', 'ccrs.PlateCarree', ([], {}), '()\n', (12803, 12805), True, 'import cartopy.crs as ccrs\n'), ((18747, 18779), 'sklearn.metrics.r2_score', 'r2_score', (['y_test', "rds['predict']"], {}), "(y_test, rds['predict'])\n", (18755, 18779), False, 'from sklearn.metrics import r2_score\n'), ((18811, 18844), 'sklearn.metrics.r2_score', 'r2_score', (['y_train', "rds['predict']"], {}), "(y_train, rds['predict'])\n", (18819, 18844), False, 'from sklearn.metrics import r2_score\n'), ((41043, 41081), 'pandas.DataFrame', 'pd.DataFrame', (["gr.cv_results_['params']"], {}), "(gr.cv_results_['params'])\n", (41055, 41081), True, 'import pandas as pd\n'), ((41212, 41250), 'pandas.DataFrame', 'pd.DataFrame', (["gr.cv_results_['params']"], {}), "(gr.cv_results_['params'])\n", (41224, 41250), True, 'import pandas as pd\n'), ((2056, 2079), 'pandas.Series', 'pd.Series', (['feature_list'], {}), '(feature_list)\n', (2065, 2079), True, 'import pandas as 
pd\n'), ((2081, 2101), 'pandas.Series', 'pd.Series', (['corr_list'], {}), '(corr_list)\n', (2090, 2101), True, 'import pandas as pd\n'), ((43900, 43922), 'numpy.logspace', 'np.logspace', (['(-2)', '(2)', '(10)'], {}), '(-2, 2, 10)\n', (43911, 43922), True, 'import numpy as np\n'), ((43988, 44010), 'numpy.logspace', 'np.logspace', (['(-5)', '(1)', '(14)'], {}), '(-5, 1, 14)\n', (43999, 44010), True, 'import numpy as np\n'), ((45014, 45035), 'numpy.logspace', 'np.logspace', (['(-5)', '(1)', '(7)'], {}), '(-5, 1, 7)\n', (45025, 45035), True, 'import numpy as np\n')]
|
#!/usr/bin/env python
"""
Creates lists of molecules on a grid with a +-0.5 pixel
random offset.
Hazen 12/16
"""
import numpy
import random
import storm_analysis.sa_library.sa_h5py as saH5Py
def emittersOnGrid(h5_name, nx, ny, sigma, spacing, zrange, zoffset, seed = 0):
    """Write an nx by ny grid of emitters, each jittered by +-0.5 pixel,
    to the HDF5 localization file h5_name.  Z values ramp linearly from
    -zrange to +zrange (plus zoffset) across the emitters.
    """
    if seed is not None:
        random.seed(seed)

    n_emitters = nx * ny
    if n_emitters > 1:
        z_pos = -zrange
        z_step = 2.0 * zrange / (n_emitters - 1)
    else:
        z_pos = 0.0
        z_step = 0.0

    peaks = {"id": numpy.zeros(n_emitters, dtype=numpy.int32),
             "x": numpy.zeros(n_emitters),
             "y": numpy.zeros(n_emitters),
             "z": numpy.zeros(n_emitters),
             "xsigma": sigma * numpy.ones(n_emitters),
             "ysigma": sigma * numpy.ones(n_emitters)}

    x_pos = spacing
    for ix in range(nx):
        y_pos = spacing
        for iy in range(ny):
            idx = ix * ny + iy
            # random.random() is in [0, 1), so the offset is in [-0.5, 0.5).
            peaks['x'][idx] = x_pos + random.random() - 0.5
            peaks['y'][idx] = y_pos + random.random() - 0.5
            peaks['z'][idx] = z_pos + zoffset

            # Record emitter id in the 'id' field.
            peaks['id'][idx] = idx

            y_pos += spacing
            z_pos += z_step
        x_pos += spacing

    saH5Py.saveLocalizations(h5_name, peaks)
if __name__ == "__main__":

    import argparse

    # Command line interface for generating an emitter-grid HDF5 file.
    ap = argparse.ArgumentParser(
        description="Create a grid of emitters for simulations.")

    ap.add_argument('--bin', dest='hdf5', type=str, required=True,
                    help="The name of the HDF5 file to save the emitter locations, etc.")
    ap.add_argument('--nx', dest='nx', type=int, required=True,
                    help="The grid size in X.")
    ap.add_argument('--ny', dest='ny', type=int, required=True,
                    help="The grid size in Y.")
    ap.add_argument('--sigma', dest='sigma', type=float, required=False, default=1.5,
                    help="PSF sigma in pixels.")
    ap.add_argument('--spacing', dest='spacing', type=float, required=True,
                    help="The grid spacing in pixels.")
    ap.add_argument('--zrange', dest='zrange', type=float, required=False, default=0.0,
                    help="Range for z values in microns, -zrange to zrange")
    ap.add_argument('--zoffset', dest='zoffset', type=float, required=False, default=0.0,
                    help="Offset for z values in microns")

    args = ap.parse_args()

    emittersOnGrid(args.hdf5, args.nx, args.ny, args.sigma, args.spacing, args.zrange, args.zoffset)
|
[
"argparse.ArgumentParser",
"storm_analysis.sa_library.sa_h5py.saveLocalizations",
"numpy.zeros",
"numpy.ones",
"random.random",
"random.seed"
] |
[((1206, 1246), 'storm_analysis.sa_library.sa_h5py.saveLocalizations', 'saH5Py.saveLocalizations', (['h5_name', 'peaks'], {}), '(h5_name, peaks)\n', (1230, 1246), True, 'import storm_analysis.sa_library.sa_h5py as saH5Py\n'), ((1312, 1398), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Create a grid of emitters for simulations."""'}), "(description=\n 'Create a grid of emitters for simulations.')\n", (1335, 1398), False, 'import argparse\n'), ((311, 328), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (322, 328), False, 'import random\n'), ((484, 523), 'numpy.zeros', 'numpy.zeros', (['(nx * ny)'], {'dtype': 'numpy.int32'}), '(nx * ny, dtype=numpy.int32)\n', (495, 523), False, 'import numpy\n'), ((544, 564), 'numpy.zeros', 'numpy.zeros', (['(nx * ny)'], {}), '(nx * ny)\n', (555, 564), False, 'import numpy\n'), ((583, 603), 'numpy.zeros', 'numpy.zeros', (['(nx * ny)'], {}), '(nx * ny)\n', (594, 603), False, 'import numpy\n'), ((622, 642), 'numpy.zeros', 'numpy.zeros', (['(nx * ny)'], {}), '(nx * ny)\n', (633, 642), False, 'import numpy\n'), ((674, 693), 'numpy.ones', 'numpy.ones', (['(nx * ny)'], {}), '(nx * ny)\n', (684, 693), False, 'import numpy\n'), ((725, 744), 'numpy.ones', 'numpy.ones', (['(nx * ny)'], {}), '(nx * ny)\n', (735, 744), False, 'import numpy\n'), ((897, 912), 'random.random', 'random.random', ([], {}), '()\n', (910, 912), False, 'import random\n'), ((954, 969), 'random.random', 'random.random', ([], {}), '()\n', (967, 969), False, 'import random\n')]
|
import pytest
import os
from helpers.cluster import ClickHouseCluster
from helpers.test_tools import TSV, assert_eq_with_retry
# Extra server config files: enable external dictionaries and define the
# 'cache' dictionary used by the tests below.
ENABLE_DICT_CONFIG = ['configs/enable_dictionaries.xml']
DICTIONARY_FILES = ['configs/dictionaries/cache.xml']
# Single-node ClickHouse test cluster with the dictionary configs installed.
cluster = ClickHouseCluster(__file__)
instance = cluster.add_instance('instance', main_configs=ENABLE_DICT_CONFIG+DICTIONARY_FILES)
@pytest.fixture(scope="module")
def started_cluster():
    """Start the cluster once per module, create the source table the
    'cache' dictionary reads from, and shut everything down afterwards."""
    try:
        cluster.start()
        # Source table covering every attribute type the dictionary exposes.
        instance.query('''
            CREATE DATABASE IF NOT EXISTS test;
            DROP TABLE IF EXISTS test.source;
            CREATE TABLE test.source (id UInt64, key0 UInt8, key0_str String, key1 UInt8,
            StartDate Date, EndDate Date,
            UInt8_ UInt8, UInt16_ UInt16, UInt32_ UInt32, UInt64_ UInt64,
            Int8_ Int8, Int16_ Int16, Int32_ Int32, Int64_ Int64,
            Float32_ Float32, Float64_ Float64,
            String_ String,
            Date_ Date, DateTime_ DateTime, Parent UInt64) ENGINE=Log;
            ''')

        yield cluster

    finally:
        cluster.shutdown()
def test_null_value(started_cluster):
    """A missing key (12121212) must yield the dictionary's null_value defaults."""
    run = instance.query
    cases = [
        ("select dictGetUInt8('cache', 'UInt8_', toUInt64(12121212))", "1\n"),
        ("select dictGetString('cache', 'String_', toUInt64(12121212))", "implicit-default\n"),
        ("select dictGetDate('cache', 'Date_', toUInt64(12121212))", "2015-11-25\n"),
        # Check, that empty null_value interprets as default value
        ("select dictGetUInt64('cache', 'UInt64_', toUInt64(12121212))", "0\n"),
        ("select toTimeZone(dictGetDateTime('cache', 'DateTime_', toUInt64(12121212)), 'UTC')",
         "1970-01-01 00:00:00\n"),
    ]
    for sql, expected in cases:
        assert run(sql) == expected
|
[
"helpers.cluster.ClickHouseCluster",
"pytest.fixture"
] |
[((250, 277), 'helpers.cluster.ClickHouseCluster', 'ClickHouseCluster', (['__file__'], {}), '(__file__)\n', (267, 277), False, 'from helpers.cluster import ClickHouseCluster\n'), ((375, 405), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""module"""'}), "(scope='module')\n", (389, 405), False, 'import pytest\n')]
|
"""
Condel
========
"""
#import distribute_setup
#distribute_setup.use_setuptools()
from setuptools import setup, find_packages
from condel import VERSION, AUTHORS, AUTHORS_EMAIL
setup(
	name = "Condel",
	version = VERSION,

	packages = find_packages(),

	install_requires = [
		"bgcore>=0.3.1",
	],

	scripts = [
		#"bin/condel-ann-add",
		#"bin/condel-ann-get"
	],

	entry_points = {
		'console_scripts': [
			#'condel-training-sets = condel.command.training_sets:main',
			#'condel-weights = condel.command.weights:main',
			#'condel-plot-stats = condel.command.plot_stats:main',
			#'condel-plot-roc = condel.command.plot_roc:main',
			#'condel-calc = condel.command.calc:main',
			#'condel-calc-label = condel.command.calc_label:main'
		]
	},

	# metadata for upload to PyPI
	author = AUTHORS,
	author_email = AUTHORS_EMAIL,
	description = "Condel",
	license = "UPF Free Source Code",
	keywords = "",
	url = "https://bitbucket.org/bbglab/condel",
	long_description = __doc__,

	classifiers = [
		"Development Status :: 4 - Beta",
		# NOTE: "Intended Audience :: Bioinformatics" was removed -- it is not
		# a valid Trove classifier and PyPI rejects uploads with unknown
		# classifiers; "Science/Research" below already covers the intent.
		"Environment :: Console",
		"Intended Audience :: Science/Research",
		"Natural Language :: English",
		"Operating System :: OS Independent",
		"Programming Language :: Python",
		"Programming Language :: Python :: 2.7",
		"Topic :: Scientific/Engineering",
		"Topic :: Scientific/Engineering :: Bio-Informatics"
	]
)
|
[
"setuptools.find_packages"
] |
[((241, 256), 'setuptools.find_packages', 'find_packages', ([], {}), '()\n', (254, 256), False, 'from setuptools import setup, find_packages\n')]
|
# -*- coding: future_fstrings -*-
import logging
from .. import loader, utils
logger = logging.getLogger(__name__)
def register(cb):
    """Loader entry point: hand a freshly constructed module instance to *cb*."""
    module_instance = ForwardMod()
    cb(module_instance)
class ForwardMod(loader.Module):
    """Forwards messages"""

    def __init__(self):
        # Registered under .fwdall; config is currently unused.
        self.name = "Forwarding"
        self.config = {}
        self.commands = {"fwdall": self.fwdallcmd}

    async def fwdallcmd(self, message):
        """.fwdall <to_user>
           Forwards all messages in chat"""
        target = utils.get_args(message)[0]
        batch = []
        # Walk the chat oldest-first and forward in batches of 100 ids,
        # which keeps each forward_messages call within API limits.
        async for msg in message.client.iter_messages(
                entity=message.to_id,
                reverse=True):
            batch.append(msg.id)
            if len(batch) >= 100:
                logger.debug(batch)
                await message.client.forward_messages(target, batch, message.from_id)
                batch = []
        # Flush the leftover partial batch (no async list comprehension in 3.5).
        if batch:
            logger.debug(batch)
            await message.client.forward_messages(target, batch, message.from_id)
|
[
"logging.getLogger"
] |
[((90, 117), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (107, 117), False, 'import logging\n')]
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Add a database index to ``courses.Section.crn``."""

    dependencies = [
        ('courses', '0002_auto_20140907_0049'),
    ]

    operations = [
        migrations.AlterField(
            model_name='section',
            name='crn',
            # db_index=True creates a DB-level index so CRN lookups are fast.
            field=models.IntegerField(db_index=True),
        ),
    ]
|
[
"django.db.models.IntegerField"
] |
[((350, 384), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'db_index': '(True)'}), '(db_index=True)\n', (369, 384), False, 'from django.db import models, migrations\n')]
|
"""
desisim.pixsim
==============
Tools for DESI pixel level simulations using specter
"""
from __future__ import absolute_import, division, print_function
import sys
import os
import os.path
import random
from time import asctime
import socket
import astropy.units as u
import numpy as np
import desimodel.io
import desispec.io
from desispec.image import Image
import desispec.cosmics
from . import obs, io
from desiutil.log import get_logger
log = get_logger()
# Inhibit download of IERS-A catalog, even from a good server.
# Note that this is triggered by a call to astropy.time.Time(),
# which is subsequently used to compute sidereal_time().
# It's the initialization of astropy.time.Time() itself that makes the call.
from desiutil.iers import freeze_iers
from astropy.time import Time
def simulate_exposure(simspecfile, rawfile, cameras=None,
        ccdshape=None, simpixfile=None, addcosmics=None, comm=None,
        **kwargs):
    """
    Simulate frames from an exposure, including I/O
    Args:
        simspecfile: input simspec format file with spectra
        rawfile: output raw data file to write
    Options:
        cameras: str or list of str, e.g. b0, r1, .. z9
        ccdshape: (npix_y, npix_x) primarily used to limit memory while testing
        simpixfile: output file for noiseless truth pixels
        addcosmics: if True (must be specified via command input), add cosmics from real data
        comm: MPI communicator object
    Additional keyword args are passed to pixsim.simulate()
    For a lower-level pixel simulation interface that doesn't perform I/O,
    see pixsim.simulate()
    Note: call desi_preproc or desispec.preproc.preproc to pre-process the
    output desi*.fits file for overscan subtraction, noise estimation, etc.
    """
    #- Split communicator by nodes; each node processes N frames
    #- Assumes / requires equal number of ranks per node
    if comm is not None:
        rank, size = comm.rank, comm.size
        num_nodes = mpi_count_nodes(comm)
        comm_node, node_index, num_nodes = mpi_split_by_node(comm, 1)
        node_rank = comm_node.rank
        node_size = comm_node.size
    else:
        # Serial fallback: behave like a single-node, single-rank job.
        log.debug('Not using MPI')
        rank, size = 0, 1
        comm_node = None
        node_index = 0
        num_nodes = 1
        node_rank = 0
        node_size = 1
    if rank == 0:
        log.debug('Starting simulate_exposure at {}'.format(asctime()))
    # If no camera list was given, derive it from the simspec FIBERMAP
    # on rank 0 and broadcast to everyone else.
    if cameras is None:
        if rank == 0:
            from astropy.io import fits
            fibermap = fits.getdata(simspecfile, 'FIBERMAP')
            cameras = io.fibers2cameras(fibermap['FIBER'])
            log.debug('Found cameras {} in input simspec file'.format(cameras))
            if len(cameras) % num_nodes != 0:
                raise ValueError('Number of cameras {} should be evenly divisible by number of nodes {}'.format(
                    len(cameras), num_nodes))
        if comm is not None:
            cameras = comm.bcast(cameras, root=0)
    #- Fail early if camera already in output file
    if rank == 0 and os.path.exists(rawfile):
        from astropy.io import fits
        err = False
        fx = fits.open(rawfile)
        for camera in cameras:
            if camera in fx:
                log.error('Camera {} already in {}'.format(camera, rawfile))
                err = True
        if err:
            raise ValueError('Some cameras already in output file')
    #- Read simspec input; I/O layer handles MPI broadcasting
    if rank == 0:
        log.debug('Reading simspec at {}'.format(asctime()))
    # Round-robin assignment of cameras to node groups.
    mycameras = cameras[node_index::num_nodes]
    if node_rank == 0:
        log.info("Assigning cameras {} to comm_exp node {}".format(mycameras, node_index))
    simspec = io.read_simspec(simspecfile, cameras=mycameras,
        readflux=False, comm=comm)
    night = simspec.header['NIGHT']
    expid = simspec.header['EXPID']
    if rank == 0:
        log.debug('Reading PSFs at {}'.format(asctime()))
    # PSFs are cached per channel (b/r/z) so each is loaded at most once.
    psfs = dict()
    #need to initialize previous channel
    previous_channel = 'a'
    for camera in mycameras:
        #- Note: current PSF object can't be pickled and thus every
        #- rank must read it instead of rank 0 read + bcast
        channel = camera[0]
        if channel not in psfs:
            log.info('Reading {} PSF at {}'.format(channel, asctime()))
            psfs[channel] = desimodel.io.load_psf(channel)
            #- Trim effective CCD size; mainly to limit memory for testing
            if ccdshape is not None:
                psfs[channel].npix_y, psfs[channel].npix_x = ccdshape
        psf = psfs[channel]
        cosmics=None
        #avoid re-broadcasting cosmics if we can
        if previous_channel != channel:
            if (addcosmics is True) and (node_rank == 0):
                cosmics_file = io.find_cosmics(camera, simspec.header['EXPTIME'])
                log.info('Reading cosmics templates {} at {}'.format(
                    cosmics_file, asctime()))
                shape = (psf.npix_y, psf.npix_x)
                cosmics = io.read_cosmics(cosmics_file, expid, shape=shape)
            # NOTE(review): this else pairs with the broadcast `if`, so a
            # serial run (comm_node is None) with addcosmics=True still logs
            # "Cosmics not requested" even though cosmics were read above —
            # confirm whether the else was meant for `addcosmics is True`.
            if (addcosmics is True) and (comm_node is not None):
                if node_rank == 0:
                    log.info('Broadcasting cosmics at {}'.format(asctime()))
                cosmics = comm_node.bcast(cosmics, root=0)
            else:
                log.debug("Cosmics not requested")
        if node_rank == 0:
            log.info("Starting simulate for camera {} on node {}".format(camera,node_index))
        image, rawpix, truepix = simulate(camera, simspec, psf, comm=comm_node, preproc=False, cosmics=cosmics, **kwargs)
        #- Use input communicator as barrier since multiple sub-communicators
        #- will write to the same output file
        if rank == 0:
            log.debug('Writing outputs at {}'.format(asctime()))
        # Write to a temporary file; it is renamed to `rawfile` only after
        # every camera has been written, so partial outputs are never final.
        tmprawfile = rawfile + '.tmp'
        if comm is not None:
            # Serialize writes: ranks take turns, and only node leaders write.
            for i in range(comm.size):
                if (i == comm.rank) and (comm_node.rank == 0):
                    desispec.io.write_raw(tmprawfile, rawpix, image.meta,
                                          camera=camera)
                    if simpixfile is not None:
                        io.write_simpix(simpixfile, truepix, camera=camera,
                            meta=image.meta)
                comm.barrier()
        else:
            desispec.io.write_raw(tmprawfile, rawpix, image.meta, camera=camera)
            if simpixfile is not None:
                io.write_simpix(simpixfile, truepix, camera=camera,
                    meta=image.meta)
        if rank == 0:
            log.info('Wrote {}'.format(rawfile))
            log.debug('done at {}'.format(asctime()))
        previous_channel = channel
    #- All done; rename temporary raw file to final location
    if comm is None or comm.rank == 0:
        os.rename(tmprawfile, rawfile)
def simulate(camera, simspec, psf, nspec=None, ncpu=None,
    cosmics=None, wavemin=None, wavemax=None, preproc=True, comm=None):
    """Run pixel-level simulation of input spectra
    Args:
        camera (string) : b0, r1, .. z9
        simspec : desispec.io.SimSpec object from desispec.io.read_simspec()
        psf : subclass of specter.psf.psf.PSF, e.g. from desimodel.io.load_psf()
    Options:
        nspec (int): number of spectra to simulate
        ncpu (int): number of CPU cores to use in parallel
        cosmics (desispec.image.Image): e.g. from desisim.io.read_cosmics()
        wavemin (float): minimum wavelength range to simulate
        wavemax (float): maximum wavelength range to simulate
        preproc (boolean, optional) : also preprocess raw data (default True)
    Returns:
        (image, rawpix, truepix) tuple, where image is the preproc Image object
        (only header is meaningful if preproc=False), rawpix is a 2D
        ndarray of unprocessed raw pixel data, and truepix is a 2D ndarray
        of truth for image.pix
    """
    freeze_iers()
    if (comm is None) or (comm.rank == 0):
        log.info('Starting pixsim.simulate camera {} at {}'.format(camera,
            asctime()))
    #- parse camera name into channel and spectrograph number
    channel = camera[0].lower()
    ispec = int(camera[1])
    assert channel in 'brz', \
        'unrecognized channel {} camera {}'.format(channel, camera)
    assert 0 <= ispec < 10, \
        'unrecognized spectrograph {} camera {}'.format(ispec, camera)
    assert len(camera) == 2, \
        'unrecognized camera {}'.format(camera)
    #- Load DESI parameters
    params = desimodel.io.load_desiparams()
    #- this is not necessarily true, the truth in is the fibermap
    nfibers = params['spectro']['nfibers']
    #- Object photons plus sky photons (if present) per spectrum/wavelength
    phot = simspec.cameras[camera].phot
    if simspec.cameras[camera].skyphot is not None:
        phot += simspec.cameras[camera].skyphot
    if nspec is not None:
        phot = phot[0:nspec]
    else:
        nspec = phot.shape[0]
    #- Trim wavelengths if needed
    wave = simspec.cameras[camera].wave
    if wavemin is not None:
        ii = (wave >= wavemin)
        phot = phot[:, ii]
        wave = wave[ii]
    if wavemax is not None:
        ii = (wave <= wavemax)
        phot = phot[:, ii]
        wave = wave[ii]
    #- Project to image and append that to file
    if (comm is None) or (comm.rank == 0):
        log.info('Starting {} projection at {}'.format(camera, asctime()))
    # The returned true pixel values will only exist on rank 0 in the
    # MPI case. Otherwise it will be None.
    truepix = parallel_project(psf, wave, phot, ncpu=ncpu, comm=comm)
    if (comm is None) or (comm.rank == 0):
        log.info('Finished {} projection at {}'.format(camera,
            asctime()))
    image = None
    rawpix = None
    if (comm is None) or (comm.rank == 0):
        #- Start metadata header
        header = simspec.header.copy()
        header['CAMERA'] = camera
        header['DOSVER'] = 'SIM'
        header['FEEVER'] = 'SIM'
        header['DETECTOR'] = 'SIM'
        #- Add cosmics from library of dark images
        #- ny/nx are per-amplifier quadrant sizes (half the CCD each way)
        ny = truepix.shape[0] // 2
        nx = truepix.shape[1] // 2
        if cosmics is not None:
            # set to zeros values with mask bit 0 (= dead column or hot pixels)
            cosmics_pix = cosmics.pix*((cosmics.mask&1)==0)
            pix = np.random.poisson(truepix) + cosmics_pix
            try: #- cosmics templates >= v0.3
                rdnoiseA = cosmics.meta['OBSRDNA']
                rdnoiseB = cosmics.meta['OBSRDNB']
                rdnoiseC = cosmics.meta['OBSRDNC']
                rdnoiseD = cosmics.meta['OBSRDND']
            except KeyError: #- cosmics templates <= v0.2
                #- BUGFIX: was `print(cosmic.meta)` referencing undefined
                #- name `cosmic`, raising NameError on this fallback path.
                print(cosmics.meta)
                rdnoiseA = cosmics.meta['RDNOISE0']
                rdnoiseB = cosmics.meta['RDNOISE1']
                rdnoiseC = cosmics.meta['RDNOISE2']
                rdnoiseD = cosmics.meta['RDNOISE3']
        else:
            pix = truepix
            readnoise = params['ccd'][channel]['readnoise']
            rdnoiseA = rdnoiseB = rdnoiseC = rdnoiseD = readnoise
        #- data already has noise if cosmics were added
        noisydata = (cosmics is not None)
        #- Split by amplifier and expand into raw data
        nprescan = params['ccd'][channel]['prescanpixels']
        if 'overscanpixels' in params['ccd'][channel]:
            noverscan = params['ccd'][channel]['overscanpixels']
        else:
            noverscan = 50
        #- Reproducibly random overscan bias level offsets across diff exp
        assert channel in 'brz'
        if channel == 'b':
            irand = ispec
        elif channel == 'r':
            irand = 10 + ispec
        elif channel == 'z':
            irand = 20 + ispec
        #- Per-camera deterministic RNG: 30 seeds cover b0-b9, r0-r9, z0-z9
        seeds = np.random.RandomState(0).randint(2**32-1, size=30)
        rand = np.random.RandomState(seeds[irand])
        nyraw = ny
        nxraw = nx + nprescan + noverscan
        rawpix = np.empty( (nyraw*2, nxraw*2), dtype=np.int32 )
        gain = params['ccd'][channel]['gain']
        #- Amp A/1 Lower Left
        rawpix[0:nyraw, 0:nxraw] = \
            photpix2raw(pix[0:ny, 0:nx], gain, rdnoiseA,
                readorder='lr', nprescan=nprescan, noverscan=noverscan,
                offset=rand.uniform(100, 200),
                noisydata=noisydata)
        #- Amp B/2 Lower Right
        rawpix[0:nyraw, nxraw:nxraw+nxraw] = \
            photpix2raw(pix[0:ny, nx:nx+nx], gain, rdnoiseB,
                readorder='rl', nprescan=nprescan, noverscan=noverscan,
                offset=rand.uniform(100, 200),
                noisydata=noisydata)
        #- Amp C/3 Upper Left
        rawpix[nyraw:nyraw+nyraw, 0:nxraw] = \
            photpix2raw(pix[ny:ny+ny, 0:nx], gain, rdnoiseC,
                readorder='lr', nprescan=nprescan, noverscan=noverscan,
                offset=rand.uniform(100, 200),
                noisydata=noisydata)
        #- Amp D/4 Upper Right
        rawpix[nyraw:nyraw+nyraw, nxraw:nxraw+nxraw] = \
            photpix2raw(pix[ny:ny+ny, nx:nx+nx], gain, rdnoiseD,
                readorder='rl', nprescan=nprescan, noverscan=noverscan,
                offset=rand.uniform(100, 200),
                noisydata=noisydata)
        def xyslice2header(xyslice):
            '''
            convert 2D slice into IRAF style [a:b,c:d] header value
            e.g. xyslice2header(np.s_[0:10, 5:20]) -> '[6:20,1:10]'
            '''
            yy, xx = xyslice
            value = '[{}:{},{}:{}]'.format(xx.start+1, xx.stop,
                yy.start+1, yy.stop)
            return value
        #- Amp order from DESI-1964 (previously 1-4 instead of A-D)
        #-   C D
        #-   A B
        xoffset = nprescan+nx+noverscan
        header['PRESECA']  = xyslice2header(np.s_[0:nyraw, 0:0+nprescan])
        header['DATASECA'] = xyslice2header(np.s_[0:nyraw, nprescan:nprescan+nx])
        header['BIASSECA'] = xyslice2header(np.s_[0:nyraw, nprescan+nx:nprescan+nx+noverscan])
        header['CCDSECA']  = xyslice2header(np.s_[0:ny, 0:nx])
        header['PRESECB']  = xyslice2header(np.s_[0:nyraw, xoffset+noverscan+nx:xoffset+noverscan+nx+nprescan])
        header['DATASECB'] = xyslice2header(np.s_[0:nyraw, xoffset+noverscan:xoffset+noverscan+nx])
        header['BIASSECB'] = xyslice2header(np.s_[0:nyraw, xoffset:xoffset+noverscan])
        header['CCDSECB']  = xyslice2header(np.s_[0:ny, nx:2*nx])
        header['PRESECC']  = xyslice2header(np.s_[nyraw:2*nyraw, 0:0+nprescan])
        header['DATASECC'] = xyslice2header(np.s_[nyraw:2*nyraw, nprescan:nprescan+nx])
        header['BIASSECC'] = xyslice2header(np.s_[nyraw:2*nyraw, nprescan+nx:nprescan+nx+noverscan])
        header['CCDSECC']  = xyslice2header(np.s_[ny:2*ny, 0:nx])
        header['PRESECD']  = xyslice2header(np.s_[nyraw:2*nyraw, xoffset+noverscan+nx:xoffset+noverscan+nx+nprescan])
        header['DATASECD'] = xyslice2header(np.s_[nyraw:2*nyraw, xoffset+noverscan:xoffset+noverscan+nx])
        header['BIASSECD'] = xyslice2header(np.s_[nyraw:2*nyraw, xoffset:xoffset+noverscan])
        header['CCDSECD']  = xyslice2header(np.s_[ny:2*ny, nx:2*nx])
        #- Add additional keywords to mimic real raw data
        header['INSTRUME'] = 'DESI'
        header['PROCTYPE'] = 'RAW'
        header['PRODTYPE'] = 'image'
        header['EXPFRAME'] = 0
        header['REQTIME'] = simspec.header['EXPTIME']
        header['TIMESYS'] = 'UTC'
        #- DATE-OBS format YEAR-MM-DDThh:mm:ss.sss -> OBSID kpnoYEARMMDDthhmmss
        header['OBSID']='kp4m'+header['DATE-OBS'][0:19].replace('-','').replace(':','').lower()
        header['TIME-OBS'] = header['DATE-OBS'].split('T')[1]
        header['DELTARA'] = 0.0
        header['DELTADEC'] = 0.0
        header['SPECGRPH'] = ispec
        header['CCDNAME'] = 'CCDS' + str(ispec) + str(channel).upper()
        header['CCDPREP'] = 'purge,clear'
        header['CCDSIZE'] = str(rawpix.shape)
        header['CCDTEMP'] = 850.0
        header['CPUTEMP'] = 63.7
        header['CASETEMP'] = 62.8
        header['CCDTMING'] = 'sim_timing.txt'
        header['CCDCFG'] = 'sim.cfg'
        header['SETTINGS'] = 'sim_detectors.json'
        header['VESSEL'] = 7  #- I don't know what this is
        header['FEEBOX'] = 'sim097'
        header['PGAGAIN'] = 5
        header['OCSVER'] = 'SIM'
        header['CONSTVER'] = 'SIM'
        header['BLDTIME'] = 0.35
        header['DIGITIME'] = 61.9
        #- Remove some spurious header keywords from upstream
        if 'BUNIT' in header and header['BUNIT'] == 'Angstrom':
            del header['BUNIT']
        if 'MJD' in header and 'MJD-OBS' not in header:
            header['MJD-OBS'] = header['MJD']
            del header['MJD']
        for key in ['RA', 'DEC']:
            if key in header:
                del header[key]
        #- Derive MJD-OBS from DATE-OBS if needed
        if 'MJD-OBS' not in header:
            header['MJD-OBS'] = Time(header['DATE-OBS']).mjd
        #- from http://www-kpno.kpno.noao.edu/kpno-misc/mayall_params.html
        kpno_longitude = -(111. + 35/60. + 59.6/3600) * u.deg
        #- Convert DATE-OBS to sexigesimal (sigh) Local Sidereal Time
        #- Use mean ST as close enough for sims to avoid nutation calc
        t = Time(header['DATE-OBS'])
        st = t.sidereal_time('mean', kpno_longitude).to('deg').value
        hour = st/15
        minute = (hour % 1)*60
        second = (minute % 1)*60
        header['ST'] = '{:02d}:{:02d}:{:0.3f}'.format(
            int(hour), int(minute), second)
        if preproc:
            log.debug('Running preprocessing at {}'.format(asctime()))
            image = desispec.preproc.preproc(rawpix, header, primary_header=simspec.header)
        else:
            log.debug('Skipping preprocessing')
            image = Image(np.zeros(truepix.shape), np.zeros(truepix.shape), meta=header)
    if (comm is None) or (comm.rank == 0):
        log.info('Finished pixsim.simulate for camera {} at {}'.format(camera,
            asctime()))
    return image, rawpix, truepix
def photpix2raw(phot, gain=1.0, readnoise=3.0, offset=None,
nprescan=7, noverscan=50, readorder='lr', noisydata=True):
'''
Add prescan, overscan, noise, and integerization to an image
Args:
phot: 2D float array of mean input photons per pixel
gain (float, optional): electrons/ADU
readnoise (float, optional): CCD readnoise in electrons
offset (float, optional): bias offset to add
nprescan (int, optional): number of prescan pixels to add
noverscan (int, optional): number of overscan pixels to add
readorder (str, optional): 'lr' or 'rl' to indicate readout order
'lr' : add prescan on left and overscan on right of image
'rl' : add prescan on right and overscan on left of image
noisydata (boolean, optional) : if True, don't add noise,
e.g. because input signal already had noise from a cosmics image
Returns 2D integer ndarray:
image = int((poisson(phot) + offset + gauss(readnoise))/gain)
Integerization happens twice: the mean photons are poisson sampled
into integers, but then offets, readnoise, and gain are applied before
resampling into ADU integers
This is intended to be used per-amplifier, not for an entire CCD image.
'''
ny = phot.shape[0]
nx = phot.shape[1] + nprescan + noverscan
#- reading from right to left is effectively swapping pre/overscan counts
if readorder.lower() in ('rl', 'rightleft'):
nprescan, noverscan = noverscan, nprescan
img = np.zeros((ny, nx), dtype=float)
img[:, nprescan:nprescan+phot.shape[1]] = phot
if offset is None:
offset = np.random.uniform(100, 200)
if noisydata:
#- Data already has noise; just add offset and noise to pre/overscan
img += offset
img[0:ny, 0:nprescan] += np.random.normal(scale=readnoise, size=(ny, nprescan))
ix = phot.shape[1] + nprescan
img[0:ny, ix:ix+noverscan] += np.random.normal(scale=readnoise, size=(ny, noverscan))
img /= gain
else:
#- Add offset and noise to everything
noise = np.random.normal(loc=offset, scale=readnoise, size=img.shape)
img = np.random.poisson(img) + noise
img /= gain
return img.astype(np.int32)
#- Helper function for multiprocessing parallel project
def _project(args):
"""
Helper function to project photons onto a subimage
Args:
tuple/array of [psf, wave, phot, specmin]
Returns (xyrange, subimage) such that
xmin, xmax, ymin, ymax = xyrange
image[ymin:ymax, xmin:xmax] += subimage
"""
try:
psf, wave, phot, specmin = args
nspec = phot.shape[0]
if phot.shape[-1] != wave.shape[-1]:
raise ValueError('phot.shape {} vs. wave.shape {} mismatch'.format(phot.shape, wave.shape))
xyrange = psf.xyrange( [specmin, specmin+nspec], wave )
img = psf.project(wave, phot, specmin=specmin, xyrange=xyrange)
return (xyrange, img)
except Exception as e:
if os.getenv('UNITTEST_SILENT') is None:
import traceback
print('-'*60)
print('ERROR in _project', psf.wmin, psf.wmax, wave[0], wave[-1], phot.shape, specmin)
traceback.print_exc()
print('-'*60)
raise e
#- Move this into specter itself?
def parallel_project(psf, wave, phot, specmin=0, ncpu=None, comm=None):
    """
    Using psf, project phot[nspec, nw] vs. wave[nw] onto image
    Args:
        psf: PSF object providing project() (and xyrange() for workers)
        wave: 1D wavelength array
        phot: 2D photons[nspec, nwave]
    Options:
        specmin: first spectrum index
        ncpu: number of multiprocessing workers (None = half the cores;
            <=1 runs serially); ignored when comm is given
        comm: MPI communicator; if not None, MPI is used instead of
            multiprocessing
    Returns the 2D image (on rank 0 when using MPI; None on other ranks)
    """
    img = None
    if comm is not None:
        # --- MPI version ---
        # Shrink the communicator if there are more ranks than spectra so
        # every remaining rank has work to do.
        nspec = phot.shape[0]
        if nspec < comm.size:
            keep = int(comm.rank < nspec)
            comm = comm.Split(color=keep)
            if not keep:
                return None
        # (removed dead code: an unused `specs`/`myspecs` array_split)
        # Partition the spectra evenly across the ranks.
        nspec = phot.shape[0]
        iispec = np.linspace(specmin, nspec, int(comm.size+1)).astype(int)
        args = list()
        if comm.rank == 0:
            for i in range(comm.size):
                if iispec[i+1] > iispec[i]:
                    args.append( [psf, wave, phot[iispec[i]:iispec[i+1]], iispec[i]] )
        args=comm.scatter(args,root=0)
        #now that all ranks have args, we can call _project
        xy_subimg=_project(args)
        #_project calls project calls spotgrid etc
        xy_subimg=comm.gather(xy_subimg,root=0)
        if comm.rank ==0:
            #now all the data should be back at rank 0
            # use same technique as multiprocessing to recombine the data
            img = np.zeros( (psf.npix_y, psf.npix_x) )
            for xyrange, subimg in xy_subimg:
                xmin, xmax, ymin, ymax = xyrange
                img[ymin:ymax, xmin:xmax] += subimg
        #end of mpi section
    else:
        import multiprocessing as mp
        if ncpu is None:
            # Avoid hyperthreading
            ncpu = mp.cpu_count() // 2
        if ncpu <= 1:
            #- Serial version
            log.debug('Not using multiprocessing (ncpu={})'.format(ncpu))
            img = psf.project(wave, phot, specmin=specmin)
        else:
            #- multiprocessing version
            #- Split the spectra into ncpu groups
            log.debug('Using multiprocessing (ncpu={})'.format(ncpu))
            nspec = phot.shape[0]
            iispec = np.linspace(specmin, nspec, ncpu+1).astype(int)
            args = list()
            for i in range(ncpu):
                if iispec[i+1] > iispec[i]:  #- can be false if nspec < ncpu
                    args.append( [psf, wave, phot[iispec[i]:iispec[i+1]], iispec[i]] )
            #- Create pool of workers to do the projection using _project
            #- xyrange, subimg = _project( [psf, wave, phot, specmin] )
            pool = mp.Pool(ncpu)
            xy_subimg = pool.map(_project, args)
            img = np.zeros( (psf.npix_y, psf.npix_x) )
            for xyrange, subimg in xy_subimg:
                xmin, xmax, ymin, ymax = xyrange
                img[ymin:ymax, xmin:xmax] += subimg
            #- Prevents hangs of Travis tests
            pool.close()
            pool.join()
    return img
def get_nodes_per_exp(nnodes,nexposures,ncameras,user_nodes_per_comm_exp=None):
    """
    Calculate how many nodes to use per exposure.
    Args:
        nnodes: number of nodes in MPI COMM_WORLD (not number of ranks)
        nexposures: number of exposures to process
        ncameras: number of cameras per exposure
        user_nodes_per_comm_exp (int, optional): user override of number of
            nodes to use; used to check requirements
    Returns the number of nodes to include in sub-communicators used to
    process individual exposures.
    Notes:
        * picks the largest number of nodes per exposure that still
          results in efficient node usage
        * inefficient packings only produce warnings, not errors
        * a user override must divide gcd(nnodes, ncameras) evenly
        * see desisim.test.test_pixsim.test_get_nodes_per_exp() for examples
    """
    from math import gcd
    import desiutil.log as logging
    log = logging.get_logger()
    log.setLevel(logging.INFO)

    total_frames = ncameras * nexposures

    # Packing check 1: all frames should spread evenly over the nodes.
    if total_frames % nnodes != 0:
        log.warning("nframes {} is not evenly divisible by nnodes {}; packing will be inefficient".format(total_frames, nnodes))
    else:
        log.debug("nframes {} is evenly divisible by nnodes {}, check passed".format(total_frames, nnodes))

    # A node group must divide both nnodes and ncameras, so the natural
    # candidate is their greatest common divisor (Python's built-in gcd).
    gcf = gcd(nnodes, ncameras)
    if nnodes > 1:
        if gcf == 1:
            raise ValueError("greatest common factor {} between nnodes {} and nframes {} must be larger than one, try again".format(gcf, nnodes, total_frames))
        log.debug("greatest common factor {} between nnodes {} and nframes {} is greater than one, check passed".format(gcf, nnodes, total_frames))

    # Validate the user override if one was supplied; otherwise use the gcd.
    if user_nodes_per_comm_exp is not None:
        if gcf % user_nodes_per_comm_exp != 0:
            raise ValueError("user-specified value of user_nodes_per_comm_exp {} is bad, try again".format(user_nodes_per_comm_exp))
        log.debug("user-specified value of user_nodes_per_comm_exp {} is good, check passed".format(user_nodes_per_comm_exp))
        chosen = user_nodes_per_comm_exp
    else:
        chosen = gcf

    # Packing check 2: exposures*group_size should divide evenly over nodes.
    if (nexposures * chosen) % nnodes != 0:
        log.warning("nexposures {} * nodes_per_comm_exp {} does not divide evenly into nnodes {}; packing will be inefficient".format(nexposures, chosen, nnodes))
    else:
        log.debug("nexposures {} * nodes_per_comm_exp {} divides evenly into nnodes {}, check passed".format(nexposures, chosen, nnodes))

    return chosen
#-------------------------------------------------------------------------
#- MPI utility functions
#- These functions assist with splitting a communicator across node boundaries.
#- That constraint isn't required by MPI, but can be convenient for humans
#- thinking about "I want to process one camera with one node" or "I want to
#- process 6 exposures with 20 nodes using 10 nodes per exposure"
def mpi_count_nodes(comm):
    '''
    Return the number of distinct nodes (hostnames) in this communicator.
    '''
    hostnames = comm.allgather(socket.gethostname())
    return len(set(hostnames))
def mpi_split_by_node(comm, nodes_per_communicator):
    '''
    Split an MPI communicator into sub-communicators that each span an
    integer number of nodes.
    Args:
        comm: MPI communicator
        nodes_per_communicator: number of nodes per sub-communicator
    Returns:
        (MPI sub-communicator, node_index, total_num_nodes)
    Notes:
        * the node count of `comm` must be an integer multiple of
          nodes_per_communicator, and ranks must be spread evenly per node
        * node_index says which of the N sub-communicators this rank joined
        * total_num_nodes = number of nodes in the original communicator
    '''
    total_nodes = mpi_count_nodes(comm)
    if comm.size % total_nodes != 0:
        raise ValueError('Variable number of ranks per node')
    if total_nodes % nodes_per_communicator != 0:
        raise ValueError('Input number of nodes {} must be divisible by nodes_per_communicator {}'.format(
            total_nodes, nodes_per_communicator))
    # Ranks are grouped contiguously; each group spans whole nodes.
    group_size = comm.size // (total_nodes // nodes_per_communicator)
    group_index = comm.rank // group_size
    subcomm = comm.Split(color=group_index)
    return subcomm, group_index, total_nodes
|
[
"numpy.empty",
"numpy.arange",
"numpy.random.normal",
"multiprocessing.cpu_count",
"desiutil.log.get_logger",
"time.asctime",
"traceback.print_exc",
"astropy.io.fits.getdata",
"os.path.exists",
"numpy.random.RandomState",
"socket.gethostname",
"desiutil.iers.freeze_iers",
"numpy.random.poisson",
"numpy.linspace",
"astropy.time.Time",
"os.rename",
"math.gcd",
"astropy.io.fits.open",
"multiprocessing.Pool",
"os.getenv",
"numpy.random.uniform",
"numpy.zeros",
"numpy.array_split"
] |
[((457, 469), 'desiutil.log.get_logger', 'get_logger', ([], {}), '()\n', (467, 469), False, 'from desiutil.log import get_logger\n'), ((8037, 8050), 'desiutil.iers.freeze_iers', 'freeze_iers', ([], {}), '()\n', (8048, 8050), False, 'from desiutil.iers import freeze_iers\n'), ((19668, 19699), 'numpy.zeros', 'np.zeros', (['(ny, nx)'], {'dtype': 'float'}), '((ny, nx), dtype=float)\n', (19676, 19699), True, 'import numpy as np\n'), ((25633, 25653), 'desiutil.log.get_logger', 'logging.get_logger', ([], {}), '()\n', (25651, 25653), True, 'import desiutil.log as logging\n'), ((26372, 26393), 'math.gcd', 'gcd', (['nnodes', 'ncameras'], {}), '(nnodes, ncameras)\n', (26375, 26393), False, 'from math import gcd\n'), ((3073, 3096), 'os.path.exists', 'os.path.exists', (['rawfile'], {}), '(rawfile)\n', (3087, 3096), False, 'import os\n'), ((3167, 3185), 'astropy.io.fits.open', 'fits.open', (['rawfile'], {}), '(rawfile)\n', (3176, 3185), False, 'from astropy.io import fits\n'), ((6918, 6948), 'os.rename', 'os.rename', (['tmprawfile', 'rawfile'], {}), '(tmprawfile, rawfile)\n', (6927, 6948), False, 'import os\n'), ((11888, 11923), 'numpy.random.RandomState', 'np.random.RandomState', (['seeds[irand]'], {}), '(seeds[irand])\n', (11909, 11923), True, 'import numpy as np\n'), ((12003, 12051), 'numpy.empty', 'np.empty', (['(nyraw * 2, nxraw * 2)'], {'dtype': 'np.int32'}), '((nyraw * 2, nxraw * 2), dtype=np.int32)\n', (12011, 12051), True, 'import numpy as np\n'), ((17319, 17343), 'astropy.time.Time', 'Time', (["header['DATE-OBS']"], {}), "(header['DATE-OBS'])\n", (17323, 17343), False, 'from astropy.time import Time\n'), ((19792, 19819), 'numpy.random.uniform', 'np.random.uniform', (['(100)', '(200)'], {}), '(100, 200)\n', (19809, 19819), True, 'import numpy as np\n'), ((19971, 20025), 'numpy.random.normal', 'np.random.normal', ([], {'scale': 'readnoise', 'size': '(ny, nprescan)'}), '(scale=readnoise, size=(ny, nprescan))\n', (19987, 20025), True, 'import numpy as np\n'), ((20102, 
20157), 'numpy.random.normal', 'np.random.normal', ([], {'scale': 'readnoise', 'size': '(ny, noverscan)'}), '(scale=readnoise, size=(ny, noverscan))\n', (20118, 20157), True, 'import numpy as np\n'), ((20251, 20312), 'numpy.random.normal', 'np.random.normal', ([], {'loc': 'offset', 'scale': 'readnoise', 'size': 'img.shape'}), '(loc=offset, scale=readnoise, size=img.shape)\n', (20267, 20312), True, 'import numpy as np\n'), ((21999, 22039), 'numpy.arange', 'np.arange', (['phot.shape[0]'], {'dtype': 'np.int32'}), '(phot.shape[0], dtype=np.int32)\n', (22008, 22039), True, 'import numpy as np\n'), ((28889, 28909), 'socket.gethostname', 'socket.gethostname', ([], {}), '()\n', (28907, 28909), False, 'import socket\n'), ((2545, 2582), 'astropy.io.fits.getdata', 'fits.getdata', (['simspecfile', '"""FIBERMAP"""'], {}), "(simspecfile, 'FIBERMAP')\n", (2557, 2582), False, 'from astropy.io import fits\n'), ((20327, 20349), 'numpy.random.poisson', 'np.random.poisson', (['img'], {}), '(img)\n', (20344, 20349), True, 'import numpy as np\n'), ((22058, 22090), 'numpy.array_split', 'np.array_split', (['specs', 'comm.size'], {}), '(specs, comm.size)\n', (22072, 22090), True, 'import numpy as np\n'), ((22834, 22868), 'numpy.zeros', 'np.zeros', (['(psf.npix_y, psf.npix_x)'], {}), '((psf.npix_y, psf.npix_x))\n', (22842, 22868), True, 'import numpy as np\n'), ((24041, 24054), 'multiprocessing.Pool', 'mp.Pool', (['ncpu'], {}), '(ncpu)\n', (24048, 24054), True, 'import multiprocessing as mp\n'), ((24231, 24265), 'numpy.zeros', 'np.zeros', (['(psf.npix_y, psf.npix_x)'], {}), '((psf.npix_y, psf.npix_x))\n', (24239, 24265), True, 'import numpy as np\n'), ((2423, 2432), 'time.asctime', 'asctime', ([], {}), '()\n', (2430, 2432), False, 'from time import asctime\n'), ((3564, 3573), 'time.asctime', 'asctime', ([], {}), '()\n', (3571, 3573), False, 'from time import asctime\n'), ((3973, 3982), 'time.asctime', 'asctime', ([], {}), '()\n', (3980, 3982), False, 'from time import asctime\n'), ((8181, 
8190), 'time.asctime', 'asctime', ([], {}), '()\n', (8188, 8190), False, 'from time import asctime\n'), ((9463, 9472), 'time.asctime', 'asctime', ([], {}), '()\n', (9470, 9472), False, 'from time import asctime\n'), ((9779, 9788), 'time.asctime', 'asctime', ([], {}), '()\n', (9786, 9788), False, 'from time import asctime\n'), ((10389, 10415), 'numpy.random.poisson', 'np.random.poisson', (['truepix'], {}), '(truepix)\n', (10406, 10415), True, 'import numpy as np\n'), ((11822, 11846), 'numpy.random.RandomState', 'np.random.RandomState', (['(0)'], {}), '(0)\n', (11843, 11846), True, 'import numpy as np\n'), ((16998, 17022), 'astropy.time.Time', 'Time', (["header['DATE-OBS']"], {}), "(header['DATE-OBS'])\n", (17002, 17022), False, 'from astropy.time import Time\n'), ((17873, 17896), 'numpy.zeros', 'np.zeros', (['truepix.shape'], {}), '(truepix.shape)\n', (17881, 17896), True, 'import numpy as np\n'), ((17898, 17921), 'numpy.zeros', 'np.zeros', (['truepix.shape'], {}), '(truepix.shape)\n', (17906, 17921), True, 'import numpy as np\n'), ((18071, 18080), 'time.asctime', 'asctime', ([], {}), '()\n', (18078, 18080), False, 'from time import asctime\n'), ((21186, 21214), 'os.getenv', 'os.getenv', (['"""UNITTEST_SILENT"""'], {}), "('UNITTEST_SILENT')\n", (21195, 21214), False, 'import os\n'), ((21390, 21411), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (21409, 21411), False, 'import traceback\n'), ((23170, 23184), 'multiprocessing.cpu_count', 'mp.cpu_count', ([], {}), '()\n', (23182, 23184), True, 'import multiprocessing as mp\n'), ((4349, 4358), 'time.asctime', 'asctime', ([], {}), '()\n', (4356, 4358), False, 'from time import asctime\n'), ((5872, 5881), 'time.asctime', 'asctime', ([], {}), '()\n', (5879, 5881), False, 'from time import asctime\n'), ((6761, 6770), 'time.asctime', 'asctime', ([], {}), '()\n', (6768, 6770), False, 'from time import asctime\n'), ((17681, 17690), 'time.asctime', 'asctime', ([], {}), '()\n', (17688, 17690), False, 'from time 
import asctime\n'), ((23603, 23640), 'numpy.linspace', 'np.linspace', (['specmin', 'nspec', '(ncpu + 1)'], {}), '(specmin, nspec, ncpu + 1)\n', (23614, 23640), True, 'import numpy as np\n'), ((4987, 4996), 'time.asctime', 'asctime', ([], {}), '()\n', (4994, 4996), False, 'from time import asctime\n'), ((5289, 5298), 'time.asctime', 'asctime', ([], {}), '()\n', (5296, 5298), False, 'from time import asctime\n')]
|
from calendar import timegm
from datetime import datetime, timedelta, tzinfo
from socket import inet_ntoa, inet_aton
from struct import pack, unpack, calcsize
# Used for converting python datetime objects to and from FILETIME structures.
# A FILETIME counts 100-nanosecond intervals since 1601-01-01 UTC; the Unix
# epoch (1970-01-01) expressed in that unit is _EPOCH_AS_FILETIME, and
# _HUNDREDS_OF_NANOS is the number of 100ns ticks per second.
_EPOCH_AS_FILETIME = 116444736000000000
_HUNDREDS_OF_NANOS = 10000000
_ZERO = timedelta(0)
class _UTC(tzinfo):
    """Minimal fixed-offset tzinfo representing UTC (offset 0, no DST)."""
    def utcoffset(self, dt):
        return _ZERO
    def tzname(self, dt):
        return "UTC"
    def dst(self, dt):
        return _ZERO
# Module-level singleton used to coerce naive datetimes to UTC.
_utc = _UTC()
def make_dword(v):
    """Build an integer DWORD from a string of at most 4 ASCII characters.

    Shorter strings are left-padded with NUL bytes, so the string is read
    as a big-endian 4-byte tag.

    Raises TypeError for non-strings or strings longer than 4 characters.
    """
    if not isinstance(v, str) or len(v) > 4:
        raise TypeError("DWORD must be a string with at most 4 characters.")
    padded = v.rjust(4, '\0')
    return unpack('>I', padded.encode('ascii'))[0]
def unmake_dword(v):
    """Convert an integer DWORD back to its 4-character ASCII string.

    Packing big-endian is equivalent to reversing the little-endian
    byte string, which is what the original implementation did.
    """
    return pack('>I', v).decode('ascii')
def format_buffer(data):
    """Format binary data as a human-friendly hex dump.

    Produces rows of up to 16 hex bytes followed by a tab and the ASCII
    rendering of that row ('.' for non-printable bytes).

    FIX: previously returned None for empty input but a str otherwise;
    now consistently returns a string ('' when *data* is empty).
    """
    if len(data) == 0:
        # Empty input: return an empty string so callers always get a str.
        return ''
    if isinstance(data, (DataBuffer, DataReader)):
        data = data.data

    def printable(byte):
        # Keep the original comparison: bytes strictly below 0x20 or strictly
        # above 0x7F are rendered as '.'; everything else as its character.
        return '.' if byte < 0x20 or byte > 0x7F else chr(byte)

    data_length = len(data)
    mod = data_length % 16
    ret = ''
    # Emit hex bytes; each time a 16-byte row completes, append its ASCII gutter.
    for i in range(data_length):
        if i != 0 and i % 16 == 0:
            ret += '\t'
            for j in range(i - 16, i):
                ret += printable(data[j])
            ret += '\n'
        ret += ('00' + hex(data[i])[2:])[-2:] + ' '
    if mod != 0:
        # Pad the final partial row so the ASCII gutter lines up with full rows.
        ret = ret.ljust(len(ret) + ((16 - mod) * 3))
        j = data_length - mod
    else:
        j = data_length - 16
    ret += '\t'
    # ASCII gutter for the last (possibly partial) row.
    for j in range(j, data_length):
        ret += printable(data[j])
    return ret + '\n'
class DataBuffer:
    """Growable byte buffer with typed little-endian append helpers."""

    def __init__(self, data=None):
        self.data = bytearray(data or b'')

    def __len__(self):
        return len(self.data)

    def __str__(self):
        return "Buffer: %i bytes" % len(self.data)

    def __repr__(self):
        return format_buffer(self.data)

    def insert_raw(self, data):
        """Append raw binary data.

        Accepts str, bytes, and DataBuffer/DataReader objects.
        """
        payload = data.data if isinstance(data, (DataBuffer, DataReader)) else data
        self.data += payload

    def insert_byte(self, b):
        """Append a single unsigned byte."""
        self.insert_format('<B', b)

    def insert_word(self, w):
        """Append an unsigned 16-bit little-endian WORD."""
        self.insert_format('<H', w)

    def insert_dword(self, d):
        """Append an unsigned 32-bit DWORD (int, or a 4-char tag string)."""
        value = make_dword(d) if isinstance(d, str) else d
        self.insert_format('<I', value)

    def insert_long(self, q):
        """Append an unsigned 64-bit QWORD/FILETIME."""
        self.insert_format('<Q', q)

    def insert_string(self, s, encoding='utf-8', term=b'\0'):
        """Append *s* encoded with *encoding* and followed by *term*.

        If term is omitted, a null-byte terminator is used.
        """
        self.insert_raw(s.encode(encoding) + term)

    def insert_ipv4(self, ipv4):
        """Append a dotted-quad IPv4 address as its 4 packed bytes."""
        self.insert_raw(inet_aton(ipv4))

    def insert_filetime(self, dt):
        """Append a datetime (or a raw int) as a 64-bit Windows FILETIME."""
        if isinstance(dt, int):
            # Raw integer filetimes are written through unchanged.
            return self.insert_long(dt)
        if dt.tzinfo is None or (dt.tzinfo.utcoffset(dt) is None):
            # Naive datetimes are interpreted as UTC.
            dt = dt.replace(tzinfo=_utc)
        ft = _EPOCH_AS_FILETIME + (timegm(dt.timetuple()) * _HUNDREDS_OF_NANOS)
        self.insert_long(ft + (dt.microsecond * 10))

    def insert_format(self, fmt, *args):
        """Append values packed with the struct format *fmt*."""
        self.insert_raw(pack(fmt, *args))

    def clear(self):
        """Discard all buffered data."""
        self.data = bytearray()
class DataReader:
    """Sequential little-endian reader over a byte buffer.

    Keeps a read cursor (*position*); every get_* method advances it past
    the bytes consumed unless peek=True is passed.
    """
    def __init__(self, data=None):
        self.data = bytearray(data or b'')
        self.position = 0
    @classmethod
    def from_hex_string(cls, x):
        """ Creates a data reader from a string of hex values. """
        # Spaces are allowed as byte separators and are stripped first.
        x = x.replace(' ', '')
        if len(x) % 2 != 0:
            raise ValueError("Hex string length not even - bytes must be 0-padded")
        return cls(bytes([int(x[i:i+2], 16) for i in range(0, len(x), 2)]))
    def __len__(self):
        return len(self.data)
    def __str__(self):
        return "Reader: %i bytes, position: %i" % (len(self.data), self.position)
    def __repr__(self):
        return format_buffer(self.data)
    def get_raw(self, length=-1, peek=False):
        """ Returns raw data from the buffer.
            If length is omitted or -1, all remaining data will be returned.
        """
        if length == -1:
            length = (len(self.data) - self.position)
        r = self.data[self.position:(self.position + length)]
        if not peek:
            self.position += length
        return bytes(r)
    def get_byte(self, peek=False):
        """ Returns the next byte from the buffer. """
        return unpack('<B', self.get_raw(1, peek))[0]
    def get_word(self, peek=False):
        """ Returns the next 2 bytes as an unsigned 16-bit WORD. """
        return unpack('<H', self.get_raw(2, peek))[0]
    def get_dword(self, as_str=False, peek=False):
        """ Returns the next 4 bytes as an unsigned 32-bit DWORD.
            With as_str=True, returns the 4 bytes as a big-endian ASCII tag.
        """
        if as_str:
            return self.get_raw(4, peek)[::-1].decode('ascii')
        else:
            return unpack('<I', self.get_raw(4, peek))[0]
    def get_long(self, peek=False):
        """ Returns the next 8 bytes as an unsigned 64-bit QWORD/FILETIME. """
        return unpack('<Q', self.get_raw(8, peek))[0]
    def get_string(self, encoding='utf-8', term=b'\0', peek=False):
        """ Returns a string starting at the current position and going to the next occurrence of term.
            If encoding is None, a bytes object will be returned
            If term is omitted, a null-byte will be used.
            This is the opposite of DataBuffer.insert_string().
        """
        # Locate the terminator after the cursor; if absent, read to the end.
        index = self.data.index(term, self.position) if term in self.data[self.position:] else len(self.data)
        data = self.get_raw(index - self.position, peek)
        if not peek:
            # Skip over the terminator itself.
            # NOTE(review): when term is missing this still advances the cursor
            # past the end of the buffer - confirm that is intended.
            self.position += len(term)
        return data.decode(encoding) if encoding else data
    def get_ipv4(self, peek=False):
        """ Returns the next 4 bytes as an IPv4 address string. """
        return inet_ntoa(self.get_raw(4, peek))
    def get_filetime(self, peek=False):
        """ Returns the next 8 bytes as a python datetime object (None if the raw value is 0). """
        ft = self.get_long(peek)
        if ft == 0:
            return None
        # Split the FILETIME into whole seconds since the Unix epoch plus the
        # remaining 100ns ticks, then rebuild a naive UTC datetime with
        # microsecond precision (each tick is 0.1 microsecond).
        (s, ns100) = divmod(ft - _EPOCH_AS_FILETIME, _HUNDREDS_OF_NANOS)
        dt = datetime.utcfromtimestamp(s)
        return dt.replace(microsecond=(ns100 // 10))
    def get_format(self, fmt, peek=False):
        """ Returns multiple objects from the specified format from struct.unpack. """
        x = calcsize(fmt)
        return unpack(fmt, self.get_raw(x, peek))
    def eob(self):
        """ Returns TRUE if the buffer has been fully read. """
        return self.position >= len(self.data)
|
[
"struct.calcsize",
"struct.pack",
"socket.inet_aton",
"datetime.datetime.utcfromtimestamp",
"datetime.timedelta"
] |
[((319, 331), 'datetime.timedelta', 'timedelta', (['(0)'], {}), '(0)\n', (328, 331), False, 'from datetime import datetime, timedelta, tzinfo\n'), ((872, 885), 'struct.pack', 'pack', (['"""<I"""', 'v'], {}), "('<I', v)\n", (876, 885), False, 'from struct import pack, unpack, calcsize\n'), ((7322, 7350), 'datetime.datetime.utcfromtimestamp', 'datetime.utcfromtimestamp', (['s'], {}), '(s)\n', (7347, 7350), False, 'from datetime import datetime, timedelta, tzinfo\n'), ((7547, 7560), 'struct.calcsize', 'calcsize', (['fmt'], {}), '(fmt)\n', (7555, 7560), False, 'from struct import pack, unpack, calcsize\n'), ((2649, 2662), 'struct.pack', 'pack', (['"""<B"""', 'b'], {}), "('<B', b)\n", (2653, 2662), False, 'from struct import pack, unpack, calcsize\n'), ((2793, 2806), 'struct.pack', 'pack', (['"""<H"""', 'w'], {}), "('<H', w)\n", (2797, 2806), False, 'from struct import pack, unpack, calcsize\n'), ((3187, 3200), 'struct.pack', 'pack', (['"""<Q"""', 'q'], {}), "('<Q', q)\n", (3191, 3200), False, 'from struct import pack, unpack, calcsize\n'), ((3570, 3585), 'socket.inet_aton', 'inet_aton', (['ipv4'], {}), '(ipv4)\n', (3579, 3585), False, 'from socket import inet_ntoa, inet_aton\n'), ((4271, 4287), 'struct.pack', 'pack', (['fmt', '*args'], {}), '(fmt, *args)\n', (4275, 4287), False, 'from struct import pack, unpack, calcsize\n'), ((3033, 3046), 'struct.pack', 'pack', (['"""<I"""', 'd'], {}), "('<I', d)\n", (3037, 3046), False, 'from struct import pack, unpack, calcsize\n')]
|
# -*- coding: utf-8 -*-
#
# Copyright (c) 2019~2999 - Cologler <<EMAIL>>
# ----------
#
# ----------
from utils import get_instrs_from_b2a, get_instrs
def test_return_none():
    """Disassembly of a `return None` body must match its b2a round-trip."""
    def func():
        return None

    via_b2a = get_instrs_from_b2a(func)
    assert get_instrs(func) == via_b2a
def test_return_true():
    """Disassembly of a `return True` body must match its b2a round-trip."""
    def func():
        return True

    via_b2a = get_instrs_from_b2a(func)
    assert get_instrs(func) == via_b2a
def test_return_false():
    """Disassembly of a `return False` body must match its b2a round-trip."""
    def func():
        return False

    via_b2a = get_instrs_from_b2a(func)
    assert get_instrs(func) == via_b2a
def test_return_int():
    """Disassembly of an int-returning body must match its b2a round-trip."""
    def func():
        return 10

    via_b2a = get_instrs_from_b2a(func)
    assert get_instrs(func) == via_b2a
def test_return_str():
    """Disassembly of a str-returning body must match its b2a round-trip."""
    def func():
        return '10'

    via_b2a = get_instrs_from_b2a(func)
    assert get_instrs(func) == via_b2a
def test_return_bytes():
    """Disassembly of a bytes-returning body must match its b2a round-trip."""
    def func():
        return b'10'

    via_b2a = get_instrs_from_b2a(func)
    assert get_instrs(func) == via_b2a
|
[
"utils.get_instrs",
"utils.get_instrs_from_b2a"
] |
[((225, 241), 'utils.get_instrs', 'get_instrs', (['func'], {}), '(func)\n', (235, 241), False, 'from utils import get_instrs_from_b2a, get_instrs\n'), ((245, 270), 'utils.get_instrs_from_b2a', 'get_instrs_from_b2a', (['func'], {}), '(func)\n', (264, 270), False, 'from utils import get_instrs_from_b2a, get_instrs\n'), ((344, 360), 'utils.get_instrs', 'get_instrs', (['func'], {}), '(func)\n', (354, 360), False, 'from utils import get_instrs_from_b2a, get_instrs\n'), ((364, 389), 'utils.get_instrs_from_b2a', 'get_instrs_from_b2a', (['func'], {}), '(func)\n', (383, 389), False, 'from utils import get_instrs_from_b2a, get_instrs\n'), ((465, 481), 'utils.get_instrs', 'get_instrs', (['func'], {}), '(func)\n', (475, 481), False, 'from utils import get_instrs_from_b2a, get_instrs\n'), ((485, 510), 'utils.get_instrs_from_b2a', 'get_instrs_from_b2a', (['func'], {}), '(func)\n', (504, 510), False, 'from utils import get_instrs_from_b2a, get_instrs\n'), ((581, 597), 'utils.get_instrs', 'get_instrs', (['func'], {}), '(func)\n', (591, 597), False, 'from utils import get_instrs_from_b2a, get_instrs\n'), ((601, 626), 'utils.get_instrs_from_b2a', 'get_instrs_from_b2a', (['func'], {}), '(func)\n', (620, 626), False, 'from utils import get_instrs_from_b2a, get_instrs\n'), ((699, 715), 'utils.get_instrs', 'get_instrs', (['func'], {}), '(func)\n', (709, 715), False, 'from utils import get_instrs_from_b2a, get_instrs\n'), ((719, 744), 'utils.get_instrs_from_b2a', 'get_instrs_from_b2a', (['func'], {}), '(func)\n', (738, 744), False, 'from utils import get_instrs_from_b2a, get_instrs\n'), ((820, 836), 'utils.get_instrs', 'get_instrs', (['func'], {}), '(func)\n', (830, 836), False, 'from utils import get_instrs_from_b2a, get_instrs\n'), ((840, 865), 'utils.get_instrs_from_b2a', 'get_instrs_from_b2a', (['func'], {}), '(func)\n', (859, 865), False, 'from utils import get_instrs_from_b2a, get_instrs\n')]
|
import re
import pandas as pd
import numpy as np
import ast
import pickle
import datetime
from nltk.corpus import stopwords
import pkg_resources
# from pkg_resources import resource_string, resource_listdir
def memoize(func):
    """Decorator caching results keyed on the repr of positional and keyword args."""
    cache = {}

    def wrapper(*args, **kwargs):
        key = str(args) + str(kwargs)
        try:
            # Fast path: result already computed for this argument signature.
            return cache[key]
        except KeyError:
            result = func(*args, **kwargs)
            cache[key] = result
            return result

    return wrapper
@memoize
def levenshtein(s, t):
    """Return the Levenshtein (edit) distance between strings *s* and *t*.

    FIX: the previous naive recursion was exponential without the memoize
    cache and, even memoized, recursed up to len(s) + len(t) frames - deep
    enough to hit Python's recursion limit on long words. This iterative
    Wagner-Fischer dynamic program runs in O(len(s) * len(t)) time and
    O(len(t)) space with no recursion, and returns identical distances.
    """
    if s == "" or t == "":
        return max(len(s), len(t))
    # previous_row[j] holds the distance between the current prefix of s
    # and t[:j].
    previous_row = list(range(len(t) + 1))
    for i, s_char in enumerate(s, start=1):
        current_row = [i]
        for j, t_char in enumerate(t, start=1):
            cost = 0 if s_char == t_char else 1
            current_row.append(min(previous_row[j] + 1,          # deletion
                                   current_row[j - 1] + 1,       # insertion
                                   previous_row[j - 1] + cost))  # substitution
        previous_row = current_row
    return previous_row[-1]
class ICD10:
    """Keyword search over an ICD-10 code table bundled with the package.

    On construction, loads the ICD-10 CSV (per-code name, approximate
    synonyms, applicable-to and clinical-info lists) plus a pickled
    vocabulary list used for spell correction, then exposes `search()`.
    """
    def __init__(self):
        # Resource files shipped inside the dbaicd10 package.
        data_file = pkg_resources.resource_filename('dbaicd10.resources', "dba_icd10.csv")
        vocabulary_file = pkg_resources.resource_filename('dbaicd10.resources', "vocab_list.pkl")

        ## setting data and vocabulary
        self.data = pd.read_csv(data_file)
        # The list-valued columns are stored as their repr() in the CSV;
        # literal_eval turns each cell back into a real Python list.
        self.data['Approximate Synonyms'] = self.data['Approximate Synonyms']\
            .apply(lambda x: ast.literal_eval(x))
        self.data['Applicable To'] = self.data['Applicable To'] \
            .apply(lambda x: ast.literal_eval(x))
        self.data['Clinical Info'] = self.data['Clinical Info'] \
            .apply(lambda x: ast.literal_eval(x))

        infile = open(vocabulary_file, 'rb')
        self.vocab_list = pickle.load(infile)
        infile.close()
        # English stopwords are stripped from queries before matching.
        self.stop_words = set(stopwords.words('english'))

    # @memoize
    # @staticmethod
    def auto_correct(self, sentence, remove_stop_words=False, vocab=None, threshold=70):
        '''
        Auto corrects a sentence from a vocabulary based on ICD10 dataset
        :param sentence: (String) text that needs to be autocorrected
        :param remove_stop_words: (boolean) whether to remove stopwords from sentence
            NOTE(review): this flag is not used in the body - confirm intent.
        :param vocab: (list of string) If need to provide a custom vocabulary
        :param threshold: ( Integer: Default=70) Corrects the word if it matches atleast threshold percent from any word from vocabulary
        :return: (String) autocorrected sentence
        '''
        ## Preprocessing: lowercase, strip non-word characters, squeeze spaces
        sentence = sentence.lower()
        ### Make alphanumeric
        sentence = re.sub(r'\W+', ' ', sentence)
        ## remove double spaces
        sentence = re.sub(' +', ' ', sentence)
        # A word is corrected only if its best match differs by fewer than
        # allowed_error * len(word) edits.
        allowed_error = 1 - (threshold / 100)
        if vocab is None:
            vocab = self.vocab_list
        words = sentence.split()
        final_sent = ''
        for word in words:
            ## for each word we find, in the vocabulary, the vocab_word with least distance
            distance = 9999
            best_match = None
            for vocab_word in vocab:
                dist = levenshtein(vocab_word, word)
                if dist < distance:
                    distance = dist
                    best_match = vocab_word
            if distance < allowed_error * len(word):
                final_sent = final_sent + " " + best_match
            else:
                final_sent = final_sent + " " + word
        return final_sent.strip()

    def search_helper(self, row, keywords):
        """Legacy per-row scorer (superseded by search_helper2).

        Counts keyword hits separately in the name, synonyms, applicable-to
        and clinical-info columns and returns the best of the four scores.
        """
        ## first search in name
        # print( keywords)
        # Step 1: Score of Name ( score = how many words match )
        name = row['name'].lower().split()
        # print(name)
        name_score = 0
        for keyword in keywords:
            if keyword.lower().strip() in name:
                name_score += 1
        # print(name_score)
        ## Step 2: Score of approximate synonyms
        ## now search in approximate synonyms
        synonyms = row['Approximate Synonyms']
        # synonyms = ast.literal_eval(synonyms)
        # print(synonyms)
        syn_scores = [0] * len(synonyms)
        # there are multiple synonyms for each row,
        # so we find score for each of them
        for i, synonym in enumerate(synonyms):
            synonym = synonym.lower().split()
            for keyword in keywords:
                if keyword.lower() in synonym:
                    syn_scores[i] += 1
        # score of synonym is max of score of each synonym
        # NOTE(review): np.max([]) raises ValueError if a row has an empty
        # list here - confirm the dataset guarantees non-empty lists.
        synonym_score = np.max(syn_scores)
        ## Step 3: Score of applicable to
        ## now search in Applicable To
        applicable_tos = row['Applicable To']
        # applicable_tos = ast.literal_eval(applicable_tos)
        # print(applicable_tos[0])
        applicable_scores = [0] * len(applicable_tos)
        ## there are multiple applicable to for each row
        # so we find score for each of them
        for i, applicable in enumerate(applicable_tos):
            applicable = applicable.lower().split()
            for keyword in keywords:
                if keyword.lower() in applicable:
                    applicable_scores[i] += 1
        # score is max over the individual "applicable to" entries
        applicable_score = np.max(applicable_scores)
        ## STEP 4: Score of Clinical Info
        clinical_infos = row['Clinical Info']
        # clinical_infos = ast.literal_eval(clinical_infos)
        clinical_scores = [0] * len(clinical_infos)
        ## there are multiple clinical info entries for each row
        # so we find score for each of them
        for i, clinical in enumerate(clinical_infos):
            clinical = clinical.lower().split()
            for keyword in keywords:
                if keyword.lower() in clinical:
                    clinical_scores[i] += 1
        # score is max over the individual clinical-info entries
        clinical_score = np.max(clinical_scores)
        # the row score is the best score among the four columns
        # print([name_score, synonym_score, applicable_score, clinical_score])
        return np.max([name_score, synonym_score, applicable_score, clinical_score])

    def search_helper2(self, row, keywords):
        """Weighted per-row keyword scorer used by search().

        Concatenates name / synonyms / applicable-to into one lowercase
        haystack (clinical info is kept separate and weighted at 0.6), then
        accumulates a score per keyword: exact multi-word phrase hits score
        highest (x1.23), individual-word partial hits score 0.23, and
        matches in the name column add an extra bonus.
        """
        INCREMENT_SCORE_BY = 1
        ## just make one big string of all columns, and see how many of the keywords we can find
        all_cols = ''
        all_cols += row['name'].lower()
        all_cols += " ".join(row['Approximate Synonyms'])
        # score of clinical info should be less than others
        clinical_info = " ".join(row['Clinical Info'])
        all_cols += " ".join(row['Applicable To'])
        # lower + normalize whitespace/punctuation
        all_cols = all_cols.strip().lower()
        all_cols = re.sub(r'\W+', ' ', all_cols)
        ## remove double spaces
        all_cols = re.sub(' +', ' ', all_cols)
        # all_words = all_cols.split()
        score = 0
        ## search for keywords
        for keyword in keywords:
            ## SOME OPTIMIZATIONS: values reused several times below
            SPACE_IN_KEYWORD = ' ' in keyword
            KEYWORD_SPLIT = keyword.split()
            ## if we find exact keyword ( example: "muscle fatigue" ) then score is
            ## increased 1
            if keyword in all_cols:
                ## a multi-word phrase match is stronger evidence, so boost it
                if SPACE_IN_KEYWORD:
                    score += 1.23 * INCREMENT_SCORE_BY
                else:
                    score += INCREMENT_SCORE_BY
            ## else we find if keyword can be further divided into smaller keywords
            elif SPACE_IN_KEYWORD:
                for temp_keyword in KEYWORD_SPLIT:
                    if temp_keyword in all_cols:
                        score += 0.23 * INCREMENT_SCORE_BY
            ## if found in clinical info, increase the score, but less
            if keyword in clinical_info:
                score += INCREMENT_SCORE_BY * 0.6
            elif SPACE_IN_KEYWORD:
                for temp_keyword in KEYWORD_SPLIT:
                    if temp_keyword in clinical_info:
                        score += 0.23 * INCREMENT_SCORE_BY * 0.6
            ## extra scores
            ## if keyword is in name only then we give it extra score
            if keyword in row['name'].lower():
                score += INCREMENT_SCORE_BY * 0.23
            elif SPACE_IN_KEYWORD:
                for temp_keyword in KEYWORD_SPLIT:
                    if temp_keyword in row['name']:
                        score += 0.1 * INCREMENT_SCORE_BY
        return score

    def search(self, keyword, auto_correct_keywords=True, show_time_spent=True, return_top_n=10,
               split_with_stopwords=True):
        '''
        Search in ICD10 dataset for the provided keywords. It performs a simple match word search.
        :param keyword: (String) keywords or sentence to search for. Keywords seperated by space
        :param auto_correct_keywords: (Boolean: default=True) Keep it true for spell check of the given keywords
        :param show_time_spent: (Boolean: default=True) Display time utilized for search
        :param return_top_n: (integer: default:10) Returns the number of top results. Is set to 10 returns top 10 results
        :param split_with_stopwords: (Boolean: default=True) Keep it true if you want to split the search query from stopwords instead of space. Refer example below for more info
        :return: Returns a pandas dataframe with top matches

        use case:
            search("<NAME>")
            search("<NAME>", auto_correct_keywords=True, return_top_n=5)

        Example of split_with_stopwords:
            There might be cases where you want to keep two words together, for instance "Right Hand"
            So here we split the query from stopwords instead of spaces. Thus,
            "Fracture in right hand" becomes => ["fracture", "right hand"] instead of ["fracture", "right", "hand"]
            Note that "in" was the stopword and the query got split at "in"
        '''
        before = datetime.datetime.now()
        keyword = keyword.lower()
        if auto_correct_keywords:
            keyword = self.auto_correct(keyword)
        if split_with_stopwords:
            # Replace each surrounded stopword with '#' and split there, so
            # adjacent content words stay together as one phrase keyword.
            for stopword in self.stop_words:
                if stopword in keyword:
                    keyword = keyword.replace(' ' + stopword + ' ', '#')
            keywords = keyword.split('#')
        else:
            keywords = keyword.split()
        # Drop any remaining stopwords from the keyword list.
        keywords = " ".join([d for d in keywords if d not in self.stop_words])
        keywords = keywords.split()
        print('Searching for: "' + " ".join(keywords) + '"')
        # Score every row, then return the top-n scoring rows of the table.
        result = self.data.apply(self.search_helper2, axis=1, keywords=keywords)
        after = datetime.datetime.now()
        diff = after - before
        if show_time_spent:
            print("Search completed in", diff.seconds, "seconds")
        return self.data.loc[result.nlargest(return_top_n, keep='first').index]
|
[
"pandas.read_csv",
"pkg_resources.resource_filename",
"datetime.datetime.now",
"numpy.max",
"pickle.load",
"nltk.corpus.stopwords.words",
"ast.literal_eval",
"re.sub"
] |
[((839, 909), 'pkg_resources.resource_filename', 'pkg_resources.resource_filename', (['"""dbaicd10.resources"""', '"""dba_icd10.csv"""'], {}), "('dbaicd10.resources', 'dba_icd10.csv')\n", (870, 909), False, 'import pkg_resources\n'), ((936, 1007), 'pkg_resources.resource_filename', 'pkg_resources.resource_filename', (['"""dbaicd10.resources"""', '"""vocab_list.pkl"""'], {}), "('dbaicd10.resources', 'vocab_list.pkl')\n", (967, 1007), False, 'import pkg_resources\n'), ((1068, 1090), 'pandas.read_csv', 'pd.read_csv', (['data_file'], {}), '(data_file)\n', (1079, 1090), True, 'import pandas as pd\n'), ((1562, 1581), 'pickle.load', 'pickle.load', (['infile'], {}), '(infile)\n', (1573, 1581), False, 'import pickle\n'), ((2418, 2447), 're.sub', 're.sub', (['"""\\\\W+"""', '""" """', 'sentence'], {}), "('\\\\W+', ' ', sentence)\n", (2424, 2447), False, 'import re\n'), ((2499, 2526), 're.sub', 're.sub', (['""" +"""', '""" """', 'sentence'], {}), "(' +', ' ', sentence)\n", (2505, 2526), False, 'import re\n'), ((4382, 4400), 'numpy.max', 'np.max', (['syn_scores'], {}), '(syn_scores)\n', (4388, 4400), True, 'import numpy as np\n'), ((5334, 5359), 'numpy.max', 'np.max', (['applicable_scores'], {}), '(applicable_scores)\n', (5340, 5359), True, 'import numpy as np\n'), ((6049, 6072), 'numpy.max', 'np.max', (['clinical_scores'], {}), '(clinical_scores)\n', (6055, 6072), True, 'import numpy as np\n'), ((6264, 6333), 'numpy.max', 'np.max', (['[name_score, synonym_score, applicable_score, clinical_score]'], {}), '([name_score, synonym_score, applicable_score, clinical_score])\n', (6270, 6333), True, 'import numpy as np\n'), ((6937, 6966), 're.sub', 're.sub', (['"""\\\\W+"""', '""" """', 'all_cols'], {}), "('\\\\W+', ' ', all_cols)\n", (6943, 6966), False, 'import re\n'), ((7018, 7045), 're.sub', 're.sub', (['""" +"""', '""" """', 'all_cols'], {}), "(' +', ' ', all_cols)\n", (7024, 7045), False, 'import re\n'), ((10299, 10322), 'datetime.datetime.now', 'datetime.datetime.now', ([], 
{}), '()\n', (10320, 10322), False, 'import datetime\n'), ((11011, 11034), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (11032, 11034), False, 'import datetime\n'), ((1636, 1662), 'nltk.corpus.stopwords.words', 'stopwords.words', (['"""english"""'], {}), "('english')\n", (1651, 1662), False, 'from nltk.corpus import stopwords\n'), ((1235, 1254), 'ast.literal_eval', 'ast.literal_eval', (['x'], {}), '(x)\n', (1251, 1254), False, 'import ast\n'), ((1352, 1371), 'ast.literal_eval', 'ast.literal_eval', (['x'], {}), '(x)\n', (1368, 1371), False, 'import ast\n'), ((1469, 1488), 'ast.literal_eval', 'ast.literal_eval', (['x'], {}), '(x)\n', (1485, 1488), False, 'import ast\n')]
|
# Generated by Django 3.2.10 on 2022-02-18 15:20
from django.db import migrations
class Migration(migrations.Migration):
    """Remove the legacy image-related columns from the SoftwareLCM model.

    Depends on migration 0008, which (by its name) presumably moved the
    download URL / image file name / checksum data into the dedicated
    software-image model - confirm against 0008 before editing.
    """

    dependencies = [
        ("nautobot_device_lifecycle_mgmt", "0008_software_image_data_migration"),
    ]

    operations = [
        # Drop each now-unused column from softwarelcm.
        migrations.RemoveField(
            model_name="softwarelcm",
            name="download_url",
        ),
        migrations.RemoveField(
            model_name="softwarelcm",
            name="image_file_checksum",
        ),
        migrations.RemoveField(
            model_name="softwarelcm",
            name="image_file_name",
        ),
    ]
|
[
"django.db.migrations.RemoveField"
] |
[((262, 331), 'django.db.migrations.RemoveField', 'migrations.RemoveField', ([], {'model_name': '"""softwarelcm"""', 'name': '"""download_url"""'}), "(model_name='softwarelcm', name='download_url')\n", (284, 331), False, 'from django.db import migrations\n'), ((376, 452), 'django.db.migrations.RemoveField', 'migrations.RemoveField', ([], {'model_name': '"""softwarelcm"""', 'name': '"""image_file_checksum"""'}), "(model_name='softwarelcm', name='image_file_checksum')\n", (398, 452), False, 'from django.db import migrations\n'), ((497, 569), 'django.db.migrations.RemoveField', 'migrations.RemoveField', ([], {'model_name': '"""softwarelcm"""', 'name': '"""image_file_name"""'}), "(model_name='softwarelcm', name='image_file_name')\n", (519, 569), False, 'from django.db import migrations\n')]
|
import pyOcean_cpu as ocean

# Demo: tensor reshape with Fortran-style (column-major) strides.
# Fortran-style strides
a = ocean.tensor([3,4],'F');
a.copy(range(a.nelem))
print(a)
print(a.strides)

# Reshape to a 3-D view; the storage comparison below prints whether the
# reshaped tensor shares the same underlying storage object.
b = a.reshape([2,3,2])
print(b)
print(b.storage.obj == a.storage.obj)

# Second argument True presumably requests an in-place reshape - confirm
# against the pyOcean API.
a.reshape([2,6],True)
print(a)

# Same sequence with C-style (row-major) strides.
# C-style strides
a = ocean.tensor([3,4],'C');
a.copy(range(a.nelem))
print(a)
print(a.strides)

b = a.reshape([2,3,2])
print(b)
print(b.storage.obj == a.storage.obj)

a.reshape([2,6],True)
print(a)
|
[
"pyOcean_cpu.tensor"
] |
[((57, 82), 'pyOcean_cpu.tensor', 'ocean.tensor', (['[3, 4]', '"""F"""'], {}), "([3, 4], 'F')\n", (69, 82), True, 'import pyOcean_cpu as ocean\n'), ((258, 283), 'pyOcean_cpu.tensor', 'ocean.tensor', (['[3, 4]', '"""C"""'], {}), "([3, 4], 'C')\n", (270, 283), True, 'import pyOcean_cpu as ocean\n')]
|
#-*- coding: utf-8 -*-
# --------------------------------------------------------------------#
# --------------------------------------------------------------------#
# ---------- Made by <NAME> @ircam on 11/2015
# ---------- Copyright (c) 2018 CREAM Lab // CNRS / IRCAM / Sorbonne Université
# ----------
# ---------- process video in diferent ways
# ---------- to use this don't forget to include these lines before your script:
# ----------
# ----------
# --------------------------------------------------------------------#
# --------------------------------------------------------------------#
from __future__ import absolute_import
from __future__ import print_function
import sys
from transform_audio import extract_sentences_tags
def extract_audio(video_file, target_name):
    """
    Extract the audio track of *video_file* into a wav file.

    parameters:
        video_file : file to extract wav from
        target_name : path of the wav file to create (overwritten if present)
    Returns the name of the extracted audio file.
    """
    import os
    import subprocess
    # Remove a stale target first so ffmpeg never prompts about overwriting.
    try:
        os.remove(target_name)
    except OSError:
        pass
    # Argument-list form (no shell) handles paths containing spaces or shell
    # metacharacters safely; the old string command broke on such paths.
    command = ["ffmpeg", "-i", video_file, "-ab", "160k", "-ac", "2",
               "-ar", "44100", "-vn", target_name]
    subprocess.call(command)
    return target_name
def replace_audio(video_file, new_audio, target_video):
    """
    Replace the audio of video_file with new_audio.

    parameters:
        video_file : video file to change the audio
        new_audio : new audio to use
        target_video : video to be created
    """
    import os
    import subprocess
    # Remove the target if it exists so ffmpeg does not prompt.
    try:
        os.remove(target_video)
    except OSError:
        pass
    # Stream-copies the video (-c:v copy) and takes audio from the second
    # input. List form avoids shell quoting issues with odd paths.
    # (A previous variant for .mov used "-map 0:v -map 1:a -c copy -shortest".)
    command = ["ffmpeg", "-i", video_file, "-i", new_audio,
               "-c:v", "copy", "-map", "0:v:0", "-map", "1:a:0", target_video]
    subprocess.call(command)
def erase_audio_from_video(input_video, output_video):
    """
    Erase the audio from a video (video stream is copied, not re-encoded).

    parameters:
        input_video : source video
        output_video : video to be created without the audio
    """
    import subprocess
    # -vcodec copy avoids re-encoding; -an drops the audio track.
    # List form (no shell) is safe for paths with spaces.
    command = ["ffmpeg", "-i", input_video, "-vcodec", "copy", "-an", output_video]
    subprocess.call(command)
def get_movie_stream(video):
    """
    Return the raw "Stream" lines from ffmpeg's info output for *video*.

    parameters:
        video : video from which stream info will be extracted
    Returns the matching output as bytes.
    """
    import subprocess
    # ffmpeg prints media info on stderr, hence the 2>&1 redirect; the pipe
    # through grep requires going through the shell.
    command = "ffmpeg -i " + video + " 2>&1 | grep \"Stream\""
    output = subprocess.check_output(command, shell=True)
    return output
def get_movie_duration(video):
    """
    Return the raw "Duration" line from ffmpeg's info output for *video*.

    parameters:
        video : video input
    Returns the matching output as bytes.
    """
    import subprocess
    # ffmpeg prints media info on stderr, hence the 2>&1 redirect; the pipe
    # through grep requires going through the shell.
    command = "ffmpeg -i " + video + " 2>&1 | grep \"Duration\""
    output = subprocess.check_output(command, shell=True)
    return output
def extract_sentences_in_video(source_name, target_folder, rms_threshold = -50, WndSize = 16384, overlap = 8192):
    """
    In a video with many sentences and blanks between the sentences, create separate video files with one sentence per file.
    This is very useful for indexing databases.
    This only works if the video has sound.
    The algorithm looks at the RMS of the audio and extracts the sentences.

    parameters:
        source_name : video input
        target_folder : folder in which the videos will be created
        rms_threshold : RMS threshold under which audio is considered silence
        WndSize : analysis window size (samples)
        overlap : analysis window overlap (samples)
    """
    import os
    audio_name = "auxiliary_audio_file_14357XXX.wav"
    try:
        os.remove(audio_name)
    except OSError:
        pass
    file_name = os.path.splitext(source_name)[0]
    file_name = os.path.basename(file_name)
    extract_audio(source_name, audio_name)
    tags, lengths = extract_sentences_tags(audio_name, rms_threshold = rms_threshold, WndSize = WndSize, overlap = overlap)
    # BUG FIX: the original passed lengths[0] on every iteration, so every
    # extracted clip got the duration of the FIRST sentence. Pair each tag
    # with its own length instead.
    for cpt, (tag, length) in enumerate(zip(tags, lengths), start=1):
        extract_sub_video_sentences(source_name = source_name
                                    , target_name = target_folder + file_name + "_" + str(cpt) + ".mp4"
                                    , start = tag[0]
                                    , length = length
                                    )
    # Clean up the temporary audio file.
    os.remove(audio_name)
def extract_sub_video_sentences(source_name, target_name, start, length):
    """
    Cut a sub-clip out of source_name starting at *start* and lasting *length*.
    The start time should be in this format: 00:24:00
    Length in seconds.
    NOTE(review): start and length are concatenated into the command, so both
    must already be strings - confirm callers always pass strings.
    """
    import subprocess
    import os
    #command = "ffmpeg -i "+source_name+" -ss "+start+" -t "+end+" -async 1 "+target_name
    # -ss seeks to the start position, -t limits the output duration,
    # -strict -2 allows the experimental AAC encoder on older ffmpeg builds.
    command = "ffmpeg -i "+source_name+" -ss "+start+" -t "+length+" -async 1 -strict -2 "+target_name
    subprocess.call(command, shell=True)
def video_scaling(source_name, target_name, resolution):
    """
    Change the scale (resolution) of a video.

    source_name : video to be transformed
    target_name : transformed video (created)
    resolution : target resolution, tuple, pair of values (width, height)
    """
    import os
    import subprocess
    # Remove a stale target so ffmpeg does not prompt about overwriting.
    try:
        os.remove(target_name)
    except OSError:
        pass
    res = [str(i) for i in resolution]
    # List form (no shell) is safe for paths with spaces; the scale filter
    # keeps the original aspect ratio while fitting inside the target size.
    command = ["ffmpeg", "-i", source_name, "-vf",
               "scale=" + res[0] + ":" + res[1] + ":force_original_aspect_ratio=decrease",
               "-strict", "-2", target_name, "-hide_banner"]
    subprocess.call(command)
def four_3_to_16_9(source_name, target_name):
    """
    Rescale a video and force a 16:9 display aspect ratio.
    source name : video to be transformed
    target name : transformed video
    NOTE(review): despite the 4:3-to-16:9 name, the filter scales to
    1080x1920 (portrait) - confirm the intended output geometry.
    """
    import subprocess
    import os
    try:
        os.remove(target_name)
    except:
        pass
    #command = "ffmpeg -i "+source_name+" -vf \"scale=640x480,setsar=1,pad=854:480:107:0\" "+target_name
    command = "ffmpeg -i "+source_name+" -vf scale=1080x1920,setdar=16:9 "+target_name
    #command = "ffmpeg -i "+source_name+" -vf scale=1080x1920,setdar=16:9 "+target_name
    subprocess.call(command, shell=True)
def four_3_to_16_9_v2(source_name, target_name):
    """
    Re-encode a video with quality 10 and an all-intra GOP (-g 1).
    source_name : video to be transformed
    target_name : transformed video
    NOTE(review): unlike four_3_to_16_9 this variant applies no scale or
    aspect filter - confirm the v2 name reflects the intended behavior.
    """
    import subprocess
    import os
    try:
        os.remove(target_name)
    except:
        pass
    #command = "ffmpeg -i "+source_name+" -vf \"scale=640x480,setsar=1,pad=854:480:107:0\" "+target_name
    command = "ffmpeg -i "+source_name+" -q:v 10 -g 1 "+target_name
    subprocess.call(command, shell=True)
def cut_silence_in_video(source, target, rmsTreshhold = -40, WndSize = 128):
    """
    Cut the silence at the beginning and at the end of the video.

    source : video to be transformed
    target : transformed video (created)
    rmsTreshhold : RMS threshold under which audio counts as silence
    WndSize : window size used to search for silence at both ends
    """
    import os
    try:
        os.remove(target)
    except OSError:
        pass
    # Work on the extracted audio track to locate the silences.
    audio_aux = "aux_audio_15452.wav"
    extract_audio(source, audio_aux)
    # Extract time tags for the beginning and the end of the speech.
    from .transform_audio import get_sound_without_silence
    begining_s, end_s = get_sound_without_silence(audio_aux, rmsTreshhold = rmsTreshhold, WndSize = WndSize)
    len_s = end_s - begining_s
    # Convert the second offsets into H:M:S.microseconds strings for ffmpeg.
    from datetime import timedelta, datetime
    begining_s = datetime(1, 1, 1) + timedelta(seconds=begining_s)
    end_s = datetime(1, 1, 1) + timedelta(seconds=end_s)
    len_s = datetime(1, 1, 1) + timedelta(seconds=len_s)
    # BUG FIX: the original used "%3d" for the microsecond field, which
    # space-pads short values (e.g. ".  5") and never zero-pads, so e.g.
    # 50000 us rendered as ".50000" (0.5 s instead of 0.05 s). The
    # microsecond field must be zero-padded to 6 digits.
    begining_s = "%d:%d:%d.%06d" % (begining_s.hour, begining_s.minute, begining_s.second, begining_s.microsecond)
    end_s = "%d:%d:%d.%06d" % (end_s.hour, end_s.minute, end_s.second, end_s.microsecond)
    len_s = "%d:%d:%d.%06d" % (len_s.hour, len_s.minute, len_s.second, len_s.microsecond)
    print(begining_s)
    print(end_s)
    print(len_s)
    # Extract the sub-video between the detected boundaries.
    extract_sub_video_sentences(source, target, begining_s, len_s)
    # Remove the auxiliary audio file.
    os.remove(audio_aux)
def denoise_audio_in_video(source, target, gain_reduction =10, amp_threshold = -55, wnd_size = 8192 ):
    """
    Denoise the audio track of a video.

    source         : video to be transformed
    target         : transformed video (overwritten if it exists)
    gain_reduction : gain used to reduce the noise
    amp_threshold  : amplitude threshold used for noise detection
    wnd_size       : moving analysis window size
    """
    from super_vp_commands import denoise_sound
    import os
    # Remove a stale output; was a bare `except:` that hid real errors.
    try:
        os.remove(target)
    except OSError:
        pass
    # Pipeline: extract audio -> denoise it -> remux into the video.
    audio_aux = "aux_audio_1.wav"
    extract_audio(source, audio_aux)
    audio_aux2 = "aux_audio_2.wav"
    denoise_sound(audio_aux, audio_aux2,
                  gain_reduction=gain_reduction,
                  amp_threshold=amp_threshold,
                  wnd_size=wnd_size)
    replace_audio(video_file=source, new_audio=audio_aux2, target_video=target)
    # Clean up the temporary audio files.
    os.remove(audio_aux)
    os.remove(audio_aux2)
def change_video_format(source, target):
    """
    Re-encode *source* into the container implied by *target*'s extension
    (audio resampled to 22050 Hz, bitrate 3298k).
    """
    import os
    import subprocess
    # Remove a stale output file; was a bare `except:`.
    try:
        os.remove(target)
    except OSError:
        pass
    command = "ffmpeg -i "+source+" -ar 22050 -b 3298k "+target
    subprocess.call(command, shell=True)
def convert_to_avi(source, target):
    """
    Convert *source* into an XVID-encoded AVI (720x576, MP3 audio).
    """
    import os
    import subprocess
    # Remove a stale output file; was a bare `except:`.
    try:
        os.remove(target)
    except OSError:
        pass
    command = ("ffmpeg -i "+source
               +" -vcodec mpeg4 -vtag XVID -b 990k -bf 2 -g 300 -s 720x576"
               +" -acodec libmp3lame -pass 1 -ab 256 -threads 0 -f avi "+target)
    subprocess.call(command, shell=True)
def extract_frames_video(source, folder, tag="", fps=25):
    """
    Extract the frames of a video as numbered BMP images.

    source : video file to read
    folder : output directory (created if missing)
    tag    : prefix prepended to every frame file name
    fps    : frame-extraction rate
    """
    import os
    import subprocess
    # Bug fix: os.mkdir raised FileExistsError when the folder already
    # existed and could not create intermediate directories; makedirs with
    # exist_ok=True is idempotent.
    os.makedirs(folder, exist_ok=True)
    command = "ffmpeg -i "+source+" -r "+str(fps)+" "+folder+tag+"$filename%01d.bmp"
    subprocess.call(command, shell=True)
def change_frame_rate(source, target_fps, output):
    """
    Re-encode *source* at *target_fps* frames per second into *output*.

    Bug fix: the command string referenced an undefined name
    ``target_frame_rate`` (NameError on every call); it now uses the
    ``target_fps`` parameter. The unused ``os`` import was removed.
    """
    import subprocess
    command = "ffmpeg -i "+source+" -filter:v fps="+str(target_fps)+" "+output
    subprocess.call(command, shell=True)
def crop_video(source_video, target_video, x=0, y=0,out_w=0 , out_h=0 ):
    """
    Crop *source_video* to an out_w x out_h window whose top-left corner
    is at (x, y), writing the result to *target_video*.
    """
    import subprocess
    crop_spec = ":".join(str(value) for value in (out_w, out_h, x, y))
    cmd = ("ffmpeg -i " + source_video
           + " -strict -2 -filter:v crop=" + crop_spec
           + " " + target_video)
    subprocess.call(cmd, shell=True)
def get_fps(source):
    """
    Return the frame rate of *source* as reported by ffprobe.

    source : path to the video file

    Returns the raw bytes ffprobe writes on stdout (e.g. b'25/1\\n'), or
    empty bytes when ffprobe fails or produces no output.

    Cleanup: removed several commented-out alternative implementations and
    the unused ``os``/``shlex`` imports.
    """
    import subprocess
    command = "ffprobe -v error -select_streams v -of default=noprint_wrappers=1:nokey=1 -show_entries stream=r_frame_rate " + source
    # communicate() waits for the process and returns (stdout, stderr).
    output = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE).communicate()[0]
    return output
def color_correction(source_video, target_video, gamma=1.5, saturation =1.3):
    """
    Apply simple colour grading with ffmpeg's eq filter.

    gamma      : gamma correction factor
    saturation : colour saturation factor (>1 intensifies colours)

    Bug fix: the gamma value was hard-coded as ``str(1.5)`` so the
    ``gamma`` parameter was silently ignored; it is now used.
    """
    import subprocess
    command = ("ffmpeg -i " + source_video
               + " -vf eq=gamma=" + str(gamma) + ":saturation=" + str(saturation)
               + " -c:a copy " + target_video)
    subprocess.call(command, shell=True)
def create_movie_from_frames(frame_name_tag, fps, img_extension , target_video, video_codec="copy", preset="ultrafast", loseless=0):
    """
    Assemble a series of numbered frames into a movie.

    frame_name_tag : frame-name prefix; frames named frame_1.png use "frame_"
    fps            : frame rate of the resulting movie
    img_extension  : extension of the frame images (e.g. ".png")
    target_video   : the video that will be created
    video_codec    : video codec flag passed to ffmpeg --
                     "copy"    : fast generation, very large files
                     "libx265" : slow generation, small files
    preset         : compression-efficiency/speed trade-off; one of
                     superfast, veryfast, faster, fast, medium, slow,
                     slower, veryslow, placebo (slower = smaller output)
    loseless       : 1 for lossless encoding, 0 for lossy

    Cleanup: removed a commented-out duplicate command string.
    """
    import subprocess
    command = "ffmpeg -framerate "+str(fps)+" -i "+frame_name_tag+"%d"+img_extension+" -vcodec "+video_codec+" -acodec copy -preset "+preset+" -x265-params lossless="+str(loseless)+" "+target_video
    subprocess.call(command, shell=True)
def compress_to_h265(video_in, video_out):
    """Re-encode *video_in* with the H.265/HEVC codec (CRF 28) into *video_out*."""
    import subprocess
    cmd = "ffmpeg -i " + video_in + " -vcodec libx265 -crf 28 " + video_out
    subprocess.call(cmd, shell=True)
def sharpen_video(source_video, target_video):
    """
    Sharpen a video with ffmpeg's unsharp filter.

    source_video : input video file
    target_video : sharpened output video file

    Bug fix: the previous docstring was copy-pasted from
    create_movie_from_frames and described the wrong function.
    """
    import subprocess
    command = "ffmpeg -i "+source_video+" -vf unsharp "+target_video
    subprocess.call(command, shell=True)
def combine_2_videos(left, right, output):
    """
    Place two videos side by side (left | right) in a single output video.

    One input is rescaled with scale2ref so both halves share the same
    height before being horizontally stacked (hstack).

    Cleanup: removed two commented-out alternative command strings.
    """
    import subprocess
    command = "ffmpeg -i "+left+" -i "+right+" -filter_complex \"[0][1]scale2ref=w=oh*mdar:h=ih[left][right];[left][right]hstack\" "+ output
    subprocess.call(command, shell=True)
def combine_videos( tl, tr, bl, br,output, audios = [] ):
    """
    Tile four videos into a single 2x2 mosaic video.

    tl, tr, bl, br : top-left, top-right, bottom-left, bottom-right inputs
    output         : resulting video
    audios         : optional list of files whose audio tracks are mixed
                     (via combine_audio) into the output soundtrack

    Bug fix: the no-audio branch passed ``bl`` twice as the fourth input
    instead of ``br``, so the bottom-right video was never used.
    (NOTE(review): ``audios=[]`` is a mutable default, but it is only read,
    never mutated, so it is kept for interface stability.)
    """
    import os
    import subprocess
    if audios != []:
        # Mix all requested audio tracks into one temporary master track.
        master_audio = "master_audio_aux_file.wav"
        combine_audio(audios, master_audio)
        command = "ffmpeg -i "+tl+" -i "+tr+" -i "+bl+" -i "+br+" -i "+master_audio+" -filter_complex \"[0:v][1:v]hstack[t];[2:v][3:v]hstack[b];[t][b]vstack[v]\" -map \"[v]\" -map 4:a -c:a copy -shortest "+output
    else :
        command = "ffmpeg -i "+tl+" -i "+tr+" -i "+bl+" -i "+br+" -ac 2 -filter_complex \"[0:v][1:v]hstack[t];[2:v][3:v]hstack[b];[t][b]vstack[v]\" -map \"[v]\" -c:a copy -shortest "+ output
    subprocess.call(command, shell=True)
    # Delete the temporary master audio, if one was created.
    if audios != []:
        os.remove(master_audio)
def combine_audio(files, target_audio, pre_normalisation=True):
    """
    Mix the audio of several files (videos allowed) into one wav file.

    files             : input file names
    target_audio      : mixed output wav file
    pre_normalisation : if True, peak-normalise each track before mixing
    """
    import os
    import soundfile
    import numpy as np
    from transform_audio import wav_to_mono

    # Pull a mono audio track out of every input file.
    temp_tracks = []
    for idx, source in enumerate(files):
        track = f"{idx}_aux_audio_1439.wav"
        temp_tracks.append(track)
        extract_audio(source, track)
        wav_to_mono(track, track)

    # Load every track, optionally normalising each by its peak value.
    signals = []
    for track in temp_tracks:
        samples, fs = soundfile.read(track)
        if pre_normalisation:
            samples = samples / np.max(samples)
        signals.append(samples)

    # Zero-pad every signal to the length of the longest one, then mix.
    longest = max(len(sig) for sig in signals)
    padded = [
        np.concatenate([sig, np.asarray([0.0] * (longest - len(sig)))])
        for sig in signals
    ]
    mix = np.sum(padded, axis=0)
    mix = mix / np.max(mix)

    # Export the mix (sample rate taken from the last track read).
    soundfile.write(target_audio, mix, fs)

    # Remove the temporary per-file tracks.
    for track in temp_tracks:
        os.remove(track)
|
[
"os.mkdir",
"os.remove",
"numpy.sum",
"transform_audio.extract_sentences_tags",
"numpy.max",
"datetime.timedelta",
"soundfile.write",
"subprocess.Popen",
"soundfile.read",
"os.path.basename",
"subprocess.check_output",
"os.path.realpath",
"numpy.asarray",
"datetime.datetime",
"subprocess.call",
"numpy.concatenate",
"super_vp_commands.denoise_sound",
"transform_audio.wav_to_mono",
"os.path.splitext"
] |
[((1006, 1034), 'os.path.basename', 'os.path.basename', (['video_file'], {}), '(video_file)\n', (1022, 1034), False, 'import os\n'), ((1212, 1248), 'subprocess.call', 'subprocess.call', (['command'], {'shell': '(True)'}), '(command, shell=True)\n', (1227, 1248), False, 'import subprocess\n'), ((1637, 1665), 'os.path.basename', 'os.path.basename', (['video_file'], {}), '(video_file)\n', (1653, 1665), False, 'import os\n'), ((2072, 2108), 'subprocess.call', 'subprocess.call', (['command'], {'shell': '(True)'}), '(command, shell=True)\n', (2087, 2108), False, 'import subprocess\n'), ((2410, 2446), 'subprocess.call', 'subprocess.call', (['command'], {'shell': '(True)'}), '(command, shell=True)\n', (2425, 2446), False, 'import subprocess\n'), ((2701, 2745), 'subprocess.check_output', 'subprocess.check_output', (['command'], {'shell': '(True)'}), '(command, shell=True)\n', (2724, 2745), False, 'import subprocess\n'), ((2993, 3037), 'subprocess.check_output', 'subprocess.check_output', (['command'], {'shell': '(True)'}), '(command, shell=True)\n', (3016, 3037), False, 'import subprocess\n'), ((3839, 3866), 'os.path.basename', 'os.path.basename', (['file_name'], {}), '(file_name)\n', (3855, 3866), False, 'import os\n'), ((3925, 4027), 'transform_audio.extract_sentences_tags', 'extract_sentences_tags', (['audio_name'], {'rms_threshold': 'rms_threshold', 'WndSize': 'WndSize', 'overlap': 'overlap'}), '(audio_name, rms_threshold=rms_threshold, WndSize=\n WndSize, overlap=overlap)\n', (3947, 4027), False, 'from transform_audio import extract_sentences_tags\n'), ((4268, 4289), 'os.remove', 'os.remove', (['audio_name'], {}), '(audio_name)\n', (4277, 4289), False, 'import os\n'), ((4738, 4774), 'subprocess.call', 'subprocess.call', (['command'], {'shell': '(True)'}), '(command, shell=True)\n', (4753, 4774), False, 'import subprocess\n'), ((5288, 5324), 'subprocess.call', 'subprocess.call', (['command'], {'shell': '(True)'}), '(command, shell=True)\n', (5303, 5324), False, 'import 
subprocess\n'), ((5894, 5930), 'subprocess.call', 'subprocess.call', (['command'], {'shell': '(True)'}), '(command, shell=True)\n', (5909, 5930), False, 'import subprocess\n'), ((6228, 6264), 'subprocess.call', 'subprocess.call', (['command'], {'shell': '(True)'}), '(command, shell=True)\n', (6243, 6264), False, 'import subprocess\n'), ((7665, 7685), 'os.remove', 'os.remove', (['audio_aux'], {}), '(audio_aux)\n', (7674, 7685), False, 'import os\n'), ((8247, 8366), 'super_vp_commands.denoise_sound', 'denoise_sound', (['audio_aux', 'audio_aux2'], {'gain_reduction': 'gain_reduction', 'amp_threshold': 'amp_threshold', 'wnd_size': 'wnd_size'}), '(audio_aux, audio_aux2, gain_reduction=gain_reduction,\n amp_threshold=amp_threshold, wnd_size=wnd_size)\n', (8260, 8366), False, 'from super_vp_commands import denoise_sound\n'), ((8531, 8551), 'os.remove', 'os.remove', (['audio_aux'], {}), '(audio_aux)\n', (8540, 8551), False, 'import os\n'), ((8553, 8574), 'os.remove', 'os.remove', (['audio_aux2'], {}), '(audio_aux2)\n', (8562, 8574), False, 'import os\n'), ((8887, 8923), 'subprocess.call', 'subprocess.call', (['command'], {'shell': '(True)'}), '(command, shell=True)\n', (8902, 8923), False, 'import subprocess\n'), ((9542, 9578), 'subprocess.call', 'subprocess.call', (['command'], {'shell': '(True)'}), '(command, shell=True)\n', (9557, 9578), False, 'import subprocess\n'), ((9712, 9728), 'os.mkdir', 'os.mkdir', (['folder'], {}), '(folder)\n', (9720, 9728), False, 'import os\n'), ((9813, 9849), 'subprocess.call', 'subprocess.call', (['command'], {'shell': '(True)'}), '(command, shell=True)\n', (9828, 9849), False, 'import subprocess\n'), ((10018, 10054), 'subprocess.call', 'subprocess.call', (['command'], {'shell': '(True)'}), '(command, shell=True)\n', (10033, 10054), False, 'import subprocess\n'), ((10328, 10364), 'subprocess.call', 'subprocess.call', (['command'], {'shell': '(True)'}), '(command, shell=True)\n', (10343, 10364), False, 'import subprocess\n'), ((11520, 
11556), 'subprocess.call', 'subprocess.call', (['command'], {'shell': '(True)'}), '(command, shell=True)\n', (11535, 11556), False, 'import subprocess\n'), ((12904, 12940), 'subprocess.call', 'subprocess.call', (['command'], {'shell': '(True)'}), '(command, shell=True)\n', (12919, 12940), False, 'import subprocess\n'), ((13092, 13128), 'subprocess.call', 'subprocess.call', (['command'], {'shell': '(True)'}), '(command, shell=True)\n', (13107, 13128), False, 'import subprocess\n'), ((13458, 13494), 'subprocess.call', 'subprocess.call', (['command'], {'shell': '(True)'}), '(command, shell=True)\n', (13473, 13494), False, 'import subprocess\n'), ((14175, 14211), 'subprocess.call', 'subprocess.call', (['command'], {'shell': '(True)'}), '(command, shell=True)\n', (14190, 14211), False, 'import subprocess\n'), ((15123, 15159), 'subprocess.call', 'subprocess.call', (['command'], {'shell': '(True)'}), '(command, shell=True)\n', (15138, 15159), False, 'import subprocess\n'), ((16529, 16556), 'numpy.sum', 'np.sum', (['paded_audio'], {'axis': '(0)'}), '(paded_audio, axis=0)\n', (16535, 16556), True, 'import numpy as np\n'), ((16655, 16701), 'soundfile.write', 'soundfile.write', (['target_audio', 'paded_audio', 'fs'], {}), '(target_audio, paded_audio, fs)\n', (16670, 16701), False, 'import soundfile\n'), ((956, 978), 'os.remove', 'os.remove', (['target_name'], {}), '(target_name)\n', (965, 978), False, 'import os\n'), ((1048, 1070), 'os.path.splitext', 'os.path.splitext', (['base'], {}), '(base)\n', (1064, 1070), False, 'import os\n'), ((1098, 1126), 'os.path.realpath', 'os.path.realpath', (['video_file'], {}), '(video_file)\n', (1114, 1126), False, 'import os\n'), ((1588, 1611), 'os.remove', 'os.remove', (['target_video'], {}), '(target_video)\n', (1597, 1611), False, 'import os\n'), ((1679, 1701), 'os.path.splitext', 'os.path.splitext', (['base'], {}), '(base)\n', (1695, 1701), False, 'import os\n'), ((1729, 1757), 'os.path.realpath', 'os.path.realpath', (['video_file'], 
{}), '(video_file)\n', (1745, 1757), False, 'import os\n'), ((3741, 3762), 'os.remove', 'os.remove', (['audio_name'], {}), '(audio_name)\n', (3750, 3762), False, 'import os\n'), ((3793, 3822), 'os.path.splitext', 'os.path.splitext', (['source_name'], {}), '(source_name)\n', (3809, 3822), False, 'import os\n'), ((5041, 5063), 'os.remove', 'os.remove', (['target_name'], {}), '(target_name)\n', (5050, 5063), False, 'import os\n'), ((5581, 5603), 'os.remove', 'os.remove', (['target_name'], {}), '(target_name)\n', (5590, 5603), False, 'import os\n'), ((6019, 6041), 'os.remove', 'os.remove', (['target_name'], {}), '(target_name)\n', (6028, 6041), False, 'import os\n'), ((6629, 6646), 'os.remove', 'os.remove', (['target'], {}), '(target)\n', (6638, 6646), False, 'import os\n'), ((7075, 7092), 'datetime.datetime', 'datetime', (['(1)', '(1)', '(1)'], {}), '(1, 1, 1)\n', (7083, 7092), False, 'from datetime import timedelta, datetime\n'), ((7093, 7122), 'datetime.timedelta', 'timedelta', ([], {'seconds': 'begining_s'}), '(seconds=begining_s)\n', (7102, 7122), False, 'from datetime import timedelta, datetime\n'), ((7132, 7149), 'datetime.datetime', 'datetime', (['(1)', '(1)', '(1)'], {}), '(1, 1, 1)\n', (7140, 7149), False, 'from datetime import timedelta, datetime\n'), ((7150, 7174), 'datetime.timedelta', 'timedelta', ([], {'seconds': 'end_s'}), '(seconds=end_s)\n', (7159, 7174), False, 'from datetime import timedelta, datetime\n'), ((7184, 7201), 'datetime.datetime', 'datetime', (['(1)', '(1)', '(1)'], {}), '(1, 1, 1)\n', (7192, 7201), False, 'from datetime import timedelta, datetime\n'), ((7202, 7226), 'datetime.timedelta', 'timedelta', ([], {'seconds': 'len_s'}), '(seconds=len_s)\n', (7211, 7226), False, 'from datetime import timedelta, datetime\n'), ((8081, 8098), 'os.remove', 'os.remove', (['target'], {}), '(target)\n', (8090, 8098), False, 'import os\n'), ((8721, 8738), 'os.remove', 'os.remove', (['target'], {}), '(target)\n', (8730, 8738), False, 'import os\n'), 
((9001, 9018), 'os.remove', 'os.remove', (['target'], {}), '(target)\n', (9010, 9018), False, 'import os\n'), ((15203, 15226), 'os.remove', 'os.remove', (['master_audio'], {}), '(master_audio)\n', (15212, 15226), False, 'import os\n'), ((15788, 15813), 'transform_audio.wav_to_mono', 'wav_to_mono', (['audio', 'audio'], {}), '(audio, audio)\n', (15799, 15813), False, 'from transform_audio import wav_to_mono\n'), ((15921, 15941), 'soundfile.read', 'soundfile.read', (['file'], {}), '(file)\n', (15935, 15941), False, 'import soundfile\n'), ((16429, 16444), 'numpy.asarray', 'np.asarray', (['pad'], {}), '(pad)\n', (16439, 16444), True, 'import numpy as np\n'), ((16608, 16627), 'numpy.max', 'np.max', (['paded_audio'], {}), '(paded_audio)\n', (16614, 16627), True, 'import numpy as np\n'), ((16763, 16778), 'os.remove', 'os.remove', (['file'], {}), '(file)\n', (16772, 16778), False, 'import os\n'), ((16472, 16504), 'numpy.concatenate', 'np.concatenate', (['[raw_audio, pad]'], {}), '([raw_audio, pad])\n', (16486, 16504), True, 'import numpy as np\n'), ((10607, 10668), 'subprocess.Popen', 'subprocess.Popen', (['command'], {'shell': '(True)', 'stdout': 'subprocess.PIPE'}), '(command, shell=True, stdout=subprocess.PIPE)\n', (10623, 10668), False, 'import subprocess\n'), ((16040, 16049), 'numpy.max', 'np.max', (['x'], {}), '(x)\n', (16046, 16049), True, 'import numpy as np\n')]
|
from __future__ import annotations
from typing import TYPE_CHECKING, Type
import re
if TYPE_CHECKING:
import volga.types as types
from typing import get_type_hints
import volga.fields as fields
import volga.format as format
import volga.exceptions as exceptions
# Shared regex flags: VERBOSE allows commented patterns, MULTILINE/DOTALL
# keep matching consistent across embedded newlines.
RE_FLAGS = re.VERBOSE | re.MULTILINE | re.DOTALL

# Matches string content lazily up to the terminating '"' (group 1 is the
# content, group 2 the closing quote).  NOTE(review): escaped quotes (\")
# inside the string are not handled by this pattern.
STRING_RE = re.compile(r'(.*?)(")', RE_FLAGS)

# Matches a JSON number: integer part, optional fraction, optional exponent.
NUMBER_RE = re.compile(
    r"(-?(?:0|[1-9]\d*))(\.\d+)?([eE][-+]?\d+)?",
    RE_FLAGS,
)
class JSON(format.Format):
    """Recursive-descent JSON deserializer for volga field types.

    A cursor ``idx`` tracks the next unread character of the input
    string ``s``; every ``_parse_*`` helper consumes one JSON value and
    leaves the cursor on the character that follows it (this invariant is
    what ``__deserialize_dict__`` relies on when skipping separators).
    """

    def __init__(self, input: str) -> None:
        self.s: str = input  # raw JSON text being parsed
        self.idx: int = 0    # cursor: index of the next unread character

    def _parse_bool(self) -> bool:
        """Consume a ``true``/``false`` literal at the cursor."""
        idx = self.idx
        curr_char = self.s[idx]
        if curr_char == "t" and self.s[idx : idx + 4] == "true":
            # Bug fix: advance exactly len("true") characters; the original
            # advanced 5, skipping the character after the literal and
            # corrupting the parse of any following value in an object.
            self.idx += 4
            return True
        elif curr_char == "f" and self.s[idx : idx + 5] == "false":
            # Bug fix: likewise advance exactly len("false") (was 6).
            self.idx += 5
            return False
        else:
            raise exceptions.ParsingError("Expected bool")

    def _parse_float(self) -> float:
        """Consume a JSON number and return it as ``float``."""
        match = NUMBER_RE.match(self.s, self.idx)
        if match:
            integer, frac, exp = match.groups()
            n = float(integer + (frac or "") + (exp or ""))
            self.idx = match.end()
            return n
        else:
            raise exceptions.ParsingError("Expected float")

    def _parse_int(self) -> int:
        """Consume a JSON number, rejecting fraction/exponent parts."""
        match = NUMBER_RE.match(self.s, self.idx)
        if match:
            integer, frac, exp = match.groups()
            if frac or exp:
                raise exceptions.ParsingError("Expected integer value.")
            else:
                n = int(integer)
                self.idx = match.end()
                return n
        else:
            raise exceptions.ParsingError("Expected int")

    def _parse_str(self) -> str:
        """Consume a double-quoted JSON string and return its content."""
        if type(self.s) != str:
            raise exceptions.ParsingError("Expected String, not " + str(type(self.s)))
        if self.s[self.idx] != '"':
            raise exceptions.ParsingError("Expected String")
        # Match everything up to (and including) the closing quote,
        # starting just past the opening quote.
        chunk = STRING_RE.match(self.s, self.idx + 1)
        if chunk is None:
            raise exceptions.ParsingError("Unterminated string.")
        content, _ = chunk.groups()
        self.idx = chunk.end()
        return content

    def _parse_none(self) -> None:
        """Consume a ``null`` literal at the cursor."""
        idx = self.idx
        curr_char = self.s[idx]
        if curr_char == "n" and self.s[idx : idx + 4] == "null":
            # Bug fix: advance exactly len("null") characters (was 5).
            self.idx += 4
            return None
        else:
            raise exceptions.ParsingError("Expected null")

    def __deserialize_str__(
        self, constructor: Type[types.supportsDeser]
    ) -> types.supportsDeser:
        return constructor.__from_str__(self._parse_str())

    def __deserialize_dict__(
        self, constructor: Type[types.supportsDeser]
    ) -> types.supportsDeser:
        """Consume a JSON object whose keys and value types must follow the
        annotated attributes of ``constructor`` in declaration order."""
        res = {}
        if self.s[self.idx] != "{":
            raise exceptions.ParsingError("Expected dict")
        # The expected schema is the annotated attributes of the target type.
        attrs = get_type_hints(constructor, include_extras=True)
        for key in attrs:
            # Skip past the opening '{' (or the ',' after the previous
            # value) plus the opening quote of the key.
            chunk = STRING_RE.match(self.s, self.idx + 2)
            if chunk is None:
                raise exceptions.ParsingError("Expected key string.")
            parsed_key, _ = chunk.groups()
            self.idx = chunk.end()
            assert parsed_key == key
            if self.s[self.idx] != ":":
                raise exceptions.ParsingError("Expected value for key")
            self.idx += 1
            # Parse the value according to the annotated field type.
            # (A stray debug print() was removed here.)
            value = self.dispatch(attrs[key])
            res[fields.Str(parsed_key)] = value
        return constructor.__from_dict__(res)

    def __deserialize_bool__(
        self, constructor: Type[types.supportsDeser]
    ) -> types.supportsDeser:
        return constructor.__from_bool__(self._parse_bool())

    def __deserialize_int__(
        self, constructor: Type[types.supportsDeser]
    ) -> types.supportsDeser:
        return constructor.__from_int__(self._parse_int())

    def __deserialize_float__(
        self, constructor: Type[types.supportsDeser]
    ) -> types.supportsDeser:
        return constructor.__from_float__(self._parse_float())

    def __deserialize_none__(
        self, constructor: Type[types.supportsDeser]
    ) -> types.supportsDeser:
        # Consume the literal; the constructed value carries no payload.
        self._parse_none()
        return constructor.__from_none__(None)

    def dispatch(self, cls: Type[types.supportsDeser]) -> types.supportsDeser:
        """Route ``cls`` to the ``__deserialize__`` hook of a throwaway
        instance of the matching volga field type."""
        if issubclass(cls, fields.Bool):
            return cls(False).__deserialize__(self)
        elif issubclass(cls, fields.Int):
            return cls(0).__deserialize__(self)
        elif issubclass(cls, fields.Float):
            return cls(0.0).__deserialize__(self)
        elif issubclass(cls, fields.Str):
            return cls("").__deserialize__(self)
        elif issubclass(cls, fields.Null):
            return cls(None).__deserialize__(self)
        else:
            return cls({}).__deserialize__(self)
def deserialize(input: str, cls: Type[types.supportsDeser]) -> types.supportsDeser:
    """Deserialize any valid input into an instance of `cls`.

    Args:
        input (str): A string composed of the input to be deserialized.
        cls (Type[types.supportsDeser]): The class from which to create an instance for the deserialized input.

    Returns:
        types.supportsDeser: An instance of `cls` deserialized from input `input`.
    """
    # Fix: the local variable was named ``format``, shadowing the
    # ``volga.format`` module imported at the top of this file.
    fmt = JSON(input)
    # initialize empty instance to dispatch deserialize
    # TODO: investigate why protocol is not working with class method
    return fmt.dispatch(cls)
|
[
"volga.exceptions.ParsingError",
"typing.get_type_hints",
"volga.format.dispatch",
"volga.fields.Str",
"re.compile"
] |
[((386, 418), 're.compile', 're.compile', (['"""(.*?)(")"""', 'RE_FLAGS'], {}), '(\'(.*?)(")\', RE_FLAGS)\n', (396, 418), False, 'import re\n'), ((471, 540), 're.compile', 're.compile', (['"""(-?(?:0|[1-9]\\\\d*))(\\\\.\\\\d+)?([eE][-+]?\\\\d+)?"""', 'RE_FLAGS'], {}), "('(-?(?:0|[1-9]\\\\d*))(\\\\.\\\\d+)?([eE][-+]?\\\\d+)?', RE_FLAGS)\n", (481, 540), False, 'import re\n'), ((5974, 5994), 'volga.format.dispatch', 'format.dispatch', (['cls'], {}), '(cls)\n', (5989, 5994), True, 'import volga.format as format\n'), ((3257, 3305), 'typing.get_type_hints', 'get_type_hints', (['constructor'], {'include_extras': '(True)'}), '(constructor, include_extras=True)\n', (3271, 3305), False, 'from typing import get_type_hints\n'), ((1523, 1564), 'volga.exceptions.ParsingError', 'exceptions.ParsingError', (['"""Expected float"""'], {}), "('Expected float')\n", (1546, 1564), True, 'import volga.exceptions as exceptions\n'), ((1957, 1996), 'volga.exceptions.ParsingError', 'exceptions.ParsingError', (['"""Expected int"""'], {}), "('Expected int')\n", (1980, 1996), True, 'import volga.exceptions as exceptions\n'), ((2206, 2248), 'volga.exceptions.ParsingError', 'exceptions.ParsingError', (['"""Expected String"""'], {}), "('Expected String')\n", (2229, 2248), True, 'import volga.exceptions as exceptions\n'), ((2377, 2424), 'volga.exceptions.ParsingError', 'exceptions.ParsingError', (['"""Unterminated string."""'], {}), "('Unterminated string.')\n", (2400, 2424), True, 'import volga.exceptions as exceptions\n'), ((2755, 2795), 'volga.exceptions.ParsingError', 'exceptions.ParsingError', (['"""Expected null"""'], {}), "('Expected null')\n", (2778, 2795), True, 'import volga.exceptions as exceptions\n'), ((3155, 3195), 'volga.exceptions.ParsingError', 'exceptions.ParsingError', (['"""Expected dict"""'], {}), "('Expected dict')\n", (3178, 3195), True, 'import volga.exceptions as exceptions\n'), ((1177, 1217), 'volga.exceptions.ParsingError', 'exceptions.ParsingError', (['"""Expected 
bool"""'], {}), "('Expected bool')\n", (1200, 1217), True, 'import volga.exceptions as exceptions\n'), ((1767, 1817), 'volga.exceptions.ParsingError', 'exceptions.ParsingError', (['"""Expected integer value."""'], {}), "('Expected integer value.')\n", (1790, 1817), True, 'import volga.exceptions as exceptions\n'), ((3493, 3540), 'volga.exceptions.ParsingError', 'exceptions.ParsingError', (['"""Expected key string."""'], {}), "('Expected key string.')\n", (3516, 3540), True, 'import volga.exceptions as exceptions\n'), ((3721, 3770), 'volga.exceptions.ParsingError', 'exceptions.ParsingError', (['"""Expected value for key"""'], {}), "('Expected value for key')\n", (3744, 3770), True, 'import volga.exceptions as exceptions\n'), ((3941, 3963), 'volga.fields.Str', 'fields.Str', (['parsed_key'], {}), '(parsed_key)\n', (3951, 3963), True, 'import volga.fields as fields\n')]
|
""" Cisco_IOS_XR_lib_mpp_oper
This module contains a collection of YANG definitions
for Cisco IOS\-XR lib\-mpp package operational data.
This module contains definitions
for the following management objects\:
management\-plane\-protection\: Management Plane Protection (MPP)
operational data
Copyright (c) 2013\-2016 by Cisco Systems, Inc.
All rights reserved.
"""
import re
import collections
from enum import Enum
from ydk.types import Empty, YList, YLeafList, DELETE, Decimal64, FixedBitsDict
from ydk.errors import YPYError, YPYModelError
class MppAllowEnum(Enum):
    """
    MPP protocol types.

    Enumerates the management-plane protocols that may be allowed on an
    interface: ssh(0), telnet(1), snmp(2), tftp(3), http(4), xr_xml(5),
    netconf(6) and all(7).
    """

    # NOTE(review): this appears to be a ydk-gen generated YANG binding;
    # the numeric values encode the YANG enum positions -- do not renumber.
    ssh = 0

    telnet = 1

    snmp = 2

    tftp = 3

    http = 4

    xr_xml = 5

    netconf = 6

    all = 7


    @staticmethod
    def _meta_info():
        # Meta information is imported lazily, presumably to avoid an
        # import cycle with the generated meta tables -- confirm.
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_lib_mpp_oper as meta
        return meta._meta_table['MppAllowEnum']
class MppAfIdBaseIdentity(object):
    """
    Base identity for Mpp-af-id (address-family identities derive from it).
    """

    # YANG namespace prefix and revision this binding was built from.
    _prefix = 'Cisco-IOS-XR-lib-mpp-oper'
    _revision = '2015-01-07'

    def __init__(self):
        pass

    @staticmethod
    def _meta_info():
        # Lazy import, presumably to avoid cycles with the meta module.
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_lib_mpp_oper as meta
        return meta._meta_table['MppAfIdBaseIdentity']['meta_info']
class ManagementPlaneProtection(object):
"""
Management Plane Protection (MPP) operational
data
.. attribute:: inband
Management Plane Protection (MPP) inband interface data
**type**\: :py:class:`Inband <ydk.models.cisco_ios_xr.Cisco_IOS_XR_lib_mpp_oper.ManagementPlaneProtection.Inband>`
.. attribute:: outband
Management Plane Protection (MPP) outband interface data
**type**\: :py:class:`Outband <ydk.models.cisco_ios_xr.Cisco_IOS_XR_lib_mpp_oper.ManagementPlaneProtection.Outband>`
"""
_prefix = 'lib-mpp-oper'
_revision = '2015-01-07'
    def __init__(self):
        # Child containers; each keeps a back-reference in ``parent`` so
        # the runtime can walk the parent chain (as _common_path does in
        # the nested classes below).
        self.inband = ManagementPlaneProtection.Inband()
        self.inband.parent = self
        self.outband = ManagementPlaneProtection.Outband()
        self.outband.parent = self
class Outband(object):
"""
Management Plane Protection (MPP) outband
interface data
.. attribute:: interfaces
List of inband/outband interfaces
**type**\: :py:class:`Interfaces <ydk.models.cisco_ios_xr.Cisco_IOS_XR_lib_mpp_oper.ManagementPlaneProtection.Outband.Interfaces>`
.. attribute:: vrf
Outband VRF information
**type**\: :py:class:`Vrf <ydk.models.cisco_ios_xr.Cisco_IOS_XR_lib_mpp_oper.ManagementPlaneProtection.Outband.Vrf>`
"""
_prefix = 'lib-mpp-oper'
_revision = '2015-01-07'
        def __init__(self):
            self.parent = None
            # Child containers, each with a back-reference to this node.
            self.interfaces = ManagementPlaneProtection.Outband.Interfaces()
            self.interfaces.parent = self
            self.vrf = ManagementPlaneProtection.Outband.Vrf()
            self.vrf.parent = self
        class Vrf(object):
            """
            Outband VRF information.

            .. attribute:: vrf_name

            	Outband VRF name
            	**type**\: str
            """

            _prefix = 'lib-mpp-oper'
            _revision = '2015-01-07'

            def __init__(self):
                self.parent = None
                self.vrf_name = None  # str leaf; None until read from the device

            @property
            def _common_path(self):
                # Fixed XPath: this container is a singleton under outband.
                return '/Cisco-IOS-XR-lib-mpp-oper:management-plane-protection/Cisco-IOS-XR-lib-mpp-oper:outband/Cisco-IOS-XR-lib-mpp-oper:vrf'

            def is_config(self):
                ''' Returns True if this instance represents config data else returns False '''
                return False

            def _has_data(self):
                # The node carries data only when some leaf is populated.
                if not self.is_config():
                    return False
                if self.vrf_name is not None:
                    return True

                return False

            @staticmethod
            def _meta_info():
                from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_lib_mpp_oper as meta
                return meta._meta_table['ManagementPlaneProtection.Outband.Vrf']['meta_info']
class Interfaces(object):
"""
List of inband/outband interfaces
.. attribute:: interface
MPP interface information
**type**\: list of :py:class:`Interface <ydk.models.cisco_ios_xr.Cisco_IOS_XR_lib_mpp_oper.ManagementPlaneProtection.Outband.Interfaces.Interface>`
"""
_prefix = 'lib-mpp-oper'
_revision = '2015-01-07'
            def __init__(self):
                self.parent = None
                # Bound list of Interface entries; YList keeps the child
                # list name and parent for the ydk runtime.
                self.interface = YList()
                self.interface.parent = self
                self.interface.name = 'interface'
class Interface(object):
"""
MPP interface information
.. attribute:: interface_name <key>
Interface name, specify 'all' for all interfaces
**type**\: str
.. attribute:: protocol
MPP Interface protocols
**type**\: list of :py:class:`Protocol <ydk.models.cisco_ios_xr.Cisco_IOS_XR_lib_mpp_oper.ManagementPlaneProtection.Outband.Interfaces.Interface.Protocol>`
"""
_prefix = 'lib-mpp-oper'
_revision = '2015-01-07'
                def __init__(self):
                    self.parent = None
                    # List key: interface name, or 'all' for all interfaces.
                    self.interface_name = None
                    # Bound list of Protocol children.
                    self.protocol = YList()
                    self.protocol.parent = self
                    self.protocol.name = 'protocol'
                class Protocol(object):
                    """
                    MPP Interface protocols.

                    .. attribute:: allow

                    	MPP allow
                    	**type**\: :py:class:`MppAllowEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_lib_mpp_oper.MppAllowEnum>`

                    .. attribute:: is_all_peers_allowed

                    	If TRUE, all peers are allowed
                    	**type**\: bool

                    .. attribute:: peer_address

                    	List of peer addresses
                    	**type**\: list of :py:class:`PeerAddress <ydk.models.cisco_ios_xr.Cisco_IOS_XR_lib_mpp_oper.ManagementPlaneProtection.Outband.Interfaces.Interface.Protocol.PeerAddress>`
                    """

                    _prefix = 'lib-mpp-oper'
                    _revision = '2015-01-07'

                    def __init__(self):
                        self.parent = None
                        self.allow = None                 # MppAllowEnum value
                        self.is_all_peers_allowed = None  # bool
                        # Bound list of PeerAddress children.
                        self.peer_address = YList()
                        self.peer_address.parent = self
                        self.peer_address.name = 'peer_address'


                    class PeerAddress(object):
                        """
                        One allowed peer address.

                        .. attribute:: af_name

                        	Address family (identity derived from MppAfIdBaseIdentity)

                        .. attribute:: ipv4_address

                        	IPv4 address, dotted-quad string

                        .. attribute:: ipv6_address

                        	IPv6 address, textual form
                        """

                        _prefix = 'lib-mpp-oper'
                        _revision = '2015-01-07'

                        def __init__(self):
                            self.parent = None
                            self.af_name = None
                            self.ipv4_address = None
                            self.ipv6_address = None

                        @property
                        def _common_path(self):
                            # The XPath is derived from the parent chain, so a
                            # detached node cannot compute it.
                            if self.parent is None:
                                raise YPYModelError('parent is not set . Cannot derive path.')

                            return self.parent._common_path +'/Cisco-IOS-XR-lib-mpp-oper:peer-address'

                        def is_config(self):
                            ''' Returns True if this instance represents config data else returns False '''
                            return False

                        def _has_data(self):
                            # Present only when at least one leaf is populated.
                            if not self.is_config():
                                return False
                            if self.af_name is not None:
                                return True

                            if self.ipv4_address is not None:
                                return True

                            if self.ipv6_address is not None:
                                return True

                            return False

                        @staticmethod
                        def _meta_info():
                            from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_lib_mpp_oper as meta
                            return meta._meta_table['ManagementPlaneProtection.Outband.Interfaces.Interface.Protocol.PeerAddress']['meta_info']

                    @property
                    def _common_path(self):
                        # Derived from the parent Interface node's path.
                        if self.parent is None:
                            raise YPYModelError('parent is not set . Cannot derive path.')

                        return self.parent._common_path +'/Cisco-IOS-XR-lib-mpp-oper:protocol'

                    def is_config(self):
                        ''' Returns True if this instance represents config data else returns False '''
                        return False

                    def _has_data(self):
                        # Present when any leaf or any child list entry has data.
                        if not self.is_config():
                            return False
                        if self.allow is not None:
                            return True

                        if self.is_all_peers_allowed is not None:
                            return True

                        if self.peer_address is not None:
                            for child_ref in self.peer_address:
                                if child_ref._has_data():
                                    return True

                        return False

                    @staticmethod
                    def _meta_info():
                        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_lib_mpp_oper as meta
                        return meta._meta_table['ManagementPlaneProtection.Outband.Interfaces.Interface.Protocol']['meta_info']
                @property
                def _common_path(self):
                    # Absolute schema path; interface_name is the YANG list key
                    # and must be set before the path can be built.
                    if self.interface_name is None:
                        raise YPYModelError('Key property interface_name is None')
                    return '/Cisco-IOS-XR-lib-mpp-oper:management-plane-protection/Cisco-IOS-XR-lib-mpp-oper:outband/Cisco-IOS-XR-lib-mpp-oper:interfaces/Cisco-IOS-XR-lib-mpp-oper:interface[Cisco-IOS-XR-lib-mpp-oper:interface-name = ' + str(self.interface_name) + ']'
                def is_config(self):
                    ''' Returns True if this instance represents config data else returns False '''
                    return False
                def _has_data(self):
                    # True when the list key or any child Protocol entry has data.
                    if not self.is_config():
                        return False
                    if self.interface_name is not None:
                        return True
                    if self.protocol is not None:
                        for child_ref in self.protocol:
                            if child_ref._has_data():
                                return True
                    return False
                @staticmethod
                def _meta_info():
                    from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_lib_mpp_oper as meta
                    return meta._meta_table['ManagementPlaneProtection.Outband.Interfaces.Interface']['meta_info']
            @property
            def _common_path(self):
                # Fixed absolute path of the outband interfaces container.
                return '/Cisco-IOS-XR-lib-mpp-oper:management-plane-protection/Cisco-IOS-XR-lib-mpp-oper:outband/Cisco-IOS-XR-lib-mpp-oper:interfaces'
            def is_config(self):
                ''' Returns True if this instance represents config data else returns False '''
                return False
            def _has_data(self):
                # True when any child Interface entry carries data.
                if not self.is_config():
                    return False
                if self.interface is not None:
                    for child_ref in self.interface:
                        if child_ref._has_data():
                            return True
                return False
            @staticmethod
            def _meta_info():
                from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_lib_mpp_oper as meta
                return meta._meta_table['ManagementPlaneProtection.Outband.Interfaces']['meta_info']
        @property
        def _common_path(self):
            # Fixed absolute path of the outband container.
            return '/Cisco-IOS-XR-lib-mpp-oper:management-plane-protection/Cisco-IOS-XR-lib-mpp-oper:outband'
        def is_config(self):
            ''' Returns True if this instance represents config data else returns False '''
            return False
        def _has_data(self):
            # True when the interfaces container or the vrf leaf has data.
            if not self.is_config():
                return False
            if self.interfaces is not None and self.interfaces._has_data():
                return True
            if self.vrf is not None and self.vrf._has_data():
                return True
            return False
        @staticmethod
        def _meta_info():
            from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_lib_mpp_oper as meta
            return meta._meta_table['ManagementPlaneProtection.Outband']['meta_info']
    class Inband(object):
        """
        Management Plane Protection (MPP) inband interface data.

        .. attribute:: interfaces

            List of inband/outband interfaces
            **type**\: :py:class:`Interfaces <ydk.models.cisco_ios_xr.Cisco_IOS_XR_lib_mpp_oper.ManagementPlaneProtection.Inband.Interfaces>`
        """
        _prefix = 'lib-mpp-oper'
        _revision = '2015-01-07'
        def __init__(self):
            self.parent = None
            # Child container is instantiated eagerly, mirroring the YANG tree.
            self.interfaces = ManagementPlaneProtection.Inband.Interfaces()
            self.interfaces.parent = self
        class Interfaces(object):
            """
            List of inband/outband interfaces.

            .. attribute:: interface

                MPP interface information
                **type**\: list of :py:class:`Interface <ydk.models.cisco_ios_xr.Cisco_IOS_XR_lib_mpp_oper.ManagementPlaneProtection.Inband.Interfaces.Interface>`
            """
            _prefix = 'lib-mpp-oper'
            _revision = '2015-01-07'
            def __init__(self):
                self.parent = None
                self.interface = YList()
                self.interface.parent = self
                self.interface.name = 'interface'
            class Interface(object):
                """
                MPP interface information.

                .. attribute:: interface_name <key>

                    Interface name, specify 'all' for all interfaces
                    **type**\: str

                .. attribute:: protocol

                    MPP Interface protocols
                    **type**\: list of :py:class:`Protocol <ydk.models.cisco_ios_xr.Cisco_IOS_XR_lib_mpp_oper.ManagementPlaneProtection.Inband.Interfaces.Interface.Protocol>`
                """
                _prefix = 'lib-mpp-oper'
                _revision = '2015-01-07'
                def __init__(self):
                    self.parent = None
                    self.interface_name = None
                    self.protocol = YList()
                    self.protocol.parent = self
                    self.protocol.name = 'protocol'
                class Protocol(object):
                    """
                    MPP Interface protocols.

                    .. attribute:: allow

                        MPP allow
                        **type**\: :py:class:`MppAllowEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_lib_mpp_oper.MppAllowEnum>`

                    .. attribute:: is_all_peers_allowed

                        If TRUE, all peers are allowed
                        **type**\: bool

                    .. attribute:: peer_address

                        List of peer addresses
                        **type**\: list of :py:class:`PeerAddress <ydk.models.cisco_ios_xr.Cisco_IOS_XR_lib_mpp_oper.ManagementPlaneProtection.Inband.Interfaces.Interface.Protocol.PeerAddress>`
                    """
                    _prefix = 'lib-mpp-oper'
                    _revision = '2015-01-07'
                    def __init__(self):
                        self.parent = None
                        self.allow = None
                        self.is_all_peers_allowed = None
                        self.peer_address = YList()
                        self.peer_address.parent = self
                        self.peer_address.name = 'peer_address'
                    class PeerAddress(object):
                        """
                        List of peer addresses.

                        .. attribute:: af_name

                            Address-family name
                            **type**\: :py:class:`MppAfIdBaseIdentity <ydk.models.cisco_ios_xr.Cisco_IOS_XR_lib_mpp_oper.MppAfIdBaseIdentity>`

                        .. attribute:: ipv4_address

                            IPv4 address (dotted-quad string pattern)
                            **type**\: str

                        .. attribute:: ipv6_address

                            IPv6 address (RFC 4291 textual pattern)
                            **type**\: str
                        """
                        _prefix = 'lib-mpp-oper'
                        _revision = '2015-01-07'
                        def __init__(self):
                            self.parent = None
                            self.af_name = None
                            self.ipv4_address = None
                            self.ipv6_address = None
                        @property
                        def _common_path(self):
                            # Path is derived from the parent Protocol entry.
                            if self.parent is None:
                                raise YPYModelError('parent is not set . Cannot derive path.')
                            return self.parent._common_path +'/Cisco-IOS-XR-lib-mpp-oper:peer-address'
                        def is_config(self):
                            ''' Returns True if this instance represents config data else returns False '''
                            return False
                        def _has_data(self):
                            # NOTE(review): is_config() is always False here, so the
                            # guard short-circuits — generator quirk; verify intent.
                            if not self.is_config():
                                return False
                            if self.af_name is not None:
                                return True
                            if self.ipv4_address is not None:
                                return True
                            if self.ipv6_address is not None:
                                return True
                            return False
                        @staticmethod
                        def _meta_info():
                            from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_lib_mpp_oper as meta
                            return meta._meta_table['ManagementPlaneProtection.Inband.Interfaces.Interface.Protocol.PeerAddress']['meta_info']
                    @property
                    def _common_path(self):
                        if self.parent is None:
                            raise YPYModelError('parent is not set . Cannot derive path.')
                        return self.parent._common_path +'/Cisco-IOS-XR-lib-mpp-oper:protocol'
                    def is_config(self):
                        ''' Returns True if this instance represents config data else returns False '''
                        return False
                    def _has_data(self):
                        if not self.is_config():
                            return False
                        if self.allow is not None:
                            return True
                        if self.is_all_peers_allowed is not None:
                            return True
                        if self.peer_address is not None:
                            for child_ref in self.peer_address:
                                if child_ref._has_data():
                                    return True
                        return False
                    @staticmethod
                    def _meta_info():
                        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_lib_mpp_oper as meta
                        return meta._meta_table['ManagementPlaneProtection.Inband.Interfaces.Interface.Protocol']['meta_info']
                @property
                def _common_path(self):
                    # interface_name is the YANG list key and must be set.
                    if self.interface_name is None:
                        raise YPYModelError('Key property interface_name is None')
                    return '/Cisco-IOS-XR-lib-mpp-oper:management-plane-protection/Cisco-IOS-XR-lib-mpp-oper:inband/Cisco-IOS-XR-lib-mpp-oper:interfaces/Cisco-IOS-XR-lib-mpp-oper:interface[Cisco-IOS-XR-lib-mpp-oper:interface-name = ' + str(self.interface_name) + ']'
                def is_config(self):
                    ''' Returns True if this instance represents config data else returns False '''
                    return False
                def _has_data(self):
                    if not self.is_config():
                        return False
                    if self.interface_name is not None:
                        return True
                    if self.protocol is not None:
                        for child_ref in self.protocol:
                            if child_ref._has_data():
                                return True
                    return False
                @staticmethod
                def _meta_info():
                    from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_lib_mpp_oper as meta
                    return meta._meta_table['ManagementPlaneProtection.Inband.Interfaces.Interface']['meta_info']
            @property
            def _common_path(self):
                return '/Cisco-IOS-XR-lib-mpp-oper:management-plane-protection/Cisco-IOS-XR-lib-mpp-oper:inband/Cisco-IOS-XR-lib-mpp-oper:interfaces'
            def is_config(self):
                ''' Returns True if this instance represents config data else returns False '''
                return False
            def _has_data(self):
                if not self.is_config():
                    return False
                if self.interface is not None:
                    for child_ref in self.interface:
                        if child_ref._has_data():
                            return True
                return False
            @staticmethod
            def _meta_info():
                from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_lib_mpp_oper as meta
                return meta._meta_table['ManagementPlaneProtection.Inband.Interfaces']['meta_info']
        @property
        def _common_path(self):
            return '/Cisco-IOS-XR-lib-mpp-oper:management-plane-protection/Cisco-IOS-XR-lib-mpp-oper:inband'
        def is_config(self):
            ''' Returns True if this instance represents config data else returns False '''
            return False
        def _has_data(self):
            if not self.is_config():
                return False
            if self.interfaces is not None and self.interfaces._has_data():
                return True
            return False
        @staticmethod
        def _meta_info():
            from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_lib_mpp_oper as meta
            return meta._meta_table['ManagementPlaneProtection.Inband']['meta_info']
    @property
    def _common_path(self):
        # Root path of the whole MPP operational tree.
        return '/Cisco-IOS-XR-lib-mpp-oper:management-plane-protection'
    def is_config(self):
        ''' Returns True if this instance represents config data else returns False '''
        return False
    def _has_data(self):
        # True when either the inband or outband subtree carries data.
        if not self.is_config():
            return False
        if self.inband is not None and self.inband._has_data():
            return True
        if self.outband is not None and self.outband._has_data():
            return True
        return False
    @staticmethod
    def _meta_info():
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_lib_mpp_oper as meta
        return meta._meta_table['ManagementPlaneProtection']['meta_info']
class Ipv4Identity(MppAfIdBaseIdentity):
    """
    IPv4 address family identity (derived from MppAfIdBaseIdentity).
    """
    _prefix = 'Cisco-IOS-XR-lib-mpp-oper'
    _revision = '2015-01-07'
    def __init__(self):
        MppAfIdBaseIdentity.__init__(self)
    @staticmethod
    def _meta_info():
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_lib_mpp_oper as meta
        return meta._meta_table['Ipv4Identity']['meta_info']
class Ipv6Identity(MppAfIdBaseIdentity):
    """
    IPv6 address family identity (derived from MppAfIdBaseIdentity).
    """
    _prefix = 'Cisco-IOS-XR-lib-mpp-oper'
    _revision = '2015-01-07'
    def __init__(self):
        MppAfIdBaseIdentity.__init__(self)
    @staticmethod
    def _meta_info():
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_lib_mpp_oper as meta
        return meta._meta_table['Ipv6Identity']['meta_info']
|
[
"ydk.errors.YPYModelError",
"ydk.types.YList"
] |
[((5285, 5292), 'ydk.types.YList', 'YList', ([], {}), '()\n', (5290, 5292), False, 'from ydk.types import Empty, YList, YLeafList, DELETE, Decimal64, FixedBitsDict\n'), ((16567, 16574), 'ydk.types.YList', 'YList', ([], {}), '()\n', (16572, 16574), False, 'from ydk.types import Empty, YList, YLeafList, DELETE, Decimal64, FixedBitsDict\n'), ((6263, 6270), 'ydk.types.YList', 'YList', ([], {}), '()\n', (6268, 6270), False, 'from ydk.types import Empty, YList, YLeafList, DELETE, Decimal64, FixedBitsDict\n'), ((17544, 17551), 'ydk.types.YList', 'YList', ([], {}), '()\n', (17549, 17551), False, 'from ydk.types import Empty, YList, YLeafList, DELETE, Decimal64, FixedBitsDict\n'), ((7621, 7628), 'ydk.types.YList', 'YList', ([], {}), '()\n', (7626, 7628), False, 'from ydk.types import Empty, YList, YLeafList, DELETE, Decimal64, FixedBitsDict\n'), ((12341, 12393), 'ydk.errors.YPYModelError', 'YPYModelError', (['"""Key property interface_name is None"""'], {}), "('Key property interface_name is None')\n", (12354, 12393), False, 'from ydk.errors import YPYError, YPYModelError\n'), ((18901, 18908), 'ydk.types.YList', 'YList', ([], {}), '()\n', (18906, 18908), False, 'from ydk.types import Empty, YList, YLeafList, DELETE, Decimal64, FixedBitsDict\n'), ((23619, 23671), 'ydk.errors.YPYModelError', 'YPYModelError', (['"""Key property interface_name is None"""'], {}), "('Key property interface_name is None')\n", (23632, 23671), False, 'from ydk.errors import YPYError, YPYModelError\n'), ((10957, 11013), 'ydk.errors.YPYModelError', 'YPYModelError', (['"""parent is not set . Cannot derive path."""'], {}), "('parent is not set . Cannot derive path.')\n", (10970, 11013), False, 'from ydk.errors import YPYError, YPYModelError\n'), ((22236, 22292), 'ydk.errors.YPYModelError', 'YPYModelError', (['"""parent is not set . Cannot derive path."""'], {}), "('parent is not set . 
Cannot derive path.')\n", (22249, 22292), False, 'from ydk.errors import YPYError, YPYModelError\n'), ((9613, 9669), 'ydk.errors.YPYModelError', 'YPYModelError', (['"""parent is not set . Cannot derive path."""'], {}), "('parent is not set . Cannot derive path.')\n", (9626, 9669), False, 'from ydk.errors import YPYError, YPYModelError\n'), ((20893, 20949), 'ydk.errors.YPYModelError', 'YPYModelError', (['"""parent is not set . Cannot derive path."""'], {}), "('parent is not set . Cannot derive path.')\n", (20906, 20949), False, 'from ydk.errors import YPYError, YPYModelError\n')]
|
import os
import json
import pprint
import math
import re
# Generate RESOURCE_COVERAGE.md: a markdown report of which CloudFormation
# resource types and Terraform resources are referenced by js/mappings.js.

# Types deliberately out of scope; the mapped label (e.g. 'N/A') is shown in
# the report's Coverage column and the type counts as covered.
cfn_exceptions = {
    'AWS::CloudFormation::CustomResource': 'N/A',
    'AWS::CloudFormation::Macro': 'N/A',
    'AWS::CloudFormation::Stack': 'N/A',
    'AWS::CloudFormation::WaitCondition': 'N/A',
    'AWS::CloudFormation::WaitConditionHandle': 'N/A',
    'AWS::EC2::SecurityGroupEgress': 'N/A',
    'AWS::EC2::SecurityGroupIngress': 'N/A',
    'AWS::EC2::TrunkInterfaceAssociation': 'N/A',
    'AWS::ElastiCache::SecurityGroupIngress': 'N/A',
    'AWS::Redshift::ClusterSecurityGroupIngress': 'N/A',
    'AWS::Route53::RecordSetGroup': 'N/A',
    'AWS::SDB::Domain': 'N/A',
    'AWS::IAM::UserToGroupAddition': 'N/A'
}
tf_exceptions = {
    'aws_cloudformation_stack': 'N/A',
    'aws_cloudformation_stack_set': 'N/A',
    'aws_cloudformation_stack_set_instance': 'N/A'
}

def _write_coverage_section(report, header, all_types, occurances, exceptions):
    """Write one coverage section: a summary line plus a per-type table.

    report: open writable text file.
    header: section heading, written verbatim (includes its own newlines).
    all_types: every known type name (set or list).
    occurances: list of type names found in js/mappings.js (may repeat).
    exceptions: mapping of type name -> custom coverage label; these count
        as covered and their label overrides the :thumbsup: marker.
    """
    covered = len(set(occurances)) + len(exceptions)
    report.write(header)
    report.write("**%s/%s (%s%%)** Resources Covered\n" % (
        covered,
        len(all_types),
        int(math.floor(covered * 100 / len(all_types)))
    ))
    report.write("\n| Type | Coverage |\n")
    report.write("| --- | --- |\n")
    for type_name in sorted(all_types):
        coverage = ""
        if occurances.count(type_name) > 0:
            coverage = ":thumbsup:"
        if type_name in exceptions:  # exception label wins over detection
            coverage = exceptions[type_name]
        report.write("| *%s* | %s |\n" % (type_name, coverage))

# Known CloudFormation resource types: the bundled spec snapshot plus a few
# types that are newer than the snapshot.
with open("util/cfnspec.json", "r") as f:
    cfn_spec = json.loads(f.read())['ResourceTypes']
cfn_types = list(cfn_spec)
cfn_types.append("AWS::Lambda::LayerVersionPermission")
cfn_types.append("AWS::EC2::VPCEndpointService")
cfn_types.append("AWS::Lambda::LayerVersion")
cfn_types.append("AWS::EC2::CapacityReservation")
cfn_types = set(cfn_types)

# Known Terraform resources, one name per line.
with open("util/tf_resources.txt", "r") as f:
    tf_resources = f.read().splitlines()

# Scan the mappings source once for both kinds of type references.
with open("js/mappings.js", "r") as f:
    text = f.read()
cfn_occurances = re.compile(r'(AWS\:\:[a-zA-Z0-9]+\:\:[a-zA-Z0-9]+)').findall(text)
tf_occurances = re.compile(r'terraformType\'\:\ \'(aws(?:\_[a-zA-Z0-9]+)+)\'').findall(text)

with open("RESOURCE_COVERAGE.md", "w") as f:
    _write_coverage_section(f, "## CloudFormation Resource Coverage\n\n",
                            cfn_types, cfn_occurances, cfn_exceptions)
    _write_coverage_section(f, "\n## Terraform Coverage\n\n",
                            tf_resources, tf_occurances, tf_exceptions)
|
[
"re.compile"
] |
[((1582, 1637), 're.compile', 're.compile', (['"""(AWS\\\\:\\\\:[a-zA-Z0-9]+\\\\:\\\\:[a-zA-Z0-9]+)"""'], {}), "('(AWS\\\\:\\\\:[a-zA-Z0-9]+\\\\:\\\\:[a-zA-Z0-9]+)')\n", (1592, 1637), False, 'import re\n'), ((1670, 1737), 're.compile', 're.compile', (['"""terraformType\\\\\'\\\\:\\\\ \\\\\'(aws(?:\\\\_[a-zA-Z0-9]+)+)\\\\\'"""'], {}), '("terraformType\\\\\'\\\\:\\\\ \\\\\'(aws(?:\\\\_[a-zA-Z0-9]+)+)\\\\\'")\n', (1680, 1737), False, 'import re\n')]
|
import sys
import shutil
import subprocess
from pathlib import Path
from functools import partial
import pandas as pd
import pysam
import astk.utils.func as ul
def site_flanking(chrN, site, sam, control_sam=None, window=150, bins=15):
    """Compute binned read-coverage signal in the window flanking a site.

    Args:
        chrN: chromosome/contig name passed to pysam ``count``.
        site: site position; the ``i-1`` offsets below suggest 1-based
            coordinates — TODO confirm against the anchor files.
        sam: pysam AlignmentFile with the treatment reads.
        control_sam: optional pysam AlignmentFile used for normalisation.
        window: number of bases on each side of the site.
        bins: bin width in bases.

    Returns:
        pandas.DataFrame with columns ``signal`` and ``direction``
        ('upstream' / 'downstream'), one row per bin.
    """
    def signal_func(chrN, start, end, sam, control_sam):
        if control_sam:
            # Treatment/control read-count ratio; the control is floored at
            # 0.9 to avoid division by zero in empty regions.
            treat = sam.count(chrN, start, end)
            control = control_sam.count(chrN, start, end)
            signal = treat / max(control, 0.9)
        else:
            # CPM-style normalisation by total mapped reads.
            signal = 1e6*sam.count(chrN, start, end)/sam.mapped
        return signal
    psignal_func = partial(signal_func, sam=sam, control_sam=control_sam)
    # Bin start positions: upstream bins are generated walking backwards from
    # the site and re-sorted; the [:-1] trims the bin that spans the site.
    up_bin_start_idx = sorted(list(range(site+1, site-window, -bins)))[:-1]
    down_bin_end_idx = list(range(site+1, site+window+2, bins))[:-1]
    up_signal = [psignal_func(chrN, i-1, i+bins-1) for i in up_bin_start_idx]
    down_signal = [psignal_func(chrN, i-1, i+bins-1) for i in down_bin_end_idx]
    df = pd.concat([
        pd.DataFrame({"signal": up_signal, "direction": "upstream"}),
        pd.DataFrame({"signal": down_signal, "direction": "downstream"})
    ])
    return df
#TO-DO make it faster
def epi_signal(out, achor_dic, bam_meta, width, binsize):
    """Aggregate flanking signal for every anchor site and BAM, write CSV.

    Args:
        out: output CSV path.
        achor_dic: mapping of anchor label -> anchor BED-like file
            (tab-separated, chrom in column 0, position in column 1).
        bam_meta: CSV with at least columns ``path``, ``name``,
            ``replicate`` and ``condition`` — presumably one row per BAM;
            verify against the CLI that builds it.
        width, binsize: forwarded to site_flanking as window/bins.
    """
    df = pd.read_csv(bam_meta)
    cds = set(df.condition)
    df_ls = []
    # One condition: every BAM is treatment-only (no control).
    # Control+treatment: rows are paired positionally after the split.
    # NOTE(review): any other condition set leaves tdf_iter/cdf_iter
    # undefined and the zip below raises NameError — consider an explicit
    # error for that case.
    if len(cds) == 1:
        tdf_iter = [sdf for _,sdf in df.iterrows()]
        cdf_iter = [None] * df.shape[0]
    elif cds == {'control', 'treatment'}:
        tdf = df.loc[df.condition == "treatment", ]
        cdf = df.loc[df.condition == "control", ]
        if tdf.shape != cdf.shape:
            raise ValueError("control input number dismatched treatment input")
        tdf_iter = [sdf for _,sdf in tdf.iterrows()]
        cdf_iter = [sdf for _,sdf in cdf.iterrows()]
    for c, t in zip(cdf_iter, tdf_iter):
        if c is None:
            csam = None
        else:
            csam = pysam.AlignmentFile(c["path"])
        tsam = pysam.AlignmentFile(t["path"])
        for an in achor_dic:
            anchor_df = pd.read_csv(achor_dic[an], sep="\t", header=None)
            for ai, site_row in anchor_df.iterrows():
                chrN, pos = site_row[0], int(site_row[1])
                sdf = site_flanking(chrN, pos, tsam, control_sam=csam, window=width, bins=binsize)
                # Annotate each per-site frame so rows remain identifiable
                # after concatenation.
                sdf["event_idx"] = ai
                sdf["mark"] = t["name"]
                sdf["rep"] = t["replicate"]
                sdf["anchor"] = an
                df_ls.append(sdf)
    dfs = pd.concat(df_ls)
    # site_flanking resets the index per bin, so the index doubles as the
    # bin number within each site.
    dfs["bin"] = dfs.index
    dfs.to_csv(out, index=False)
def epi_sc(output, metadata, anchor, name, width, binsize):
    """Label the anchor files and delegate to epi_signal.

    Uses the given names when one is supplied per anchor file; otherwise
    falls back to 1-based integer labels.
    """
    if len(name) == len(anchor):
        labels = name
    else:
        labels = list(range(1, len(anchor) + 1))
    epi_signal(output, dict(zip(labels, anchor)), metadata, width, binsize)
def _run_signal_rscript(script_name, file, fmt, width, height, resolution, output):
    """Run one of the bundled R plotting scripts (under R/) via Rscript.

    All three public plot commands share the same parameter set; this
    helper removes their previously triplicated bodies.
    """
    rscript = Path(__file__).parent / "R" / script_name
    param_dic = {
        "file": file,
        "width": width,
        "height": height,
        "resolution": resolution,
        "fmt": fmt,
        "output": output
    }
    # ul.parse_cmd_r turns the dict into the positional/flag list the R
    # scripts expect.
    param_ls = ul.parse_cmd_r(**param_dic)
    subprocess.run(["Rscript", rscript, *param_ls])

def epihm(output, files, fmt, width, height, resolution):
    """Draw a signal heatmap (R/signalHeatmap.R)."""
    _run_signal_rscript("signalHeatmap.R", files, fmt, width, height, resolution, output)

def epiline(output, file, fmt, width, height, resolution):
    """Draw a signal profile line plot (R/signalProfile.R)."""
    _run_signal_rscript("signalProfile.R", file, fmt, width, height, resolution, output)

def sigcmp(output, file, fmt, width, height, resolution):
    """Draw a signal comparison plot (R/signalCompare.R)."""
    _run_signal_rscript("signalCompare.R", file, fmt, width, height, resolution, output)
def mark(output, celltype, bed, marknum, sep, markindex, markname, stacked):
    """Write a ChromHMM cell/mark/bed table (tab-separated, no header).

    Args:
        output: output table path.
        celltype: list of cell-type names (one entry, or one per group).
        bed: list of bed file paths, one per mark.
        marknum: per-cell-type mark counts (string or int, parallel to
            celltype).
        sep, markindex: when both are truthy, mark names are derived from
            each bed file's stem split on ``sep``, taking 1-based field
            ``markindex``.
        markname: explicit mark names, used when sep/markindex are not given.
        stacked: if True, emit ChromHMM "stacked" layout — the cell type is
            folded into the mark name and a single pseudo cell 'genome' is
            used.

    Exits with status 1 (after printing a message) on inconsistent inputs.
    """
    from itertools import chain
    import pandas as pd
    if all([sep, markindex]):
        marks = [Path(i).stem.split(sep)[markindex-1] for i in bed]
    else:
        marks = markname
    if len(marks) != len(bed):
        print("mark number dismatchs with bed files ")
        sys.exit(1)
    if len(celltype) == len(marknum):
        # Repeat each cell type by its declared number of marks.
        cells = list(chain.from_iterable(
            [ct] * int(num) for ct, num in zip(celltype, marknum)))
    elif len(celltype) == 1:
        cells = [celltype[0] for _ in bed]
    else:
        print("ERROR: -ct/--cellType and -mn/--markNum is not dismatched")
        sys.exit(1)
    if len(cells) != len(marks):
        print("-mn/--markNum is wrong")
        sys.exit(1)
    if stacked:
        marks = [f"{cell}_{m}" for cell, m in zip(cells, marks)]
        cells = ["genome"] * len(marks)  # was f"genome": f-string without placeholder
    df = pd.DataFrame({
        "cell": cells,
        "mark": marks,
        "bed": bed
    })
    df.to_csv(output, sep="\t", index=False, header=False)
# Bundled ChromHMM chromosome-size files; `genomes` is the list of genome
# assemblies supported out of the box (one <genome>.txt per assembly).
CHROMSIZES_dir = Path(__file__).parent / "ChromHMM/CHROMSIZES"
genomes = [i.stem for i in CHROMSIZES_dir.glob("*.txt")]
def LearnState(numstates, markfile, directory, binarydir , outdir, binsize, genome,
        mx, processor, anchordir, coordir, no_binary, name, stacked, nostrand,defaultcoor):
    """Run ChromHMM BinarizeBed + LearnModel, then plot overlap heatmaps.

    Shells out to the bundled ChromHMM.jar (java -mx<mx>), optionally seeding
    user COORDS/ANCHORFILES directories, and finally renders every
    *_overlap.txt in `outdir` with R/ChromHMM_hm.R.
    """
    ChromHMM_dir = Path(__file__).parent / "ChromHMM"
    ChromHMM_jar = f"java -mx{mx} -jar {ChromHMM_dir/'ChromHMM.jar'}"
    uv_params = ""
    if coordir:
        p_coordir = Path(coordir)
        # Trailing space is required: -v may be appended to uv_params below.
        uv_params += f"-u {p_coordir.absolute()} "
        (p_coordir / f"{genome}").mkdir(exist_ok=True)
        if defaultcoor:
            # Seed the user coord dir with ChromHMM's bundled defaults.
            shutil.copytree(ChromHMM_dir/f"COORDS/{genome}", p_coordir/f"{genome}", dirs_exist_ok=True)
        for file in p_coordir.glob("*"):
            if file.is_file(): shutil.copy(file, p_coordir/f"{genome}")
    if anchordir:
        p_anchordir = Path(anchordir)
        uv_params += f"-v {p_anchordir.absolute()}"
        (p_anchordir / f"{genome}").mkdir(exist_ok=True)
        if defaultcoor:
            shutil.copytree(ChromHMM_dir/f"ANCHORFILES/{genome}", p_anchordir/f"{genome}", dirs_exist_ok=True)
        for file in p_anchordir.glob("*"):
            if file.is_file(): shutil.copy(file, p_anchordir/f"{genome}")
    stacked_param = "-stacked" if stacked else ""
    chrom_len = ChromHMM_dir / f"CHROMSIZES/{genome}.txt"
    strand_param = "-nostrand" if nostrand else ""
    name_param = f"-i {name}"
    # Commands are assembled as whitespace-separated strings and re-split
    # below, so none of the interpolated values may contain spaces.
    ChromHMM_bin = f"BinarizeBed {stacked_param} -peaks -b {binsize} {chrom_len} {directory} {markfile} {binarydir}"
    ChromHMM_lm = f"LearnModel {uv_params} {strand_param} {name_param} -p {processor} \
-b {binsize} {binarydir} {outdir} {numstates} {genome}"
    if not no_binary:
        print(ChromHMM_bin)
        info = subprocess.Popen([*ChromHMM_jar.split(), *ChromHMM_bin.split()])
        info.wait()
    print(ChromHMM_lm)
    info = subprocess.Popen([*ChromHMM_jar.split(), *ChromHMM_lm.split()])
    info.wait()
    # Render an enrichment heatmap for every overlap table LearnModel wrote.
    rscript = Path(__file__).parent / "R" / "ChromHMM_hm.R"
    for of in Path(outdir).glob("*_overlap.txt"):
        info = subprocess.Popen(["Rscript", str(rscript), of])
        info.wait()
|
[
"pandas.DataFrame",
"functools.partial",
"subprocess.run",
"astk.utils.func.parse_cmd_r",
"pandas.read_csv",
"pysam.AlignmentFile",
"pathlib.Path",
"shutil.copy",
"shutil.copytree",
"pandas.concat",
"sys.exit"
] |
[((599, 653), 'functools.partial', 'partial', (['signal_func'], {'sam': 'sam', 'control_sam': 'control_sam'}), '(signal_func, sam=sam, control_sam=control_sam)\n', (606, 653), False, 'from functools import partial\n'), ((1235, 1256), 'pandas.read_csv', 'pd.read_csv', (['bam_meta'], {}), '(bam_meta)\n', (1246, 1256), True, 'import pandas as pd\n'), ((2499, 2515), 'pandas.concat', 'pd.concat', (['df_ls'], {}), '(df_ls)\n', (2508, 2515), True, 'import pandas as pd\n'), ((3138, 3165), 'astk.utils.func.parse_cmd_r', 'ul.parse_cmd_r', ([], {}), '(**param_dic)\n', (3152, 3165), True, 'import astk.utils.func as ul\n'), ((3170, 3217), 'subprocess.run', 'subprocess.run', (["['Rscript', rscript, *param_ls]"], {}), "(['Rscript', rscript, *param_ls])\n", (3184, 3217), False, 'import subprocess\n'), ((3534, 3561), 'astk.utils.func.parse_cmd_r', 'ul.parse_cmd_r', ([], {}), '(**param_dic)\n', (3548, 3561), True, 'import astk.utils.func as ul\n'), ((3566, 3613), 'subprocess.run', 'subprocess.run', (["['Rscript', rscript, *param_ls]"], {}), "(['Rscript', rscript, *param_ls])\n", (3580, 3613), False, 'import subprocess\n'), ((3929, 3956), 'astk.utils.func.parse_cmd_r', 'ul.parse_cmd_r', ([], {}), '(**param_dic)\n', (3943, 3956), True, 'import astk.utils.func as ul\n'), ((3961, 4008), 'subprocess.run', 'subprocess.run', (["['Rscript', rscript, *param_ls]"], {}), "(['Rscript', rscript, *param_ls])\n", (3975, 4008), False, 'import subprocess\n'), ((4956, 5012), 'pandas.DataFrame', 'pd.DataFrame', (["{'cell': cells, 'mark': marks, 'bed': bed}"], {}), "({'cell': cells, 'mark': marks, 'bed': bed})\n", (4968, 5012), True, 'import pandas as pd\n'), ((1952, 1982), 'pysam.AlignmentFile', 'pysam.AlignmentFile', (["t['path']"], {}), "(t['path'])\n", (1971, 1982), False, 'import pysam\n'), ((4374, 4385), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (4382, 4385), False, 'import sys\n'), ((4787, 4798), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (4795, 4798), False, 'import sys\n'), ((5123, 
5137), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (5127, 5137), False, 'from pathlib import Path\n'), ((5596, 5609), 'pathlib.Path', 'Path', (['coordir'], {}), '(coordir)\n', (5600, 5609), False, 'from pathlib import Path\n'), ((5997, 6012), 'pathlib.Path', 'Path', (['anchordir'], {}), '(anchordir)\n', (6001, 6012), False, 'from pathlib import Path\n'), ((988, 1048), 'pandas.DataFrame', 'pd.DataFrame', (["{'signal': up_signal, 'direction': 'upstream'}"], {}), "({'signal': up_signal, 'direction': 'upstream'})\n", (1000, 1048), True, 'import pandas as pd\n'), ((1058, 1122), 'pandas.DataFrame', 'pd.DataFrame', (["{'signal': down_signal, 'direction': 'downstream'}"], {}), "({'signal': down_signal, 'direction': 'downstream'})\n", (1070, 1122), True, 'import pandas as pd\n'), ((1905, 1935), 'pysam.AlignmentFile', 'pysam.AlignmentFile', (["c['path']"], {}), "(c['path'])\n", (1924, 1935), False, 'import pysam\n'), ((2037, 2086), 'pandas.read_csv', 'pd.read_csv', (['achor_dic[an]'], {'sep': '"""\t"""', 'header': 'None'}), "(achor_dic[an], sep='\\t', header=None)\n", (2048, 2086), True, 'import pandas as pd\n'), ((4688, 4699), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (4696, 4699), False, 'import sys\n'), ((5431, 5445), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (5435, 5445), False, 'from pathlib import Path\n'), ((5752, 5851), 'shutil.copytree', 'shutil.copytree', (["(ChromHMM_dir / f'COORDS/{genome}')", "(p_coordir / f'{genome}')"], {'dirs_exist_ok': '(True)'}), "(ChromHMM_dir / f'COORDS/{genome}', p_coordir / f'{genome}',\n dirs_exist_ok=True)\n", (5767, 5851), False, 'import shutil\n'), ((6158, 6264), 'shutil.copytree', 'shutil.copytree', (["(ChromHMM_dir / f'ANCHORFILES/{genome}')", "(p_anchordir / f'{genome}')"], {'dirs_exist_ok': '(True)'}), "(ChromHMM_dir / f'ANCHORFILES/{genome}', p_anchordir /\n f'{genome}', dirs_exist_ok=True)\n", (6173, 6264), False, 'import shutil\n'), ((7189, 7201), 'pathlib.Path', 'Path', (['outdir'], 
{}), '(outdir)\n', (7193, 7201), False, 'from pathlib import Path\n'), ((2897, 2911), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (2901, 2911), False, 'from pathlib import Path\n'), ((3294, 3308), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (3298, 3308), False, 'from pathlib import Path\n'), ((3689, 3703), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (3693, 3703), False, 'from pathlib import Path\n'), ((5916, 5958), 'shutil.copy', 'shutil.copy', (['file', "(p_coordir / f'{genome}')"], {}), "(file, p_coordir / f'{genome}')\n", (5927, 5958), False, 'import shutil\n'), ((6331, 6375), 'shutil.copy', 'shutil.copy', (['file', "(p_anchordir / f'{genome}')"], {}), "(file, p_anchordir / f'{genome}')\n", (6342, 6375), False, 'import shutil\n'), ((7129, 7143), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (7133, 7143), False, 'from pathlib import Path\n'), ((4193, 4200), 'pathlib.Path', 'Path', (['i'], {}), '(i)\n', (4197, 4200), False, 'from pathlib import Path\n')]
|
# Import essential libraries
import requests
import cv2
import numpy as np
import imutils
import mediapipe as mp
import threading
import pygame.mixer
from pygame import *
import time
import os
import sys
import multiprocessing
#Global variables definition
landmarks= {'thumb': [1,2,3,4], 'index': [5,6,7,8], 'middle': [9,10,11,12], 'ring': [13,14,15,16], 'little': [17,18,19,20]} #Position landmarks index corresponding to each finger. Refer to mediapipe github repo for more details
tip_landmarks = [4,8,12,16,20] #index of tip position of all fingers
dist_threshold_param= {'thumb': 8.6, 'index': 6, 'middle': 6, 'ring': 6, 'little': 5} #customized dist threshold values for calibration of finger_detect_and_compute module
left_detect=np.zeros(5);right_detect=np.zeros(5) #arrays representing detected finger presses for each hand
left_coordinates=np.zeros((5,2));right_coordinates=np.zeros((5,2)) #arrays representing pixel coordinates of each detected finger press (tip landmark)
bboxes_white=np.zeros((52,4)) #initializing bboxes for all white keys in standard 88key piano
bboxes_black=np.zeros((36,4)) #initializing bboxes for all black keys in standard 88key piano
start_x=40; start_y=250; #starting pixel coordinates of piano
white_key_width=10; white_key_height=80; black_key_width=5; black_key_height=40 #params related to piano visualization
white_key_reference=[]#list containing reference key values for all white keys.
black_key_reference=[]#list containing reference key values for all black keys.
key_index_array=[]#stores indexes and colors for all detected key presses
play_music_status=1 #flag: 1 enables audio playback of detected key presses
visualizer_status=1 #flag: 1 enables the on-screen piano visualizer
class handDetector():
    """Thin wrapper around MediaPipe Hands for detection, landmark lookup
    and hand counting on OpenCV BGR frames."""
    def __init__(self, mode=False, maxHands=4, detectionCon=0.5, trackCon=0.5):
        # NOTE(review): these four settings are stored but never forwarded to
        # mpHands.Hands(), so MediaPipe runs with its defaults — confirm
        # whether they should be passed through.
        self.mode = mode
        self.maxHands = maxHands #Max no of hands to be detected in one frame.
        self.detectionCon = detectionCon #detection confidence
        self.trackCon = trackCon #tracking confidence--enables tracking rather than detection on every frame if tracking confidence is good (improves fps)
        self.mpHands = mp.solutions.hands
        self.hands = self.mpHands.Hands()
        self.mpDraw = mp.solutions.drawing_utils #drawing object used for drawing later on the image
    def findHands(self, img, draw=True):
        """ Function: Run MediaPipe on a frame, cache the results on self, and
            optionally draw landmarks for every detected hand.
        Arguments: img: BGR image frame (MediaPipe needs RGB, hence the convert),
                   draw: if True, draws landmarks and connections on the image
        returns: img: the frame, annotated in place when draw is True """
        imgRGB = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        self.results = self.hands.process(imgRGB)
        # print(results.multi_hand_landmarks)
        if self.results.multi_hand_landmarks:
            for handLms in self.results.multi_hand_landmarks:
                if draw:
                    self.mpDraw.draw_landmarks(img, handLms,
                                               self.mpHands.HAND_CONNECTIONS)
        return img
    def findPosition(self, img, handNo=0, draw=True):
        """ Function: Collect pixel coordinates of all 21 landmarks of one hand
            from the results cached by the last findHands() call.
        Arguments: img: frame used only for its width/height when converting
                       MediaPipe's normalised coordinates to pixels.
                   handNo: index of the hand to read (0-based).
                   draw: currently unused (drawing code is commented out).
        returns: numpy array of [id, cx, cy] rows; empty when no hand found """
        List = []
        if (self.results.multi_hand_landmarks):
            myHand = self.results.multi_hand_landmarks[handNo]
            for id, lm in enumerate(myHand.landmark):
                h, w, c = img.shape
                cx, cy = int(lm.x * w), int(lm.y * h)
                List.append([id, cx, cy])
                # if draw:
                #     cv2.circle(img, (cx, cy), 15, (255, 0, 255), cv2.FILLED)
        List=np.array(List)
        return List
    def handsCount(self):
        """ Function: Count the hands detected by the last findHands() call.
        Arguments: self
        returns: total no of hands detected in a frame (0 when none) """
        # Returns the no of hand detected in the image frame
        # np.array(None) has shape (), which is falsy, so "no hands" maps to 0.
        dim=np.shape(np.array(self.results.multi_hand_landmarks))
        if(dim):
            return dim[0]
        else:
            return 0
def check_threshold(p1, p2, p3, finger):
    """Decide whether *finger* is currently pressing a key.

    The three landmark points of a finger are scaled down and the
    perimeter of the triangle they span is compared with the per-finger
    threshold captured during calibration: a bent (pressing) finger
    produces a smaller perimeter than an extended one.

    Arguments:
        p1, p2, p3: 2-D pixel coordinates (numpy arrays) of three
            landmarks of one finger.
        finger: finger name, used as the key into dist_threshold_param.

    Returns:
        True when the perimeter falls below the calibrated threshold,
        i.e. a key press is detected.
    """
    global dist_threshold_param
    a, b, c = p1 / 10, p2 / 10, p3 / 10
    # Perimeter of the triangle spanned by the three landmarks.
    perimeter = (
        np.linalg.norm(a - b)
        + np.linalg.norm(c - b)
        + np.linalg.norm(a - c)
    )
    return perimeter < dist_threshold_param[finger]
def finger_detect_and_compute(list):
    """Run the press test on every finger of one hand.

    Arguments:
        list: numpy array of [id, x, y] landmark rows for a single hand,
            as produced by handDetector.findPosition.

    Returns:
        detect_array: length-5 int array, 1 where the corresponding
            finger (thumb, index, middle, ring, little) is pressing.
        coordinates: (5, 2) array of tip pixel coordinates for every
            pressed finger; rows for unpressed fingers stay zero.
    """
    # Landmark index triples per finger, in thumb..little order.
    finger_triples = (
        (2, 3, 4, 'thumb'),
        (6, 7, 8, 'index'),
        (10, 11, 12, 'middle'),
        (14, 15, 16, 'ring'),
        (18, 19, 20, 'little'),
    )
    detect_array = np.array(
        [int(check_threshold(list[a][1:3], list[b][1:3], list[c][1:3], name))
         for a, b, c, name in finger_triples]
    )
    coordinates = np.zeros((5, 2))
    for finger_id in range(5):
        if detect_array[finger_id] != 0:
            # tip_landmarks is defined during calibration — presumably
            # the fingertip row indices [4, 8, 12, 16, 20]; confirm.
            coordinates[finger_id] = list[tip_landmarks][finger_id, 1:3]
    return detect_array, coordinates
def initialize_visualizer(img1):
    """Draw an 88-key piano onto a copy of *img1* and record key bboxes.

    Populates the module-level bboxes_white (52 keys) and bboxes_black
    (36 keys) arrays with one [xmin, ymin, xmax, ymax] row per key,
    matching a standard 88-key layout starting at (start_x, start_y).

    Arguments:
        img1: BGR image frame; a copy is drawn on, the original is
            left untouched.

    Returns:
        img_background: copy of img1 with the piano overlay drawn.
    """
    global bboxes_white, bboxes_black, start_x, start_y, white_key_width, white_key_height, black_key_width, black_key_height
    curr_x = start_x
    curr_y = start_y
    img_background = img1.copy()
    # 52 white keys drawn side by side as outlined rectangles.
    for i in range(52):
        img_background = cv2.rectangle(img_background, (curr_x, curr_y), (curr_x + white_key_width, curr_y + white_key_height), [255, 255, 255], 2)
        bboxes_white[i] = [curr_x, curr_y, curr_x + white_key_width, curr_y + white_key_height]
        curr_x = curr_x + white_key_width
    def draw_black(x):
        # Draw one filled black key at x and return its bbox.
        nonlocal img_background
        img_background = cv2.rectangle(img_background, (x, curr_y), (x + black_key_width, curr_y + black_key_height), [0, 0, 0], -1)
        return [x, curr_y, x + black_key_width, curr_y + black_key_height]
    # The lone A#0 black key straddles the first two white keys.
    curr_x = (int)(start_x + white_key_width - black_key_width / 2.0)
    bboxes_black[0] = draw_black(curr_x)
    curr_x = curr_x + 2 * white_key_width
    # Seven octaves of five black keys each; the horizontal gap after
    # every key follows the C#-D#-F#-G#-A# pattern (in white-key widths).
    steps = (1, 2, 1, 1, 2)
    for octave in range(7):
        for j, step in enumerate(steps):
            bboxes_black[octave * 5 + 1 + j] = draw_black(curr_x)
            curr_x = curr_x + step * white_key_width
    print("White_bboxes=", bboxes_white)
    print("Black_bboxes=", bboxes_black)  # fixed: previously printed bboxes_white
    return img_background
def visualizer(img_background):
    """Highlight every key pressed this frame on the piano overlay.

    Fills the pressed white keys pink and the pressed black keys green,
    then clears key_index_array so the next frame starts fresh.

    Arguments:
        img_background: frame with the piano overlay (modified in place
            by cv2.rectangle).

    Returns:
        None
    """
    global key_index_array,bboxes_white,bboxes_black
    if(visualizer_status):
        print("In thread1")
        try:
            if(len(key_index_array)!=0): #Makes the pressed piano keys in different color for better visualization
                for key_index,color in key_index_array:
                    if(color=='white'):
                        xmin,ymin,xmax,ymax = bboxes_white[key_index]
                        start=(int(xmin),int(ymin))
                        end=(int(xmax),int(ymax))
                        print("start and end=",(start,end))
                        # NOTE(review): img_background_new is never used — the
                        # rectangle is drawn on img_background in place anyway.
                        img_background_new=cv2.rectangle(img_background,start,end,(255,182,193),-1)
                        print('Printing key pressed-----------------------------',(key_index,color))
                    if(color=='black'):
                        xmin,ymin,xmax,ymax = bboxes_black[key_index]
                        start=(int(xmin),int(ymin))
                        end=(int(xmax),int(ymax))
                        print("start and end=",(start,end))
                        img_background_new=cv2.rectangle(img_background,start,end,(144,238,144),-1)
                        print('Printing key pressed-----------------------------',(key_index,color))
                print("key_index_array=",key_index_array)
                key_index_array=[]
        except KeyboardInterrupt:
            print("Exiting visualizer thread")
            sys.exit()
def piano_key_initializer():
    """Populate the global white/black key name lists.

    Fills white_key_reference with the 52 white-key names
    (a0, b0, c1..b7, c8) and black_key_reference with the 36 black-key
    names (a-0, then c-, d-, f-, g-, a- for octaves 1-7), index-aligned
    with the bbox arrays produced by initialize_visualizer.
    """
    global white_key_reference, black_key_reference
    white_key_reference.append('a0')
    white_key_reference.append('b0')
    black_key_reference.append('a-0')
    for octave in range(1, 8):
        for letter in 'cdefgab':
            white_key_reference.append(letter + str(octave))
        for letter in 'cdfga':
            black_key_reference.append(letter + '-' + str(octave))
    white_key_reference.append('c8')
    print("Piano Keys Initialized Succesfully!")
def within_threshold(pos, item):
    """Check whether a fingertip lies strictly inside a key's bbox.

    Arguments:
        pos: (x, y) pixel coordinates of the fingertip.
        item: bounding box [xmin, ymin, xmax, ymax] of one piano key.

    Returns:
        True when the point is strictly inside the box; points exactly
        on an edge count as outside.
    """
    x, y = pos[0], pos[1]
    if item[0] < x < item[2] and item[1] < y < item[3]:
        return True
    return False
def find_note(pos):
    """Map a fingertip position to the piano key underneath it.

    Black keys are tested first because they are drawn on top of (and
    overlap) the white keys.

    Arguments:
        pos: (x, y) pixel coordinates of the tip of a pressed finger.

    Returns:
        (note, index, color): note is the key's reference name
        (e.g. 'c-4'), index its position in the matching bbox array and
        color 'black' or 'white'. When no key contains the position,
        ('Wrong Press', 100, 'None') is returned.
    """
    global bboxes_white, bboxes_black, white_key_reference, black_key_reference
    # (Removed dead locals x,y / index and the loop variable shadowing
    # the builtin `id` — behavior is unchanged.)
    for key_id, box in enumerate(bboxes_black):
        if within_threshold(pos, box):
            return black_key_reference[key_id], key_id, 'black'
    for key_id, box in enumerate(bboxes_white):
        if within_threshold(pos, box):
            return white_key_reference[key_id], key_id, 'white'
    return 'Wrong Press', 100, 'None'
def find_music_list(pos, num):
    """Build the list of .ogg sample paths for the pressed keys.

    Also records every resolved (key index, color) pair in the global
    key_index_array so the visualizer can highlight the pressed keys.

    Arguments:
        pos: sequence of (x, y) fingertip coordinates, one per press.
        num: number of entries in pos to process.

    Returns:
        List of absolute paths to the sample files to play; presses that
        hit no key are skipped.
    """
    global key_index_array
    # NOTE(review): sample directory is hard-coded to one machine.
    sample_dir = '/home/abhinav/Piano_project/25405__tedagame__88-piano-keys-long-reverb/'
    music_list = []
    for press in range(num):
        note, key_index, color = find_note(pos[press])
        if note == 'Wrong Press':
            continue
        key_index_array.append([key_index, color])
        # Resolve the short note name to the actual sample file name.
        for fname in os.listdir(sample_dir):
            if note in fname:
                note = fname
                break
        music_list.append(sample_dir + note)
    return music_list
def build_music_list():
    """Collect all fingertip presses from both hands into a music list.

    Gathers the tip coordinates of every detected press (left hand
    first, then right) and resolves them to sample paths via
    find_music_list.

    Returns:
        List of sample file paths (possibly empty).
        NOTE(review): when play_music_status is falsy the function falls
        through and returns None — callers that call len() on the result
        would crash in that case.
    """
    global left_detect,left_coordinates,right_detect,right_coordinates
    positions=[];music_list=[]
    if(play_music_status):
        try:
            for i in range(5):
                if(left_detect[i]!=0):
                    positions.append(left_coordinates[i])
                if(right_detect[i]!=0):
                    positions.append(right_coordinates[i])
            num=len(positions)
            print('num=',num)
            if(num!=0):
                music_list=find_music_list(positions,num)
            print("Printing Music list in play_music:",music_list)
            return music_list
        except KeyboardInterrupt:
            print("Exiting play music thread")
            sys.exit()
def play_music(q,status):
    """Audio worker: play the queued piano samples in a separate process.

    Blocks on q.get() for the next list of sample paths and plays each
    sample on its own mixer channel. Runs forever until interrupted.

    Arguments:
        q: multiprocessing.Queue carrying lists of sample file paths.
        status: currently unused here (kept for a symmetric signature
            with processor()).

    Returns:
        None (the process exits via sys.exit on KeyboardInterrupt).
    """
    print("Processing play_music process")
    while True:
        try:
            print("In the play_music function--Checking condition")
            # NOTE(review): mixer.init() and set_num_channels run on every
            # iteration — could be hoisted above the loop.
            mixer.init()
            pygame.mixer.set_num_channels(10) # default is 8
            music_list=q.get()  # blocks until the processor sends a list
            if(len(music_list)!=0):
                for id,items in enumerate(music_list):
                    pygame.mixer.Channel(id).play(pygame.mixer.Sound(music_list[id]))
            # NOTE(review): import inside the loop; harmless but should
            # live at the top of the file.
            import time
            time.sleep(2)
        except KeyboardInterrupt:
            print("Play_music process stopped forcefully")
            sys.exit()
def reinitialize():
    """Reset the per-frame global detection state.

    Clears the press flags and tip coordinates for both hands and
    empties the list of highlighted keys, ready for the next frame.
    """
    global right_detect, right_coordinates, left_detect, left_coordinates, key_index_array
    left_detect = np.zeros(5)
    right_detect = np.zeros(5)
    left_coordinates = np.zeros((5, 2))
    right_coordinates = np.zeros((5, 2))
    key_index_array = []
def processor(q,status):
    """Main vision worker: fetch frames, detect presses, queue music.

    Endless loop: pulls a JPEG frame from the IP-camera URL, runs hand
    detection, flags pressed fingers on both hands, builds the list of
    samples to play and hands it to the play_music process via q, then
    draws the piano overlay, highlights and FPS counter on screen.

    Arguments:
        q: multiprocessing.Queue used to send sample lists to play_music.
        status: queue written once at startup; its size gates the
            queue-flush branch below.

    Returns:
        None (the process exits via sys.exit on KeyboardInterrupt).
    """
    # Declare useful variables
    pTime = 0; cTime = 0; right_hand=1; left_hand=0
    lmList=[]; rmList=[]
    detector = handDetector()
    global right_detect,right_coordinates,left_detect,left_coordinates,play_music_status,key_index_array
    music_list_curr=[]
    music_list_prev=[]
    url = "http://192.168.29.189:8080/shot.jpg"
    status.put(1)
    # While loop to continuously fetching data from the Url
    while True:
        try:
            print("Queue Size=",q.qsize())
            # Read image data from server and preprocess
            img_resp = requests.get(url)
            img_arr = np.array(bytearray(img_resp.content), dtype=np.uint8)
            img = cv2.imdecode(img_arr, -1)
            img = imutils.resize(img, width=640, height=480)
            # Detect finger landmarks in left (or/and right hand)
            hands=1
            img = detector.findHands(img) #draw hand landmarks on image
            lmList = detector.findPosition(img,left_hand) #storing position of landmarks in an array
            hands=detector.handsCount() #find total no of hands in image frame
            print("No of hands are",hands)
            if(hands>1):
                rmList = detector.findPosition(img,right_hand)
            if len(lmList) != 0:
                left_detect,left_coordinates = finger_detect_and_compute(lmList)
                print("Left Hand Detection Array=", left_detect)
                print("left coordinates are", left_coordinates)
                # Mark every pressed left-hand fingertip with a circle.
                for i in range(5):
                    if(left_detect[i]!=0):
                        x,y=left_coordinates[i]
                        img=cv2.circle(img, (int(x),int(y)), 10, (10,50,50), 5)
            if len(rmList) != 0 and hands>1:
                right_detect,right_coordinates = finger_detect_and_compute(rmList)
                print("Right Hand Detection Array=", right_detect)
                print("Right coordinates are", right_coordinates)
                # Mark every pressed right-hand fingertip with a circle.
                for i in range(5):
                    if(right_detect[i]!=0):
                        x,y=right_coordinates[i]
                        img=cv2.circle(img, (int(x),int(y)), 10, (50,50,100), 5)
            # NOTE(review): build_music_list returns None when
            # play_music_status is falsy — len() below would crash then.
            music_list_curr=build_music_list() # Build music list
            if(len(music_list_curr)!=0) and music_list_curr!=music_list_prev:
                q.put(music_list_curr) #Pass curr_music_list to another python process running play_music() function
                music_list_prev=music_list_curr
            if(len(music_list_curr)==0 and music_list_curr!=music_list_prev and status.qsize()<=1): # Empty queue if curr_music_list is empty--stop music
                while not q.empty():
                    q.get()
            img_background = initialize_visualizer(img)
            visualizer(img_background) #Visualize virtual piano onscreen
            cTime = time.time()
            fps = 1 / (cTime - pTime)
            pTime = cTime
            lmList=[]
            rmList=[]
            reinitialize() # Reinitiaizing variables to initial values!
            cv2.putText(img_background, str(int(fps)), (10, 70), cv2.FONT_HERSHEY_PLAIN, 3,
                        (255, 0, 255), 3)
            cv2.imshow("Image", img_background)
            cv2.waitKey(100)
            time.sleep(0.1)
        except KeyboardInterrupt:
            print("Program Execution stopped forcefully! Killing all processes!")
            play_music_status=0
            # NOTE(review): visualizer_status is not declared global here,
            # so this assignment only creates a local and has no effect.
            visualizer_status=0
            sys.exit()
# Main function for initiating target processes
def main():
    """Entry point: build the key name tables, then launch the frame
    processor and the audio player as two cooperating processes.

    The processor feeds lists of sample paths to the player through a
    shared queue; a second queue carries a startup signal.
    """
    piano_key_initializer()
    music_queue = multiprocessing.Queue()
    status_queue = multiprocessing.Queue()
    frame_proc = multiprocessing.Process(target=processor, args=(music_queue, status_queue,))
    audio_proc = multiprocessing.Process(target=play_music, args=(music_queue, status_queue,))
    frame_proc.start()
    audio_proc.start()
    print("Exiting main")


if __name__ == "__main__":
    main()
|
[
"cv2.cvtColor",
"cv2.waitKey",
"cv2.imdecode",
"numpy.zeros",
"time.time",
"time.sleep",
"numpy.array",
"numpy.linalg.norm",
"multiprocessing.Queue",
"cv2.rectangle",
"requests.get",
"imutils.resize",
"multiprocessing.Process",
"cv2.imshow",
"os.listdir",
"sys.exit"
] |
[((739, 750), 'numpy.zeros', 'np.zeros', (['(5)'], {}), '(5)\n', (747, 750), True, 'import numpy as np\n'), ((764, 775), 'numpy.zeros', 'np.zeros', (['(5)'], {}), '(5)\n', (772, 775), True, 'import numpy as np\n'), ((852, 868), 'numpy.zeros', 'np.zeros', (['(5, 2)'], {}), '((5, 2))\n', (860, 868), True, 'import numpy as np\n'), ((886, 902), 'numpy.zeros', 'np.zeros', (['(5, 2)'], {}), '((5, 2))\n', (894, 902), True, 'import numpy as np\n'), ((999, 1016), 'numpy.zeros', 'np.zeros', (['(52, 4)'], {}), '((52, 4))\n', (1007, 1016), True, 'import numpy as np\n'), ((1093, 1110), 'numpy.zeros', 'np.zeros', (['(36, 4)'], {}), '((36, 4))\n', (1101, 1110), True, 'import numpy as np\n'), ((6051, 6067), 'numpy.zeros', 'np.zeros', (['(5, 2)'], {}), '((5, 2))\n', (6059, 6067), True, 'import numpy as np\n'), ((7127, 7249), 'cv2.rectangle', 'cv2.rectangle', (['img_background', '(curr_x, curr_y)', '(curr_x + black_key_width, curr_y + black_key_height)', '[0, 0, 0]', '(-1)'], {}), '(img_background, (curr_x, curr_y), (curr_x + black_key_width, \n curr_y + black_key_height), [0, 0, 0], -1)\n', (7140, 7249), False, 'import cv2\n'), ((16022, 16033), 'numpy.zeros', 'np.zeros', (['(5)'], {}), '(5)\n', (16030, 16033), True, 'import numpy as np\n'), ((16047, 16058), 'numpy.zeros', 'np.zeros', (['(5)'], {}), '(5)\n', (16055, 16058), True, 'import numpy as np\n'), ((16080, 16096), 'numpy.zeros', 'np.zeros', (['(5, 2)'], {}), '((5, 2))\n', (16088, 16096), True, 'import numpy as np\n'), ((16114, 16130), 'numpy.zeros', 'np.zeros', (['(5, 2)'], {}), '((5, 2))\n', (16122, 16130), True, 'import numpy as np\n'), ((20071, 20094), 'multiprocessing.Queue', 'multiprocessing.Queue', ([], {}), '()\n', (20092, 20094), False, 'import multiprocessing\n'), ((20107, 20130), 'multiprocessing.Queue', 'multiprocessing.Queue', ([], {}), '()\n', (20128, 20130), False, 'import multiprocessing\n'), ((20169, 20228), 'multiprocessing.Process', 'multiprocessing.Process', ([], {'target': 'processor', 'args': '(q, 
status)'}), '(target=processor, args=(q, status))\n', (20192, 20228), False, 'import multiprocessing\n'), ((20238, 20298), 'multiprocessing.Process', 'multiprocessing.Process', ([], {'target': 'play_music', 'args': '(q, status)'}), '(target=play_music, args=(q, status))\n', (20261, 20298), False, 'import multiprocessing\n'), ((2606, 2642), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2RGB'], {}), '(img, cv2.COLOR_BGR2RGB)\n', (2618, 2642), False, 'import cv2\n'), ((3921, 3935), 'numpy.array', 'np.array', (['List'], {}), '(List)\n', (3929, 3935), True, 'import numpy as np\n'), ((4872, 4895), 'numpy.linalg.norm', 'np.linalg.norm', (['(p1 - p3)'], {}), '(p1 - p3)\n', (4886, 4895), True, 'import numpy as np\n'), ((6758, 6885), 'cv2.rectangle', 'cv2.rectangle', (['img_background', '(curr_x, curr_y)', '(curr_x + white_key_width, curr_y + white_key_height)', '[255, 255, 255]', '(2)'], {}), '(img_background, (curr_x, curr_y), (curr_x + white_key_width, \n curr_y + white_key_height), [255, 255, 255], 2)\n', (6771, 6885), False, 'import cv2\n'), ((7446, 7568), 'cv2.rectangle', 'cv2.rectangle', (['img_background', '(curr_x, curr_y)', '(curr_x + black_key_width, curr_y + black_key_height)', '[0, 0, 0]', '(-1)'], {}), '(img_background, (curr_x, curr_y), (curr_x + black_key_width, \n curr_y + black_key_height), [0, 0, 0], -1)\n', (7459, 7568), False, 'import cv2\n'), ((7714, 7836), 'cv2.rectangle', 'cv2.rectangle', (['img_background', '(curr_x, curr_y)', '(curr_x + black_key_width, curr_y + black_key_height)', '[0, 0, 0]', '(-1)'], {}), '(img_background, (curr_x, curr_y), (curr_x + black_key_width, \n curr_y + black_key_height), [0, 0, 0], -1)\n', (7727, 7836), False, 'import cv2\n'), ((7984, 8106), 'cv2.rectangle', 'cv2.rectangle', (['img_background', '(curr_x, curr_y)', '(curr_x + black_key_width, curr_y + black_key_height)', '[0, 0, 0]', '(-1)'], {}), '(img_background, (curr_x, curr_y), (curr_x + black_key_width, \n curr_y + black_key_height), [0, 0, 0], -1)\n', 
(7997, 8106), False, 'import cv2\n'), ((8252, 8374), 'cv2.rectangle', 'cv2.rectangle', (['img_background', '(curr_x, curr_y)', '(curr_x + black_key_width, curr_y + black_key_height)', '[0, 0, 0]', '(-1)'], {}), '(img_background, (curr_x, curr_y), (curr_x + black_key_width, \n curr_y + black_key_height), [0, 0, 0], -1)\n', (8265, 8374), False, 'import cv2\n'), ((8520, 8642), 'cv2.rectangle', 'cv2.rectangle', (['img_background', '(curr_x, curr_y)', '(curr_x + black_key_width, curr_y + black_key_height)', '[0, 0, 0]', '(-1)'], {}), '(img_background, (curr_x, curr_y), (curr_x + black_key_width, \n curr_y + black_key_height), [0, 0, 0], -1)\n', (8533, 8642), False, 'import cv2\n'), ((4241, 4284), 'numpy.array', 'np.array', (['self.results.multi_hand_landmarks'], {}), '(self.results.multi_hand_landmarks)\n', (4249, 4284), True, 'import numpy as np\n'), ((4819, 4842), 'numpy.linalg.norm', 'np.linalg.norm', (['(p1 - p2)'], {}), '(p1 - p2)\n', (4833, 4842), True, 'import numpy as np\n'), ((4846, 4869), 'numpy.linalg.norm', 'np.linalg.norm', (['(p3 - p2)'], {}), '(p3 - p2)\n', (4860, 4869), True, 'import numpy as np\n'), ((13621, 13711), 'os.listdir', 'os.listdir', (['"""/home/abhinav/Piano_project/25405__tedagame__88-piano-keys-long-reverb/"""'], {}), "(\n '/home/abhinav/Piano_project/25405__tedagame__88-piano-keys-long-reverb/')\n", (13631, 13711), False, 'import os\n'), ((17000, 17017), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (17012, 17017), False, 'import requests\n'), ((17112, 17137), 'cv2.imdecode', 'cv2.imdecode', (['img_arr', '(-1)'], {}), '(img_arr, -1)\n', (17124, 17137), False, 'import cv2\n'), ((17156, 17198), 'imutils.resize', 'imutils.resize', (['img'], {'width': '(640)', 'height': '(480)'}), '(img, width=640, height=480)\n', (17170, 17198), False, 'import imutils\n'), ((19310, 19321), 'time.time', 'time.time', ([], {}), '()\n', (19319, 19321), False, 'import time\n'), ((19648, 19683), 'cv2.imshow', 'cv2.imshow', (['"""Image"""', 
'img_background'], {}), "('Image', img_background)\n", (19658, 19683), False, 'import cv2\n'), ((19696, 19712), 'cv2.waitKey', 'cv2.waitKey', (['(100)'], {}), '(100)\n', (19707, 19712), False, 'import cv2\n'), ((19725, 19740), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (19735, 19740), False, 'import time\n'), ((10509, 10519), 'sys.exit', 'sys.exit', ([], {}), '()\n', (10517, 10519), False, 'import sys\n'), ((14841, 14851), 'sys.exit', 'sys.exit', ([], {}), '()\n', (14849, 14851), False, 'import sys\n'), ((15628, 15641), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (15638, 15641), False, 'import time\n'), ((15747, 15757), 'sys.exit', 'sys.exit', ([], {}), '()\n', (15755, 15757), False, 'import sys\n'), ((19938, 19948), 'sys.exit', 'sys.exit', ([], {}), '()\n', (19946, 19948), False, 'import sys\n'), ((9687, 9749), 'cv2.rectangle', 'cv2.rectangle', (['img_background', 'start', 'end', '(255, 182, 193)', '(-1)'], {}), '(img_background, start, end, (255, 182, 193), -1)\n', (9700, 9749), False, 'import cv2\n'), ((10160, 10222), 'cv2.rectangle', 'cv2.rectangle', (['img_background', 'start', 'end', '(144, 238, 144)', '(-1)'], {}), '(img_background, start, end, (144, 238, 144), -1)\n', (10173, 10222), False, 'import cv2\n')]
|
from itertools import chain
from time import time
from django.urls import reverse
from rest_framework import fields, serializers
from . import models
class LineItemSerializerRegistry:
    """Registers serializers with their associated models.

    This is used instead of discovery or a metaclass-based registry as
    making sure the classes to be registered actually get imported can be
    fragile and non-obvious to debug.

    The registry instance is available at
    ``lorikeet.api_serializers.registry``.
    """

    def __init__(self):
        # One mapping per supported model family, keyed by class name.
        self.line_items = {}
        self.payment_methods = {}
        self.delivery_addresses = {}
        self.adjustments = {}

    def register(self, model, serializer):
        """Associate ``model`` with ``serializer``.

        ``model`` must subclass one of LineItem, PaymentMethod,
        DeliveryAddress or Adjustment; anything else raises ValueError.
        """
        # (Removed stray debug `print(model.__mro__)` left over here.)
        if issubclass(model, models.LineItem):
            self.line_items[model.__name__] = serializer
        elif issubclass(model, models.PaymentMethod):
            self.payment_methods[model.__name__] = serializer
        elif issubclass(model, models.DeliveryAddress):
            self.delivery_addresses[model.__name__] = serializer
        elif issubclass(model, models.Adjustment):
            self.adjustments[model.__name__] = serializer
        else:
            raise ValueError(
                "model must be a subclass of "
                "LineItem, PaymentMethod, DeliveryAddress or "
                "Adjustment"
            )

    def get_serializer_class(self, instance):
        """Return the serializer class registered for ``instance``'s class."""
        if isinstance(instance, models.LineItem):
            return self.line_items[instance.__class__.__name__]
        if isinstance(instance, models.PaymentMethod):
            return self.payment_methods[instance.__class__.__name__]
        if isinstance(instance, models.DeliveryAddress):
            return self.delivery_addresses[instance.__class__.__name__]
        if isinstance(instance, models.Adjustment):
            return self.adjustments[instance.__class__.__name__]
        raise ValueError(
            "instance must be an instance of a "
            "LineItem, PaymentMethod, DeliveryAddress or "
            "Adjustment subclass"
        )

    def get_serializer(self, instance):
        """Return a serializer *instance* bound to ``instance``."""
        return self.get_serializer_class(instance)(instance)
# Module-level singleton; apps call registry.register(...) at import time.
registry = LineItemSerializerRegistry()
class WritableSerializerMethodField(fields.SerializerMethodField):
    """A SerializerMethodField that can also be written to.

    Reads go through the usual ``get_<field_name>`` method; writes are
    coerced by ``write_serializer`` and surface in ``validated_data``
    under the field's own name.
    """
    def __init__(self, write_serializer, method_name=None, **kwargs):
        self.method_name = method_name
        self.write_serializer = write_serializer
        kwargs["source"] = "*"
        # Deliberately skips SerializerMethodField.__init__ and calls the
        # base Field initializer — presumably to avoid the read-only
        # behavior SerializerMethodField forces; the noqa marks the
        # intentional two-level super() call.
        super(fields.SerializerMethodField, self).__init__(**kwargs)  # noqa
    def to_internal_value(self, representation):
        # Expose the written value under this field's name (source is
        # '*', so DRF would otherwise merge it into the root).
        return {
            self.field_name: self.write_serializer.to_representation(representation)
        }
class PrimaryKeyModelSerializer(serializers.ModelSerializer):
    """A serializer that accepts the primary key of an object as input.

    Reading behaves exactly like ModelSerializer; writing accepts only
    a valid primary key of an already-existing instance of the same
    model, so this serializer can never add or edit model rows.

    Provided as a convenience for the common case of a
    :class:`~lorikeet.models.LineItem` subclass holding a foreign key to
    a product model; see the :doc:`Getting Started Guide <backend>` for
    a usage example.
    """

    def get_queryset(self):
        """Queryset the referenced instance is looked up in.

        Defaults to ``self.Meta.model.objects.all()``; override to
        restrict which rows may be referenced.
        """
        model = self.Meta.model
        return model.objects.all()

    def to_internal_value(self, representation):
        # The incoming payload is just a primary key; resolve it to the
        # matching model instance.
        queryset = self.get_queryset()
        return queryset.get(pk=representation)
class RegistryRelatedField(fields.Field):
    """Read-only field that renders any registered model instance using
    the serializer the registry associates with its concrete class."""
    def to_representation(self, instance):
        return registry.get_serializer(instance).data
class RegistryRelatedWithMetadataSerializer(serializers.Serializer):
    """Wraps a registered instance as ``{"type": <class name>, "data": …}``
    so API clients can tell concrete subclasses apart."""
    type = fields.SerializerMethodField()
    data = fields.SerializerMethodField()
    def get_type(self, instance):
        # The concrete subclass name doubles as the registry key.
        return instance.__class__.__name__
    def get_data(self, instance):
        # Delegate rendering to whatever serializer is registered for
        # the instance's class.
        return RegistryRelatedField().to_representation(instance)
class LineItemMetadataSerializer(RegistryRelatedWithMetadataSerializer):
    """Type/data envelope for cart line items, with a writable ``data``
    payload plus the line total and detail URL."""
    data = WritableSerializerMethodField(fields.DictField())
    total = fields.SerializerMethodField()
    url = fields.SerializerMethodField()
    def get_total(self, instance):
        # Totals are rendered as strings.
        return str(instance.get_total())
    def get_url(self, instance):
        return reverse("lorikeet:cart-item", kwargs={"id": instance.id})
    def update(self, instance, validated_data):
        # Delegate the write to the serializer registered for the
        # instance's concrete class.
        ser = registry.get_serializer(instance)
        return ser.update(instance, validated_data["data"])
class DeliveryAddressSerializer(RegistryRelatedWithMetadataSerializer):
    """Type/data envelope for a delivery address, with a writable
    ``selected`` flag tying it to the cart plus its detail URL."""
    selected = WritableSerializerMethodField(fields.BooleanField())
    url = fields.SerializerMethodField()
    def get_selected(self, instance):
        # True when this address is the one attached to the cart.
        cart = self.context["cart"]
        return instance.id == cart.delivery_address_id
    def get_url(self, instance):
        return reverse("lorikeet:address", kwargs={"id": instance.id})
    def update(self, instance, validated_data):
        # Writing selected=true attaches this address to the cart;
        # writing false is a no-op.
        if not validated_data["selected"]:
            return instance
        cart = self.context["cart"]
        cart.delivery_address = instance
        cart.save()
        return instance
class PaymentMethodSerializer(RegistryRelatedWithMetadataSerializer):
    """Type/data envelope for a payment method, with a writable
    ``selected`` flag tying it to the cart plus its detail URL."""
    selected = WritableSerializerMethodField(fields.BooleanField())
    url = fields.SerializerMethodField()
    def get_selected(self, instance):
        # True when this payment method is the one attached to the cart.
        cart = self.context["cart"]
        return instance.id == cart.payment_method_id
    def update(self, instance, validated_data):
        # Writing selected=true attaches this payment method to the
        # cart; writing false is a no-op.
        if not validated_data["selected"]:
            return instance
        cart = self.context["cart"]
        cart.payment_method = instance
        cart.save()
        return instance
    def get_url(self, instance):
        return reverse("lorikeet:payment-method", kwargs={"id": instance.id})
class AdjustmentSerializer(RegistryRelatedWithMetadataSerializer):
    """Type/data envelope for cart adjustments, with their computed
    total and detail URL."""
    total = fields.SerializerMethodField()
    url = fields.SerializerMethodField()
    def get_total(self, instance):
        # TODO: Store subtotal so that it's only calculated once
        return str(instance.get_total(instance.cart.get_subtotal()))
    def get_url(self, instance):
        return reverse("lorikeet:adjustment", kwargs={"id": instance.id})
class SubclassListSerializer(serializers.ListSerializer):
    """ListSerializer that down-casts each queryset row to its concrete
    subclass before serializing (select_subclasses — presumably from
    django-model-utils' InheritanceManager)."""
    def to_representation(self, instance, *args, **kwargs):
        instance = instance.select_subclasses()
        return super().to_representation(instance, *args, **kwargs)
class CartSerializer(serializers.ModelSerializer):
    """Full read representation of a cart: line items, addresses,
    payment methods, adjustments, totals, completion state and the URLs
    a client needs to mutate each part."""
    items = SubclassListSerializer(child=LineItemMetadataSerializer())
    new_item_url = fields.SerializerMethodField()
    subtotal = fields.DecimalField(
        max_digits=7, decimal_places=2, source="get_subtotal"
    )
    delivery_addresses = fields.SerializerMethodField()
    new_address_url = fields.SerializerMethodField()
    payment_methods = fields.SerializerMethodField()
    new_payment_method_url = fields.SerializerMethodField()
    adjustments = SubclassListSerializer(child=AdjustmentSerializer())
    new_adjustment_url = fields.SerializerMethodField()
    grand_total = fields.DecimalField(
        max_digits=7, decimal_places=2, source="get_grand_total"
    )
    is_complete = fields.SerializerMethodField()
    incomplete_reasons = fields.SerializerMethodField()
    is_authenticated = fields.SerializerMethodField()
    checkout_url = fields.SerializerMethodField()
    generated_at = fields.SerializerMethodField()
    email = fields.EmailField()
    compatible_version = fields.SerializerMethodField()
    incompatible_version = fields.SerializerMethodField()
    def get_new_item_url(self, _):
        return reverse("lorikeet:add-to-cart")
    def get_new_address_url(self, _):
        return reverse("lorikeet:new-address")
    def get_delivery_addresses(self, cart):
        """All active addresses of the cart's user, always including the
        currently selected one (even if it belongs to no user or is
        inactive)."""
        selected = cart.delivery_address_subclass
        the_set = []
        if cart.user:
            the_set = cart.user.delivery_addresses.filter(
                active=True
            ).select_subclasses()
        if selected is not None and selected not in the_set:
            the_set = chain(the_set, [selected])
        return DeliveryAddressSerializer(
            instance=the_set, many=True, context={"cart": cart}
        ).data
    def get_new_payment_method_url(self, _):
        return reverse("lorikeet:new-payment-method")
    def get_payment_methods(self, cart):
        """All active payment methods of the cart's user, always
        including the currently selected one."""
        the_set = []
        selected = cart.payment_method_subclass
        if cart.user:
            the_set = cart.user.paymentmethod_set.filter(
                active=True
            ).select_subclasses()
        if selected is not None and selected not in the_set:
            the_set = chain(the_set, [selected])
        return PaymentMethodSerializer(
            instance=the_set, many=True, context={"cart": cart}
        ).data
    def get_new_adjustment_url(self, _):
        return reverse("lorikeet:new-adjustment")
    def get_generated_at(self, cart):
        # Unix timestamp of serialization time, so clients can discard
        # stale cart snapshots.
        return time()
    def get_is_complete(self, cart):
        return cart.is_complete()
    def get_incomplete_reasons(self, cart):
        return cart.errors.to_json()
    def get_is_authenticated(self, cart):
        return cart.user_id is not None
    def get_checkout_url(self, _):
        return reverse("lorikeet:checkout")
    def get_compatible_version(self, _):
        # Client API version negotiation constants.
        return 2
    def get_incompatible_version(self, _):
        return 1
    class Meta:
        model = models.Cart
        fields = (
            "items",
            "new_item_url",
            "delivery_addresses",
            "new_address_url",
            "payment_methods",
            "new_payment_method_url",
            "grand_total",
            "generated_at",
            "is_complete",
            "incomplete_reasons",
            "checkout_url",
            "is_authenticated",
            "email",
            "adjustments",
            "new_adjustment_url",
            "subtotal",
            "compatible_version",
            "incompatible_version",
        )
class CartUpdateSerializer(serializers.ModelSerializer):
    """Write serializer for the cart itself; only the contact email is
    editable this way."""
    class Meta:
        model = models.Cart
        fields = ("email",)
class LineItemSerializer(serializers.ModelSerializer):
    """Base serializer for LineItem subclasses.

    The serializer must know which cart it writes into: either pass
    ``cart=`` explicitly or supply an ``instance`` whose cart is reused.
    """

    def __init__(self, instance=None, *args, **kwargs):  # noqa
        try:
            self.cart = kwargs.pop("cart")
        except KeyError:
            if instance is None:
                raise TypeError(
                    "Either instance or cart arguments must be "
                    "provided to {}".format(self.__class__.__name__)
                ) from None
            self.cart = instance.cart
        super().__init__(instance, *args, **kwargs)

    def create(self, validated_data):
        # Newly created line items always belong to this serializer's cart.
        validated_data["cart"] = self.cart
        return super().create(validated_data)
|
[
"rest_framework.fields.EmailField",
"rest_framework.fields.BooleanField",
"rest_framework.fields.DictField",
"rest_framework.fields.SerializerMethodField",
"time.time",
"django.urls.reverse",
"rest_framework.fields.DecimalField",
"itertools.chain"
] |
[((3968, 3998), 'rest_framework.fields.SerializerMethodField', 'fields.SerializerMethodField', ([], {}), '()\n', (3996, 3998), False, 'from rest_framework import fields, serializers\n'), ((4010, 4040), 'rest_framework.fields.SerializerMethodField', 'fields.SerializerMethodField', ([], {}), '()\n', (4038, 4040), False, 'from rest_framework import fields, serializers\n'), ((4368, 4398), 'rest_framework.fields.SerializerMethodField', 'fields.SerializerMethodField', ([], {}), '()\n', (4396, 4398), False, 'from rest_framework import fields, serializers\n'), ((4409, 4439), 'rest_framework.fields.SerializerMethodField', 'fields.SerializerMethodField', ([], {}), '()\n', (4437, 4439), False, 'from rest_framework import fields, serializers\n'), ((4933, 4963), 'rest_framework.fields.SerializerMethodField', 'fields.SerializerMethodField', ([], {}), '()\n', (4961, 4963), False, 'from rest_framework import fields, serializers\n'), ((5550, 5580), 'rest_framework.fields.SerializerMethodField', 'fields.SerializerMethodField', ([], {}), '()\n', (5578, 5580), False, 'from rest_framework import fields, serializers\n'), ((6101, 6131), 'rest_framework.fields.SerializerMethodField', 'fields.SerializerMethodField', ([], {}), '()\n', (6129, 6131), False, 'from rest_framework import fields, serializers\n'), ((6142, 6172), 'rest_framework.fields.SerializerMethodField', 'fields.SerializerMethodField', ([], {}), '()\n', (6170, 6172), False, 'from rest_framework import fields, serializers\n'), ((6830, 6860), 'rest_framework.fields.SerializerMethodField', 'fields.SerializerMethodField', ([], {}), '()\n', (6858, 6860), False, 'from rest_framework import fields, serializers\n'), ((6876, 6950), 'rest_framework.fields.DecimalField', 'fields.DecimalField', ([], {'max_digits': '(7)', 'decimal_places': '(2)', 'source': '"""get_subtotal"""'}), "(max_digits=7, decimal_places=2, source='get_subtotal')\n", (6895, 6950), False, 'from rest_framework import fields, serializers\n'), ((6990, 7020), 
'rest_framework.fields.SerializerMethodField', 'fields.SerializerMethodField', ([], {}), '()\n', (7018, 7020), False, 'from rest_framework import fields, serializers\n'), ((7043, 7073), 'rest_framework.fields.SerializerMethodField', 'fields.SerializerMethodField', ([], {}), '()\n', (7071, 7073), False, 'from rest_framework import fields, serializers\n'), ((7096, 7126), 'rest_framework.fields.SerializerMethodField', 'fields.SerializerMethodField', ([], {}), '()\n', (7124, 7126), False, 'from rest_framework import fields, serializers\n'), ((7156, 7186), 'rest_framework.fields.SerializerMethodField', 'fields.SerializerMethodField', ([], {}), '()\n', (7184, 7186), False, 'from rest_framework import fields, serializers\n'), ((7283, 7313), 'rest_framework.fields.SerializerMethodField', 'fields.SerializerMethodField', ([], {}), '()\n', (7311, 7313), False, 'from rest_framework import fields, serializers\n'), ((7332, 7409), 'rest_framework.fields.DecimalField', 'fields.DecimalField', ([], {'max_digits': '(7)', 'decimal_places': '(2)', 'source': '"""get_grand_total"""'}), "(max_digits=7, decimal_places=2, source='get_grand_total')\n", (7351, 7409), False, 'from rest_framework import fields, serializers\n'), ((7442, 7472), 'rest_framework.fields.SerializerMethodField', 'fields.SerializerMethodField', ([], {}), '()\n', (7470, 7472), False, 'from rest_framework import fields, serializers\n'), ((7498, 7528), 'rest_framework.fields.SerializerMethodField', 'fields.SerializerMethodField', ([], {}), '()\n', (7526, 7528), False, 'from rest_framework import fields, serializers\n'), ((7552, 7582), 'rest_framework.fields.SerializerMethodField', 'fields.SerializerMethodField', ([], {}), '()\n', (7580, 7582), False, 'from rest_framework import fields, serializers\n'), ((7602, 7632), 'rest_framework.fields.SerializerMethodField', 'fields.SerializerMethodField', ([], {}), '()\n', (7630, 7632), False, 'from rest_framework import fields, serializers\n'), ((7652, 7682), 
'rest_framework.fields.SerializerMethodField', 'fields.SerializerMethodField', ([], {}), '()\n', (7680, 7682), False, 'from rest_framework import fields, serializers\n'), ((7695, 7714), 'rest_framework.fields.EmailField', 'fields.EmailField', ([], {}), '()\n', (7712, 7714), False, 'from rest_framework import fields, serializers\n'), ((7740, 7770), 'rest_framework.fields.SerializerMethodField', 'fields.SerializerMethodField', ([], {}), '()\n', (7768, 7770), False, 'from rest_framework import fields, serializers\n'), ((7798, 7828), 'rest_framework.fields.SerializerMethodField', 'fields.SerializerMethodField', ([], {}), '()\n', (7826, 7828), False, 'from rest_framework import fields, serializers\n'), ((4336, 4354), 'rest_framework.fields.DictField', 'fields.DictField', ([], {}), '()\n', (4352, 4354), False, 'from rest_framework import fields, serializers\n'), ((4566, 4623), 'django.urls.reverse', 'reverse', (['"""lorikeet:cart-item"""'], {'kwargs': "{'id': instance.id}"}), "('lorikeet:cart-item', kwargs={'id': instance.id})\n", (4573, 4623), False, 'from django.urls import reverse\n'), ((4900, 4921), 'rest_framework.fields.BooleanField', 'fields.BooleanField', ([], {}), '()\n', (4919, 4921), False, 'from rest_framework import fields, serializers\n'), ((5123, 5178), 'django.urls.reverse', 'reverse', (['"""lorikeet:address"""'], {'kwargs': "{'id': instance.id}"}), "('lorikeet:address', kwargs={'id': instance.id})\n", (5130, 5178), False, 'from django.urls import reverse\n'), ((5517, 5538), 'rest_framework.fields.BooleanField', 'fields.BooleanField', ([], {}), '()\n', (5536, 5538), False, 'from rest_framework import fields, serializers\n'), ((5957, 6019), 'django.urls.reverse', 'reverse', (['"""lorikeet:payment-method"""'], {'kwargs': "{'id': instance.id}"}), "('lorikeet:payment-method', kwargs={'id': instance.id})\n", (5964, 6019), False, 'from django.urls import reverse\n'), ((6392, 6450), 'django.urls.reverse', 'reverse', (['"""lorikeet:adjustment"""'], {'kwargs': 
"{'id': instance.id}"}), "('lorikeet:adjustment', kwargs={'id': instance.id})\n", (6399, 6450), False, 'from django.urls import reverse\n'), ((7880, 7911), 'django.urls.reverse', 'reverse', (['"""lorikeet:add-to-cart"""'], {}), "('lorikeet:add-to-cart')\n", (7887, 7911), False, 'from django.urls import reverse\n'), ((7966, 7997), 'django.urls.reverse', 'reverse', (['"""lorikeet:new-address"""'], {}), "('lorikeet:new-address')\n", (7973, 7997), False, 'from django.urls import reverse\n'), ((8552, 8590), 'django.urls.reverse', 'reverse', (['"""lorikeet:new-payment-method"""'], {}), "('lorikeet:new-payment-method')\n", (8559, 8590), False, 'from django.urls import reverse\n'), ((9133, 9167), 'django.urls.reverse', 'reverse', (['"""lorikeet:new-adjustment"""'], {}), "('lorikeet:new-adjustment')\n", (9140, 9167), False, 'from django.urls import reverse\n'), ((9222, 9228), 'time.time', 'time', ([], {}), '()\n', (9226, 9228), False, 'from time import time\n'), ((9517, 9545), 'django.urls.reverse', 'reverse', (['"""lorikeet:checkout"""'], {}), "('lorikeet:checkout')\n", (9524, 9545), False, 'from django.urls import reverse\n'), ((8342, 8368), 'itertools.chain', 'chain', (['the_set', '[selected]'], {}), '(the_set, [selected])\n', (8347, 8368), False, 'from itertools import chain\n'), ((8929, 8955), 'itertools.chain', 'chain', (['the_set', '[selected]'], {}), '(the_set, [selected])\n', (8934, 8955), False, 'from itertools import chain\n')]
|
# -*- coding: utf-8 -*-
from collections import OrderedDict
from shutil import rmtree
import xlsxwriter
from classy_xlsx.core import XlsxContext
from .worksheet import XlsxSheetFabric, OneRegionXlsxSheet, XlsxSheet
class XlsxWorkbook(XlsxContext):
    """Declarative xlsx workbook: sheets are declared as class-level
    XlsxSheet/XlsxSheetFabric fields and rendered via make_report().

    Python 2 code (uses dict.iteritems()).
    """
    # Default output path; may be overridden via the constructor or
    # make_report().
    file_name = '/tmp/workbook.xlsx'
    def __init__(self, context=None, file_name=None):
        super(XlsxWorkbook, self).__init__(context=context)
        if file_name:
            self.file_name = file_name
        self.sheets = []
        self._dest_file = self.get_filename()
        # The underlying xlsxwriter workbook is created immediately, bound to
        # the destination file chosen at construction time.
        self.out_wb = xlsxwriter.Workbook(self._dest_file)
        # Cache of xlsxwriter Format objects keyed by repr() of the format
        # dict (see get_format()).
        self.formats = {}
        self.tmp_dir = None
        # Collect the sheet fields declared on the class onto this instance.
        # NOTE(review): presumably returns an ordered mapping of field name
        # -> sheet/fabric -- confirm against XlsxSheet.copy_fields_to_instance.
        raw_sheets = XlsxSheet.copy_fields_to_instance(instance=self)
        self.sheets = OrderedDict()
        for name, sheet in raw_sheets.iteritems():
            if isinstance(sheet, XlsxSheetFabric):
                # A fabric expands into several sub-sheets, each registered
                # under its own name.
                i = 0
                for sub_sheet in sheet.get_sheets():
                    sub_sheet.workbook = self
                    self.sheets[sub_sheet.get_name()] = sub_sheet
                    i += 1
            else:
                # NOTE(review): plain sheets do not get .workbook assigned
                # here, unlike fabric sub-sheets -- confirm that is intended.
                self.sheets[name] = sheet
    def get_format(self, fmt):
        """Return a cached xlsxwriter format for the given format dict."""
        key = repr(fmt)
        if key not in self.formats:
            self.formats[key] = self.out_wb.add_format(fmt)
        return self.formats[key]
    def get_filename(self):
        """Destination path for the generated workbook."""
        return self.file_name
    def get_result_ws(self, sheet_name):
        """Return the worksheet with this (sanitized) name, creating it if
        it does not exist yet."""
        sheet_name = self._refine_sheet_name(sheet_name)
        for ws in self.out_wb.worksheets():
            ws_name = ws.get_name()
            if ws_name == sheet_name:
                break
        else:
            # for/else: no existing worksheet matched, so add a new one.
            ws = self.out_wb.add_worksheet(sheet_name)
        return ws
    def make_report(self, context=None, file_name=None):
        """Render every sheet, save the workbook and return the file path.

        Sheets with a truthy fill_priority are prepared first and rendered
        last, in ascending priority order.
        NOTE(review): file_name set here does not affect the already-created
        xlsxwriter.Workbook from __init__ -- the output still goes to the
        original _dest_file; confirm whether that is intended.
        """
        if context:
            self._context = context
        if file_name:
            self.file_name = file_name
        self.before_make_report()
        sheets_by_priority = []
        for sheet in self.sheets.values():
            if sheet.fill_priority:
                sheet.prepare_to_xlsx()
                sheets_by_priority.append(sheet)
            else:
                sheet.to_xlsx()
        sheets_by_priority.sort(key=lambda x: x.fill_priority)
        for sheet in sheets_by_priority:
            sheet.to_xlsx()
        self.save()
        self.after_make_report()
        return self._dest_file
    def save(self):
        """Close (write) the workbook and clean up any temp directory."""
        self.out_wb.close()
        if self.tmp_dir:
            rmtree(self.tmp_dir)
            self.tmp_dir = None
    def before_make_report(self):
        # Hook for subclasses; called before any sheet is rendered.
        pass
    def after_make_report(self):
        # Hook for subclasses; called after the workbook has been saved.
        pass
    def _refine_sheet_name(self, sheet_name):
        """Replace characters xlsx forbids in sheet names ('[' and ']')
        with underscores."""
        res = u''
        for char in sheet_name:
            if char in ('[', ']'):
                res += '_'
            else:
                res += char
        return res
class OneTableXlsxWorkbook(XlsxWorkbook):
    """Convenience workbook holding a single sheet built from one region
    class, supplied either as a class attribute or a constructor kwarg."""
    region_class = None
    list_name = 'list1'

    def __init__(self, **kwargs):
        # A kwarg takes precedence over the class-level attribute.
        if 'region_class' in kwargs:
            self.region_class = kwargs.pop('region_class')
        elif not self.region_class:
            raise Exception('OneTableXlsxWorkbook need region_class as attribute or in kwargs!')
        # The sheet must be attached to the class *before* the base
        # constructor runs, because XlsxWorkbook.__init__ collects sheet
        # fields from the class.
        setattr(self.__class__, self.list_name,
                OneRegionXlsxSheet(region_class=self.region_class))
        super(OneTableXlsxWorkbook, self).__init__(**kwargs)
|
[
"collections.OrderedDict",
"shutil.rmtree",
"xlsxwriter.Workbook"
] |
[((558, 594), 'xlsxwriter.Workbook', 'xlsxwriter.Workbook', (['self._dest_file'], {}), '(self._dest_file)\n', (577, 594), False, 'import xlsxwriter\n'), ((742, 755), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (753, 755), False, 'from collections import OrderedDict\n'), ((2463, 2483), 'shutil.rmtree', 'rmtree', (['self.tmp_dir'], {}), '(self.tmp_dir)\n', (2469, 2483), False, 'from shutil import rmtree\n')]
|
from tc2py import *
import pysces
import re
def getPyscesModel():
    """Build and load a PySCeS model from the current TinkerCell network.

    Pulls the stoichiometry, rate laws, parameters, fixed species, initial
    values, forcing functions and events out of TinkerCell, assembles a
    PySCeS model-definition string, and loads it.

    Returns:
        The loaded (doLoad-ed) pysces model object.
    """
    A = tc_allItems()
    N = fromMatrix(tc_getStoichiometry(A), True)
    rates0 = fromTC(tc_getRates(A))
    params = fromTC(tc_getParameters(A))
    fixed = fromTC(tc_getFixedVariables(A))
    inits = fromTC(tc_getInitialValues(A))
    funcsNames = fromTC(tc_getForcingFunctionNames(A))
    funcsAssign0 = fromTC(tc_getForcingFunctionAssignments(A))
    triggers = fromTC(tc_getEventTriggers())
    events0 = fromTC(tc_getEventResponses())
    #tc_deleteItemsArray(A);
    emptyExists = False
    modelString = ''
    # PySCeS uses ** for exponentiation; translate TinkerCell's ^ operator.
    # (Raw string avoids the invalid '\^' escape warning.)
    p = re.compile(r'\^')
    rates = [p.sub('**', i) for i in rates0]
    funcsAssign = [p.sub('**', i) for i in funcsAssign0]
    events = [p.sub('**', i) for i in events0]
    reacs = len(rates)
    species = len(N[0])
    for i in range(0, reacs):  # for each reaction
        lhs = []
        rhs = []
        for j in range(0, species):  # get reactants and products
            n = N[2][j][i]
            if n > 0:
                if n != 1.0:
                    rhs.append("{" + str(n) + "}" + str(N[0][j]))  # product
                else:
                    rhs.append(str(N[0][j]))
            elif n < 0:
                n = -n
                if n != 1.0:
                    lhs.append("{" + str(n) + "}" + str(N[0][j]))  # reactant
                else:
                    lhs.append(str(N[0][j]))
        # full reaction and its rate
        if len(lhs) > 0 or len(rhs) > 0:
            modelString += N[1][i] + ":\n"  # reaction name
            # Reactions with no reactants or no products exchange mass with
            # a pseudo-species EMPTY, which must then be declared FIX'ed.
            if len(lhs) == 0:
                lhs.append("EMPTY")
                emptyExists = True
            if len(rhs) == 0:
                rhs.append("EMPTY")
                emptyExists = True
            modelString += " " + "+".join(lhs) + " > " + "+".join(rhs) + "\n"
            modelString += " " + rates[i] + "\n\n"
    # we are done with reactions. moving on to params, events, functions, etc.
    fix = ''
    if emptyExists:
        fix = "FIX: EMPTY"
    n = len(fixed[0])
    if n > 0 and len(fixed[0]) == len(fixed[2][0]):
        if not emptyExists:
            fix += "FIX:"
        # NOTE(review): this loop indexes fixed[0][0][i] while the value loop
        # below uses fixed[0][i]; confirm which matches fromTC's layout.
        for i in range(0, n):
            fix += " " + fixed[0][0][i]
    modelString = fix + "\n\n" + modelString
    modelString += "# Init ext\n"
    # fixed variables
    if n > 0 and len(fixed[0]) == len(fixed[2][0]):
        for i in range(0, n):
            modelString += fixed[0][i] + " = " + str(fixed[2][0][i]) + "\n"
    # initial variables
    hashInits = {}
    n = len(inits[0])
    if n > 0 and len(inits[0]) == len(inits[2][0]):
        modelString += "\n# Init vars\n"
        for i in range(0, n):
            hashInits[inits[0][i]] = inits[2][0][i]
            modelString += inits[0][i] + " = " + str(inits[2][0][i]) + "\n"
    # Species without an explicit initial value default to zero.
    # (`in` instead of the Python-2-only dict.has_key.)
    for j in N[0]:
        if j not in hashInits:
            modelString += j + " = 0.0\n"
    # parameters -- remove unused parameters
    n = len(params[0])
    if n > 0 and len(params[0]) == len(params[2][0]):
        modelString += "\n# Init params\n"
        for i in range(0, n):  # for each parameter
            modelString += params[0][i] + " = " + str(params[2][0][i]) + "\n"
    n = len(funcsNames)
    if n > 0 and len(funcsNames) == len(funcsAssign):
        modelString += "\n# Forcing functions\n"
        for i in range(0, n):
            modelString += "!F " + funcsNames[i] + " = " + funcsAssign[i] + "\n"
    n = len(triggers)
    if n > 0 and len(triggers) == len(events):
        modelString += "\n# Events\n"
        for i in range(0, n):
            modelString += "Event: event" + str(i) + "," + triggers[i] + " , 0 {" + events[i] + "}\n"
    # Bug fix: re.sub returns a new string (strings are immutable); the
    # original discarded the result, leaving any remaining ^ untranslated.
    modelString = p.sub("**", modelString)
    #return modelString;
    mod = pysces.model("model", loader="string", fString=modelString)
    mod.doLoad()
    return mod
|
[
"pysces.model",
"re.compile"
] |
[((679, 696), 're.compile', 're.compile', (['"""\\\\^"""'], {}), "('\\\\^')\n", (689, 696), False, 'import re\n'), ((3839, 3856), 're.compile', 're.compile', (['"""\\\\^"""'], {}), "('\\\\^')\n", (3849, 3856), False, 'import re\n'), ((3922, 3981), 'pysces.model', 'pysces.model', (['"""model"""'], {'loader': '"""string"""', 'fString': 'modelString'}), "('model', loader='string', fString=modelString)\n", (3934, 3981), False, 'import pysces\n')]
|
import logging
from io import StringIO
from optparse import make_option
from django.core.management.base import BaseCommand
import pymarc
from core.management.commands import configure_logging
from core import solr_index
from core.models import Title
# Configure the command's log handlers before creating the module logger.
configure_logging("openoni_purge_titles.config", "openoni_purge_etitles.log")
# Module-level logger used by Command.handle below.
_log = logging.getLogger(__name__)
class Command(BaseCommand):
    """
    Management command for purging title records which have an 856 field
    containing a link to Chronicling America, and which appear to be records
    for an electronic only version of a title 245 $h == [electronic resource].
    The script is careful not to purge any records that have issues attached
    to them.
    If you want to see the records that will be purged use the --pretend
    option.
    """

    def add_arguments(self, parser):
        # Options
        parser.add_argument('-p', '--pretend', action='store_true',
            default=False, dest='pretend', help='Pretend; just print titles')

    def handle(self, **options):
        for title in Title.objects.filter(urls__value__icontains=
                'chroniclingamerica'):
            # Parse the stored MARC XML to inspect the 245 $h subfield.
            record = pymarc.parse_xml_to_array(StringIO(title.marc.xml))[0]
            if record['245']['h'] == '[electronic resource].':
                if options['pretend']:
                    print(title)
                else:
                    # Bug fix: the original log call had %s placeholders but
                    # no arguments, so it logged the literal "%s [%s]".
                    # Lazy %-args also avoid formatting when the level is off.
                    _log.info("deleting %s [%s] from solr index",
                              title, title.lccn)
                    solr_index.delete_title(title)
                    _log.info("purging %s [%s]", title, title.lccn)
                    title.delete()
        if not options['pretend']:
            # Commit the solr deletions only if we actually purged anything.
            solr_index.conn().commit()
|
[
"io.StringIO",
"core.solr_index.delete_title",
"core.models.Title.objects.filter",
"core.solr_index.conn",
"logging.getLogger",
"core.management.commands.configure_logging"
] |
[((255, 332), 'core.management.commands.configure_logging', 'configure_logging', (['"""openoni_purge_titles.config"""', '"""openoni_purge_etitles.log"""'], {}), "('openoni_purge_titles.config', 'openoni_purge_etitles.log')\n", (272, 332), False, 'from core.management.commands import configure_logging\n'), ((340, 367), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (357, 367), False, 'import logging\n'), ((1079, 1144), 'core.models.Title.objects.filter', 'Title.objects.filter', ([], {'urls__value__icontains': '"""chroniclingamerica"""'}), "(urls__value__icontains='chroniclingamerica')\n", (1099, 1144), False, 'from core.models import Title\n'), ((1210, 1234), 'io.StringIO', 'StringIO', (['title.marc.xml'], {}), '(title.marc.xml)\n', (1218, 1234), False, 'from io import StringIO\n'), ((1482, 1512), 'core.solr_index.delete_title', 'solr_index.delete_title', (['title'], {}), '(title)\n', (1505, 1512), False, 'from core import solr_index\n'), ((1666, 1683), 'core.solr_index.conn', 'solr_index.conn', ([], {}), '()\n', (1681, 1683), False, 'from core import solr_index\n')]
|
from Instrucciones.TablaSimbolos.Instruccion import Instruccion
from Instrucciones.Expresiones.Primitivo import Primitivo
from Instrucciones.Expresiones.Enum import Enum
from storageManager.jsonMode import *
class CreateType(Instruccion):
    """CREATE TYPE instruction: registers a new enum type in the AST."""

    def __init__(self, id, tipo, listaExpre, strGram, linea, columna, strSent):
        Instruccion.__init__(self, tipo, linea, columna, strGram, strSent)
        # Name of the enum type being created.
        self.valor = id
        # Expression list supplying the enum's values (may be None/empty).
        self.listaExpre = listaExpre

    def ejecutar(self, tabla, arbol):
        """Evaluate the value expressions and append the enum to arbol.lEnum."""
        super().ejecutar(tabla, arbol)
        enum1 = Enum(self.valor, None, self.linea, self.columna)
        lista = []
        if self.listaExpre:
            # Iterate the expressions directly instead of range(len(...)).
            for expre in self.listaExpre:
                # Only primitive literals contribute values to the enum.
                if isinstance(expre, Primitivo):
                    lista.append(expre.ejecutar(tabla, arbol))
        enum1.listaValores = lista
        arbol.lEnum.append(enum1)
        arbol.consola.append("Consulta devuelta correctamente.")

    def traducir(self, tabla, arbol, cadenaTraducida):
        """Emit 3-address code that replays the original SQL sentence."""
        temporal = arbol.generaTemporal()
        codigo = "\t" + temporal + " = " + "\"" + self.strSent + "\"\n"
        codigo += "\tFuncionesPara3D.ejecutarsentecia(" + temporal + ")\n\n"
        return codigo
|
[
"Instrucciones.TablaSimbolos.Instruccion.Instruccion.__init__",
"Instrucciones.Expresiones.Enum.Enum"
] |
[((327, 393), 'Instrucciones.TablaSimbolos.Instruccion.Instruccion.__init__', 'Instruccion.__init__', (['self', 'tipo', 'linea', 'columna', 'strGram', 'strSent'], {}), '(self, tipo, linea, columna, strGram, strSent)\n', (347, 393), False, 'from Instrucciones.TablaSimbolos.Instruccion import Instruccion\n'), ((545, 593), 'Instrucciones.Expresiones.Enum.Enum', 'Enum', (['self.valor', 'None', 'self.linea', 'self.columna'], {}), '(self.valor, None, self.linea, self.columna)\n', (549, 593), False, 'from Instrucciones.Expresiones.Enum import Enum\n')]
|
"""Contains specialized classes which don't interact directly with users, don't
have many instances, and use human-readable key names."""
from google.appengine.api import logservice # ErrorChecker
from google.appengine.api import mail # ErrorChecker
from google.appengine.api import search # Indexer
from google.appengine.ext import db
from google.appengine.ext.db.metadata import Kind # Indexer
import calendar # converts datetime to utc
import collections
import copy
import datetime
import itertools # slicing streams
import json
import logging # error logging
import re
from id_model import Activity, Cohort, User
import config
import core
import util
class NamedModel(core.Model):
    """Ancestor for specialized entities that can be retrieved directly by
    knowing their key name. They do not have ids like core entities do.
    Example query:
    Aggregator.get_by_key_name('the aggregator')
    """

    @classmethod
    def create(klass, **kwargs):
        # A key name is mandatory; these entities have no numeric ids.
        if 'key_name' not in kwargs:
            raise Exception("NamedModel entities must have a key name.")
        return klass(**kwargs)

    @classmethod
    def get_by_id(klass, key_name):
        """Alias of get_by_key_name so core.Model can fetch kind-agnostically.

        The App Engine method of this name looks up entities by a *numeric*
        id, which doesn't apply for NamedModel instances.
        """
        return klass.get_by_key_name(key_name)

    def to_dict(self):
        """Dictionary representation including the dynamic key_name, which
        Model.to_dict() alone would omit."""
        result = core.Model.to_dict(self)
        result['key_name'] = self.key().name()
        return result

    def __str__(self):
        """Readable representation, e.g. <named_model.Pd "Pd_oha4tp8a4tph1">."""
        return '<named_model.{} "{}">'.format(
            type(self).__name__, self.key().name())

    def __repr__(self):
        """Unambiguous representation; key names already are unambiguous,
        so simply reuse __str__ (native repr is a useless memory address)."""
        return str(self)

    # Entities fetched separately are distinct Python objects, so compare
    # and hash by datastore key to make `entity == entity` and membership
    # tests behave sensibly.
    def __eq__(self, value):
        """True iff both entities share class and datastore key.
        NOT triggered by `foo is bar`."""
        if self.__class__ != value.__class__:
            return False
        return str(self.key()) == str(value.key())

    def __ne__(self, value):
        """Defined alongside __eq__ (required in Python 2), otherwise
        A == B and A != B could both be True."""
        return not self.__eq__(value)

    def __hash__(self):
        """Hash by key string so `entity in entity_list` works."""
        return hash(str(self.key()))
class Aggregator(NamedModel):
"""Update aggregated statistics on student progress.
## Purpose
"Complex writes for easy reads." We want summaries of complex tracking
information available on the dashboard so researchers and school admins
can review the progress of their respective participants.
## Design
The aggregation spider stores a timestamp will continually check
for changes more recent than that timestamp. After completing a check, it
sets the timestamp either to the current time (if no changes were found) or
to the most recent change it found.
The Aggregator is interested in two kinds of changes:
1. New progress pds where the variable is of the form 'sX__progress', where
X is the session number.
2. Modified student entities. This is relevent if a student's certification
status or participation code have changed.
If it finds any changes like this, then the aggregator will start a three-
tiered set up updates. Data from progress pds is summarized and stored with
user entities. Those summaries are themselves summarized and stored with
the related activities. This is done once again for cohorts.
## Known Inefficiency
If a cohort has had multiple activity updates at different times, it will
be re-summarized every time. Functionally, this should not be too big a
problem since cohorts are small.
"""
# Aggregator properties
last_check = util.DictionaryProperty()
    def get_changed(self, kind):
        """Get all entities of a kind which have been modified recently.

        Args:
            kind: entity kind string, e.g. 'pd', 'user', 'activity'.
        Returns:
            Up to 50 non-test entities of that kind modified after the
            stored self.last_check[kind] timestamp, ordered by modified time.
        Side effect: advances self.last_check[kind] to the modified time of
        the last entity returned, or to "now" if nothing matched.
        """
        # Fetch in smaller chunks to prevent the process from being too slow.
        fetch_size = 50
        # First run for this kind: start from the epoch so everything counts.
        if kind not in self.last_check or not self.last_check[kind]:
            self.last_check[kind] = datetime.datetime(1970, 1, 1, 0, 0)
        # Check for updated entities of specified kind.
        klass = core.kind_to_class(kind)
        query = klass.all().filter('is_test =', False)
        # Do NOT filter by deleted, b/c we want recently-deleted entities
        # to come up in this query as "changed", forcing their parent to
        # update their totals downward.
        if kind == 'pd':
            # The exception for this is pd, which uses deletion differently.
            # We never have a reason to pay attention to deleted pd (except
            # for manual debugging and data analysis). And, in fact, we never
            # want pd progress values to decrease.
            query.filter('deleted =', False)
        # This only applies to pds for now, which need public = True.
        if kind in config.kinds_with_get_filters:
            for filter_tuple in klass.get_filters():
                query.filter(*filter_tuple)
        query.filter('modified >', self.last_check[kind])
        query.order('modified')
        result = query.fetch(fetch_size)
        # Set last_check for where most recent update left off.
        if len(result) > 0:
            self.last_check[kind] = result[-1].modified
        else:
            # Nothing new: fast-forward so the next run skips this window.
            self.last_check[kind] = datetime.datetime.now()
        return result
    def save(self, kind, changed_entities):
        """Save entities with their new aggregation data, but don't update
        their modified time.
        Otherwise, the aggregator would constantly be going over entities it
        had modified, rather than the ones we're really interested in.
        """
        # NOTE(review): `kind` is unused in the body; presumably kept for
        # call-site symmetry -- confirm before removing.
        for e in changed_entities:
            # Copy the aggregated_data property to a JSON-serialized one, which
            # we can analyze through BigQuery.
            e.aggregation_json = e.aggregation_data
            # Record that the aggregator touched this entity.
            e.aggregated = self.datetime()
        # Save w/o changing modified time.
        # NOTE(review): set_modified_time is not a standard db.put kwarg;
        # assumes a project-patched datastore layer -- confirm.
        db.put(changed_entities, set_modified_time=False)
def summarize_students(self, students, activity_ordinal):
"""Given all the students of an activity, sums up their data."""
summary = {
'n': 0,
'completed': 0,
'makeup_eligible': 0,
'makeup_ineligible': 0,
'uncoded': 0
}
for student in students:
# If any of the student's codes indicate that they somehow aren't
# a real student (e.g. marked "Discard"), don't count them at all.
exclude_student = False
for ordinal, code in student.status_codes.items():
if code and config.status_codes[code]['exclude_from_counts']:
exclude_student = True
if exclude_student:
# Don't do anything with this student, continue to the next one
continue
student.aggregation_data.setdefault(
activity_ordinal,
copy.deepcopy(student.aggregation_data_template))
agg_data = student.aggregation_data[activity_ordinal]
status_code = student.status_codes.setdefault(
activity_ordinal, None)
if agg_data['progress'] is 100:
summary['completed'] += 1
elif status_code:
status = config.status_codes[status_code]
if not status['study_eligible']:
# Don't count this student at all. Skip to next one.
continue
if status['counts_as_completed']:
summary['completed'] += 1
elif status['makeup_eligible']:
summary['makeup_eligible'] += 1
else:
summary['makeup_ineligible'] += 1
else:
summary['uncoded'] += 1
# Study ineligible students and exclude_from_count students are
# not counted in this total b/c they exit the for loop higher up.
summary['n'] += 1
return summary
def aggregate_to_users(self):
"""Calculate session progress and accounted_for status for users.
'progress' is just a redundant storage of a student's progress pd
value. This makes it easier to display the progress of many students at
once without having to pull pd.
"""
# Some terminology:
# changed_X: these things are around because the aggregator detected
# they have recent modifications; they come from
# aggregator.get_changed(kind).
# referenced_X: these things are referenced by things that have
# changed. Often, they're around because we need to "back-query"
# stuff to get complete totals. They're not necessarily changed,
# but we have to roll them into our statistics because they're
# siblings of things that have changed, and we're summarizing all
# the children into the parent.
# Trigger 1 of 2: modified pds which need to aggregate to their users.
# E.g. a student's progress pd has increased.
util.profiler.add_event('...get changed pd')
changed_pds = self.get_changed('pd')
# We only want user-related progress pds which have an activity ordinal
# (some testing pds don't have an ordinal). Do some filtering.
util.profiler.add_event('...process')
referenced_user_ids = []
changed_progress_pds = []
for pd in changed_pds:
is_user_pd = core.get_kind(pd.scope) == 'user'
has_ordinal = isinstance(pd.activity_ordinal, (int, long))
if pd.is_progress() and is_user_pd and has_ordinal:
if pd.scope not in referenced_user_ids:
referenced_user_ids.append(pd.scope)
changed_progress_pds.append(pd)
if len(referenced_user_ids) > 0:
referenced_users = User.get_by_id(referenced_user_ids)
else:
referenced_users = []
# Trigger 2 of 2: modified users whose status codes may have changed.
# E.g. a student is marked absent, "accounting for" their lack of
# participation.
util.profiler.add_event('...get changed users')
changed_users = self.get_changed('user')
# Unique list of users from both triggers.
changed_users = list(set(changed_users + referenced_users))
# Most aggregation runs will have nothing new to aggregate. Exit
# quickly to save cpu load.
if len(changed_users) is 0:
return []
# Now with all the data in hand, start summarizing it and storing it
# in user entities.
util.profiler.add_event('...process')
pds_by_user = util.list_by(changed_progress_pds, 'scope')
for user in changed_users:
if user.id in pds_by_user:
# Under normal operation, there should only be one active pd
# per user per activity ordinal. But we've found evidence that
# there may be several, due to inconsistent querying in
# self.get_changed(). If there are multiple that might cause
# incorrect aggregation, log an error, and
# make sure to choose the highest progress value available.
pds = pds_by_user[user.id]
pds_by_ordinal = util.list_by(pds, 'activity_ordinal')
if any([len(v) > 1 for v in pds_by_ordinal.values()]):
logging.warning(
"Multiple pds found in aggregation: {}"
.format(json.dumps([pd.to_dict() for pd in pds])))
agg_data = {}
for pd in pds:
# Record progress values by activity ordinal
# Why coerce to int here? Sometimes it's a long.
# https://cloud.google.com/appengine/docs/python/datastore/typesandpropertyclasses#IntegerProperty
o = int(pd.activity_ordinal)
user.aggregation_data.setdefault(
o, copy.deepcopy(user.aggregation_data_template))
# v = int(pd.value)
# user.aggregation_data[o]['progress'] = v
# if v is 100 and user.get_status_code(o) is None:
new_v = int(pd.value)
# Keep track of which ordinals we've seen before
if o in agg_data and new_v < agg_data[o]['progress']:
# See pull #306
logging.error(
"Out-of-order hypothesis confirmed! {}"
.format(pds_by_user[user.id]))
elif o not in agg_data:
agg_data[o] = {'progress': None}
# Only save the value if it's larger than the previous
# (only relevant when there are more than one).
current_v = agg_data[o]['progress']
if current_v is None or new_v > current_v:
agg_data[o]['progress'] = new_v
if new_v is 100 and user.get_status_code(o) is None:
# Also assign the status code "Completed" to this
# student. This is technically redundant, but makes
# the data clearer.
user.set_status_code(o, 'COM')
# Copy compiled results into the user.
for k, v in agg_data.items():
user.aggregation_data[k] = agg_data[k]
# Save changes to users and the aggregator timestamp.
util.profiler.add_event('...save users')
self.save('user', changed_users)
return changed_users
def aggregate_to_activities(self, changed_users):
"""Calculate basic stats of users in an activity for reporting.
For instance, a count of all users with progress 100 will be saved
with the activity as aggregation_data['all']['completed']."""
# Firm assumptions: these are enforced.
# 1. We only want to aggregate student-users to student-type activities
# Soft assumptions: violating these assumptions will log errors but
# will not break the aggregator.
# 1. All students have one associated classroom.
changed_students = []
students_breaking_assumptions = []
for user in changed_users:
if user.user_type == 'student':
if len(user.assc_classroom_list) is 1:
changed_students.append(user)
else:
students_breaking_assumptions.append(user)
if len(students_breaking_assumptions) > 0:
logging.error("Students with bad classroom associations: {}"
.format(students_breaking_assumptions))
# Set up an index of users and activities by classroom (we'll need this
# later).
index = {}
cohort_ids = [] # Get list of unique cohorts.
for s in changed_students:
cl_id = s.assc_classroom_list[0]
if cl_id not in index:
index[cl_id] = {'students': [], 'activities': []}
co_id = s.assc_cohort_list[0]
if co_id not in cohort_ids:
cohort_ids.append(co_id)
classroom_ids = index.keys()
if len(classroom_ids) is 0:
return []
# Activities don't store relationships with users directly. To find
# them, query via classroom ids.
util.profiler.add_event('...get related activities')
query = Activity.all().filter('deleted =', False)
query.filter('is_test =', False)
query.filter('user_type =', 'student')
# If there are more than 30, don't filter by classroom id directly,
# because App Engine limits subqueries. Instead, filter the query by
# cohort, then further filter by classroom in-memory.
if len(classroom_ids) < 30:
logging.info(
"Using normal classroom query for classroom activities")
query.filter('assc_classroom_list IN', classroom_ids)
fetched_activities = query.run()
else:
logging.info(
"> 30 classrooms: Using cohort query for classroom activities")
query.filter('assc_cohort_list IN', cohort_ids)
fetched_activities = [a for a in query.run()
if a.assc_classroom_list[0] in classroom_ids]
# Soft assumptions: violating these assumptions will log errors but
# will not break the aggregator.
# 1. All student activities have one associated classroom.
util.profiler.add_event('...process')
referenced_activities = []
activities_breaking_assumptions = []
for a in fetched_activities:
if len(a.assc_classroom_list) is 1:
referenced_activities.append(a)
else:
activities_breaking_assumptions.append(a)
if len(activities_breaking_assumptions) > 0:
logging.error("Activities with bad classroom associations: {}"
.format(activities_breaking_assumptions))
# Add the activities to the index.
for a in referenced_activities:
c_id = a.assc_classroom_list[0]
index[c_id]['activities'].append(a)
# Re-query users for these classrooms so we can calculate the total
# number of students per classroom not just the change in number of
# students per classroom.
util.profiler.add_event('...get related users')
query = User.all().filter('deleted =', False)
query.filter('is_test =', False)
query.filter('user_type =', 'student')
# If there are more than 30, don't filter by classroom id directly,
# because App Engine limits subqueries. Instead, filter the query by
# cohort, then further filter by classroom in-memory.
if len(classroom_ids) < 30:
logging.info("Using normal classroom query for activity users")
query.filter('assc_classroom_list IN', classroom_ids)
referenced_students = query.run()
else:
logging.info(
"> 30 classrooms: using cohort query for activity users")
query.filter('assc_cohort_list IN', cohort_ids)
referenced_students = [
u for u in query.run()
if u.assc_classroom_list[0] in classroom_ids]
# Add the students to the index.
util.profiler.add_event('...process')
changed_student_index = {s.id: s for s in changed_students}
for s in referenced_students:
# Important: some of these users *just changed*, i.e. just had
# their aggregation modified and have been passed in to this
# function as changed_students. They may be more up to date than
# the version of the same entity returned by they query.
if s.id in changed_student_index:
# So, when possible, prefer the already-in-memory version.
s = changed_student_index[s.id]
# If any of the student's codes indicate that they somehow aren't
# a real student (e.g. marked "Discard"), don't count them at all.
exclude_student = False
for ordinal, code in s.status_codes.items():
if code and config.status_codes[code]['exclude_from_counts']:
exclude_student = True
if exclude_student:
# Don't do anything with this student, continue to the next one
continue
c_id = s.assc_classroom_list[0]
index[c_id]['students'].append(s)
# Iterate over activities, calculating stats based on the related set
# of users.
# See the summarize_students() method and/or the docs:
# https://docs.google.com/document/d/1tmZhuWMDX29zte6f0A8yXlSUvqluyMNEnFq8qyJq1pA
for classroom_id, d in index.items():
for a in d['activities']:
a.aggregation_data['total_students'] = len(d['students'])
cert_students = [s for s in d['students'] if s.certified]
a.aggregation_data['certified_students'] = len(cert_students)
# Why coerce to int here? Sometimes it's a long.
# https://cloud.google.com/appengine/docs/python/datastore/typesandpropertyclasses#IntegerProperty
s = self.summarize_students(cert_students,
int(a.activity_ordinal))
a.aggregation_data['certified_study_eligible_dict'] = s
util.profiler.add_event('...save activities')
self.save('activity', referenced_activities)
return referenced_activities
def aggregate_to_cohorts(self, changed_activities):
"""Summarize activity-stats at the cohort level."""
# Query for activities whose scheduled_date may have changed.
# Combine it with the changed activities that were passed in,
# preferring in-memory versions where there are overlaps.
changed_activity_index = {a.id: a for a in changed_activities}
for a in self.get_changed('activity'):
if a.id not in changed_activity_index:
changed_activity_index[a.id] = a
# What cohorts need to be updated based on the changed activities?
cohort_ids = list(set([a.assc_cohort_list[0]
for a in changed_activity_index.values()]))
if len(cohort_ids) is 0:
return []
util.profiler.add_event('...get related cohorts')
referenced_cohorts = Cohort.get_by_id(cohort_ids) # speedy key fetch
# Index cohorts by id for easy reference. Also reset their aggregation
# data so we don't add cumulatively across aggregation runs.
util.profiler.add_event('...process')
cohort_index = {}
for c in referenced_cohorts:
cohort_index[c.id] = c
c.aggregation_data = {}
# Re-query for all activities in these cohorts so we can calculate true
# totals, not just incremental changes.
util.profiler.add_event('...get related activities')
query = Activity.all().filter('deleted =', False)
query.filter('is_test =', False)
query.filter('user_type =', 'student')
query.filter('assc_cohort_list IN', cohort_ids)
referenced_activities = query.run()
# Iterate over activities, incrementing cohort values.
util.profiler.add_event('...process')
for a in referenced_activities:
# Prefer recently-changed, in-memory entities
if a.id in changed_activity_index:
a = changed_activity_index[a.id]
a_agg = a.aggregation_data
cohort = cohort_index[a.assc_cohort_list[0]]
# Why coerce to int here? Sometimes it's a long.
# https://cloud.google.com/appengine/docs/python/datastore/typesandpropertyclasses#IntegerProperty
c_agg = cohort.aggregation_data.setdefault(
int(a.activity_ordinal),
copy.deepcopy(cohort.aggregation_data_template))
# Count incomplete rosters.
if not a.roster_complete:
c_agg['incomplete_rosters'] += 1
# Store activity status.
c_agg[a.interpreted_status()] += 1
# Store student counts.
c_agg['total_students'] += a_agg['total_students']
c_agg['certified_students'] += a_agg['certified_students']
# Count all the certified study eligible stats.
for variable, value in a_agg['certified_study_eligible_dict'].items():
c_agg['certified_study_eligible_dict'][variable] += value
util.profiler.add_event('...save cohorts')
self.save('cohort', referenced_cohorts)
return referenced_cohorts
class Indexer(NamedModel):
"""
Update entity search data store
Design
The indexer will find entities which have changed and add them
to the entity index.
orginal author
bmh September 2013
"""
# Data
last_check = db.DateTimeProperty()
# Do not search
blacklist = [
'Aggregator', # boring
'Pd', # personal
'Indexer', # boring
'QualtricsLink', # boring
# 'ErrorChecker', # boring
'Stratifier', # boring
'StratifierHistory', # boring
]
# Limit the number of items we are willing to index
max_entities_to_index = 10
doc_id_leading_char = re.compile(r'[A-Za-z]')
doc_id_valid_char = re.compile(r'[A-Za-z0-9_]')
def get_index(self):
return search.Index(name='index')
def get_changed_entities(self):
# get classes
# not blacklisted
klass_names = [
k.kind_name
for k in Kind.all()
if k.kind_name not in self.blacklist
]
# check for cases where the Klass cannot be converted (KeyError)
# this happens in production for reasons I don't understand
# bmh 2013
Klasses = []
for k in klass_names:
try:
Klass = core.kind_to_class(k)
except AttributeError:
pass
else:
Klasses.append(Klass)
# get entites
if not self.last_check:
self.last_check = 0
entities = [
e
for Klass in Klasses
for e in Klass.all().filter(
"modified > ", self.last_check
).order("modified").fetch(self.max_entities_to_index)
]
return entities
def entity_to_document(self, entity):
doc = search.Document(
doc_id=self.clean_doc_id(entity.key().name()),
fields=[
search.TextField(name=self.clean_doc_id(key),
value=unicode(value))
for key, value
in entity.to_dict().items()
]
)
return doc
def clean_doc_id(self, string):
"""Make sure any strings going into the indexer are properly formed."""
doc_id_leading_char = r'^[^A-Za-z]*'
doc_id_valid_char = r'[^A-Za-z0-9_]'
# Get rid of any unicode
ascii_string = string.encode('ascii', 'ignore')
# Get rid of chars that are gobally wrong.
partially_clean = re.sub(doc_id_valid_char, '', ascii_string)
# Get rid of all chars from the beginning up to the first valid lead
# char.
clean_string = re.sub(doc_id_leading_char, '', partially_clean)
return clean_string
class ErrorChecker(NamedModel):
"""
Check for recent errors using log api
Design
The error checker will keep track of how long it has been since a check
occured and how long since an email alert was sent.
It will also facilite searching the error log.
orginal author
bmh October 2013
"""
# constants
# How long will we wait between emails?
minimum_seconds_between_emails = 60 * 60 # 1 hour
maximum_requests_to_email = 100 # how long can the log be
maximum_entries_per_request = 100 # how long can the log be
# error levels
level_map = collections.defaultdict(lambda x: 'UNKNOWN')
level_map[logservice.LOG_LEVEL_DEBUG] = 'DEBUG'
level_map[logservice.LOG_LEVEL_INFO] = 'INFO'
level_map[logservice.LOG_LEVEL_WARNING] = 'WARNING'
level_map[logservice.LOG_LEVEL_ERROR] = 'ERROR'
level_map[logservice.LOG_LEVEL_CRITICAL] = 'CRITICAL'
# email stuff
to_address = config.to_dev_team_email_address
from_address = config.from_yellowstone_email_address
subject = "Error(s) during calls to: "
body = """
President Roosevelt,
Old Faithful has erupted again. More information is available on the dashboard.
https://console.cloud.google.com/logs/viewer?project=yellowstoneplatform&minLogLevel=500
your humble national park,
Yellowstone
"""
# Data
last_check = db.DateTimeProperty()
last_email = db.DateTimeProperty()
def to_unix_time(self, dt):
return calendar.timegm( dt.timetuple() )
def to_utc_time(self, unix_time):
return datetime.datetime.utcfromtimestamp(unix_time)
def any_new_errors(self):
since = self.last_check if self.last_check else self.datetime()
log_stream = logservice.fetch(
start_time = self.to_unix_time( since ),
minimum_log_level = logservice.LOG_LEVEL_ERROR
)
return next(iter(log_stream), None) is not None
def get_recent_log(self):
""" see api
https://developers.google.com/appengine/docs/python/logs/functions
"""
out = ""
since = self.last_check if self.last_check else self.datetime()
log_stream = logservice.fetch(
start_time = self.to_unix_time( since ),
minimum_log_level = logservice.LOG_LEVEL_ERROR,
include_app_logs = True
)
requests = itertools.islice(log_stream, 0, self.maximum_requests_to_email)
for r in requests:
log = itertools.islice(r.app_logs, 0, self.maximum_entries_per_request)
log = [
self.level_map[l.level] + '\t' +
str(self.to_utc_time(l.time)) + '\t' +
l.message + '\n'
for l in log
]
out = out + r.combined + '\n' + ''.join(log) + '\n\n'
return out
def get_error_summary(self):
""" A short high level overview of the error.
This was designed to serve as the email subject line so that
developers could quickly see if an error was a new type of error.
It returns the resources that were requested as a comma
seperated string:
e.g.
/api/put/pd, /api/...
see google api
https://developers.google.com/appengine/docs/python/logs/functions
"""
# Get a record of all the requests which generated an error
# since the last check was performed, typically this will be
# at most one error, but we don't want to ignore other errors if
# they occurred.
since = self.last_check if self.last_check else self.datetime()
log_stream = logservice.fetch(
start_time=self.to_unix_time(since),
minimum_log_level=logservice.LOG_LEVEL_ERROR,
include_app_logs=True
)
# Limit the maximum number of errors that will be processed
# to avoid insane behavior that should never happen, like
# emailing a report with a googleplex error messages.
requests = itertools.islice(
log_stream, 0, self.maximum_requests_to_email
)
# This should return a list of any requested resources
# that led to an error. Usually there will only be one.
# for example:
# /api/put/pd
# or
# /api/put/pd, /api/another_call
out = ', '.join(set([r.resource for r in requests]))
return out
def should_email(self):
since_last = ( self.datetime() - self.last_email ).seconds if self.last_email else 10000000
return since_last > self.minimum_seconds_between_emails
def mail_log(self):
body = self.body + self.get_recent_log()
subject = self.subject + self.get_error_summary()
mail.send_mail(self.from_address, self.to_address, subject, body)
self.last_email = self.now
return (subject, body)
def check(self):
self.now = self.datetime()
should_email = self.should_email()
new_errors = self.any_new_errors()
# check for errors
if new_errors and should_email:
message = self.mail_log()
else:
message = None
logging.info("any_new_errors: {}, should_email: {}, message: {}"
.format(new_errors, should_email, 'None' if message is None else message[0]))
self.last_check = self.now
# TODO(benjaminhaley) this should return simpler output, ala
# chris's complaint https://github.com/daveponet/pegasus/pull/197/files#diff-281842ae8036e3fcb830df255cd15610R663
return {
'email content': message,
'checked for new errors': should_email
}
class QualtricsLink(NamedModel):
"""Unique Links class for Qualtrics Survey.
Really just a URL, which is stored in the link property and the key name.
Oh, and also a session ordinal.
"""
# Links correspond to a specific session, specified here by ordinal.
session_ordinal = db.IntegerProperty()
program = db.StringProperty()
# The body of the link:
link = db.StringProperty()
@classmethod
def get_link(klass, program, session_ordinal, default_link):
@db.transactional
def _get_link_transactional(key):
link = db.get(key)
if link is None:
raise db.TransactionFailedError()
rawlink = link.link
db.delete(link)
return rawlink
# Fetch
query = QualtricsLink.all().order('link')
query.filter('session_ordinal =', session_ordinal)
query.filter('program =', program.id)
# Notify devs if running low on Qualtrics Links
if query.count() < 1000:
logging.error(
"Running low on Qualtrics Links for program {} ({})!"
"Import more soon!".format(program.name, program.abbreviation))
link_keys = query.fetch(50, keys_only=True)
if not link_keys:
logging.error('Out of links! Returning default survey link!')
return default_link
# Verify link's existence
raw_link = None
for key in link_keys:
try:
raw_link = _get_link_transactional(key)
break
except db.TransactionFailedError:
continue
# Catch if raw_link is empty
if not raw_link:
logging.error('Something went wrong with Qualtrics association!')
logging.error('Assigning user default link!')
raw_link = default_link
return raw_link
class ShortLink(NamedModel):
"""Maps from an easy-to-remember short link to another arbitrary link.
Keys are the short link text. Long links must begin with the protocol
('http:' or 'https:'). So to set up perts.net/foo to point to google, write
this:
ShortLink.create(key_name='foo', long_link='http://www.google.com').put()
"""
long_link = db.StringProperty()
deleted = db.BooleanProperty(default=False)
@classmethod
def get(klass, request_path):
"""Returns long link if short link exists, otherwise None."""
# If there's a leading slash, take it off.
if request_path[:1] == '/':
request_path = request_path[1:]
return ShortLink.get_by_key_name(request_path)
class Timestamp(NamedModel):
"""Ultra-simple timestamp storage. Give it any name you like.
Originally created to keep track of gradual processing of entities, like
collecting and aggregating pds, or systematically updating user objects.
"""
timestamp = db.DateTimeProperty()
|
[
"google.appengine.api.search.Index",
"id_model.Cohort.get_by_id",
"collections.defaultdict",
"google.appengine.ext.db.get",
"google.appengine.ext.db.put",
"google.appengine.ext.db.IntegerProperty",
"core.Model.to_dict",
"logging.error",
"google.appengine.ext.db.StringProperty",
"datetime.datetime.utcfromtimestamp",
"util.list_by",
"re.sub",
"datetime.datetime.now",
"copy.deepcopy",
"core.kind_to_class",
"id_model.User.all",
"google.appengine.ext.db.metadata.Kind.all",
"datetime.datetime",
"itertools.islice",
"id_model.User.get_by_id",
"re.compile",
"google.appengine.ext.db.DateTimeProperty",
"core.get_kind",
"google.appengine.api.mail.send_mail",
"google.appengine.ext.db.BooleanProperty",
"google.appengine.ext.db.TransactionFailedError",
"util.DictionaryProperty",
"logging.info",
"id_model.Activity.all",
"google.appengine.ext.db.delete",
"util.profiler.add_event"
] |
[((5117, 5142), 'util.DictionaryProperty', 'util.DictionaryProperty', ([], {}), '()\n', (5140, 5142), False, 'import util\n'), ((25947, 25968), 'google.appengine.ext.db.DateTimeProperty', 'db.DateTimeProperty', ([], {}), '()\n', (25966, 25968), False, 'from google.appengine.ext import db\n'), ((26404, 26426), 're.compile', 're.compile', (['"""[A-Za-z]"""'], {}), "('[A-Za-z]')\n", (26414, 26426), False, 'import re\n'), ((26452, 26478), 're.compile', 're.compile', (['"""[A-Za-z0-9_]"""'], {}), "('[A-Za-z0-9_]')\n", (26462, 26478), False, 'import re\n'), ((29121, 29165), 'collections.defaultdict', 'collections.defaultdict', (["(lambda x: 'UNKNOWN')"], {}), "(lambda x: 'UNKNOWN')\n", (29144, 29165), False, 'import collections\n'), ((29889, 29910), 'google.appengine.ext.db.DateTimeProperty', 'db.DateTimeProperty', ([], {}), '()\n', (29908, 29910), False, 'from google.appengine.ext import db\n'), ((29928, 29949), 'google.appengine.ext.db.DateTimeProperty', 'db.DateTimeProperty', ([], {}), '()\n', (29947, 29949), False, 'from google.appengine.ext import db\n'), ((34545, 34565), 'google.appengine.ext.db.IntegerProperty', 'db.IntegerProperty', ([], {}), '()\n', (34563, 34565), False, 'from google.appengine.ext import db\n'), ((34580, 34599), 'google.appengine.ext.db.StringProperty', 'db.StringProperty', ([], {}), '()\n', (34597, 34599), False, 'from google.appengine.ext import db\n'), ((34639, 34658), 'google.appengine.ext.db.StringProperty', 'db.StringProperty', ([], {}), '()\n', (34656, 34658), False, 'from google.appengine.ext import db\n'), ((36522, 36541), 'google.appengine.ext.db.StringProperty', 'db.StringProperty', ([], {}), '()\n', (36539, 36541), False, 'from google.appengine.ext import db\n'), ((36556, 36589), 'google.appengine.ext.db.BooleanProperty', 'db.BooleanProperty', ([], {'default': '(False)'}), '(default=False)\n', (36574, 36589), False, 'from google.appengine.ext import db\n'), ((37175, 37196), 'google.appengine.ext.db.DateTimeProperty', 
'db.DateTimeProperty', ([], {}), '()\n', (37194, 37196), False, 'from google.appengine.ext import db\n'), ((1769, 1793), 'core.Model.to_dict', 'core.Model.to_dict', (['self'], {}), '(self)\n', (1787, 1793), False, 'import core\n'), ((5569, 5593), 'core.kind_to_class', 'core.kind_to_class', (['kind'], {}), '(kind)\n', (5587, 5593), False, 'import core\n'), ((7468, 7517), 'google.appengine.ext.db.put', 'db.put', (['changed_entities'], {'set_modified_time': '(False)'}), '(changed_entities, set_modified_time=False)\n', (7474, 7517), False, 'from google.appengine.ext import db\n'), ((10650, 10694), 'util.profiler.add_event', 'util.profiler.add_event', (['"""...get changed pd"""'], {}), "('...get changed pd')\n", (10673, 10694), False, 'import util\n'), ((10900, 10937), 'util.profiler.add_event', 'util.profiler.add_event', (['"""...process"""'], {}), "('...process')\n", (10923, 10937), False, 'import util\n'), ((11734, 11781), 'util.profiler.add_event', 'util.profiler.add_event', (['"""...get changed users"""'], {}), "('...get changed users')\n", (11757, 11781), False, 'import util\n'), ((12233, 12270), 'util.profiler.add_event', 'util.profiler.add_event', (['"""...process"""'], {}), "('...process')\n", (12256, 12270), False, 'import util\n'), ((12293, 12336), 'util.list_by', 'util.list_by', (['changed_progress_pds', '"""scope"""'], {}), "(changed_progress_pds, 'scope')\n", (12305, 12336), False, 'import util\n'), ((15246, 15286), 'util.profiler.add_event', 'util.profiler.add_event', (['"""...save users"""'], {}), "('...save users')\n", (15269, 15286), False, 'import util\n'), ((17165, 17217), 'util.profiler.add_event', 'util.profiler.add_event', (['"""...get related activities"""'], {}), "('...get related activities')\n", (17188, 17217), False, 'import util\n'), ((18336, 18373), 'util.profiler.add_event', 'util.profiler.add_event', (['"""...process"""'], {}), "('...process')\n", (18359, 18373), False, 'import util\n'), ((19230, 19277), 'util.profiler.add_event', 
'util.profiler.add_event', (['"""...get related users"""'], {}), "('...get related users')\n", (19253, 19277), False, 'import util\n'), ((20221, 20258), 'util.profiler.add_event', 'util.profiler.add_event', (['"""...process"""'], {}), "('...process')\n", (20244, 20258), False, 'import util\n'), ((22380, 22425), 'util.profiler.add_event', 'util.profiler.add_event', (['"""...save activities"""'], {}), "('...save activities')\n", (22403, 22425), False, 'import util\n'), ((23325, 23374), 'util.profiler.add_event', 'util.profiler.add_event', (['"""...get related cohorts"""'], {}), "('...get related cohorts')\n", (23348, 23374), False, 'import util\n'), ((23404, 23432), 'id_model.Cohort.get_by_id', 'Cohort.get_by_id', (['cohort_ids'], {}), '(cohort_ids)\n', (23420, 23432), False, 'from id_model import Activity, Cohort, User\n'), ((23610, 23647), 'util.profiler.add_event', 'util.profiler.add_event', (['"""...process"""'], {}), "('...process')\n", (23633, 23647), False, 'import util\n'), ((23919, 23971), 'util.profiler.add_event', 'util.profiler.add_event', (['"""...get related activities"""'], {}), "('...get related activities')\n", (23942, 23971), False, 'import util\n'), ((24290, 24327), 'util.profiler.add_event', 'util.profiler.add_event', (['"""...process"""'], {}), "('...process')\n", (24313, 24327), False, 'import util\n'), ((25564, 25606), 'util.profiler.add_event', 'util.profiler.add_event', (['"""...save cohorts"""'], {}), "('...save cohorts')\n", (25587, 25606), False, 'import util\n'), ((26521, 26547), 'google.appengine.api.search.Index', 'search.Index', ([], {'name': '"""index"""'}), "(name='index')\n", (26533, 26547), False, 'from google.appengine.api import search\n'), ((28271, 28314), 're.sub', 're.sub', (['doc_id_valid_char', '""""""', 'ascii_string'], {}), "(doc_id_valid_char, '', ascii_string)\n", (28277, 28314), False, 'import re\n'), ((28431, 28479), 're.sub', 're.sub', (['doc_id_leading_char', '""""""', 'partially_clean'], {}), "(doc_id_leading_char, 
'', partially_clean)\n", (28437, 28479), False, 'import re\n'), ((30086, 30131), 'datetime.datetime.utcfromtimestamp', 'datetime.datetime.utcfromtimestamp', (['unix_time'], {}), '(unix_time)\n', (30120, 30131), False, 'import datetime\n'), ((30897, 30960), 'itertools.islice', 'itertools.islice', (['log_stream', '(0)', 'self.maximum_requests_to_email'], {}), '(log_stream, 0, self.maximum_requests_to_email)\n', (30913, 30960), False, 'import itertools\n'), ((32553, 32616), 'itertools.islice', 'itertools.islice', (['log_stream', '(0)', 'self.maximum_requests_to_email'], {}), '(log_stream, 0, self.maximum_requests_to_email)\n', (32569, 32616), False, 'import itertools\n'), ((33285, 33350), 'google.appengine.api.mail.send_mail', 'mail.send_mail', (['self.from_address', 'self.to_address', 'subject', 'body'], {}), '(self.from_address, self.to_address, subject, body)\n', (33299, 33350), False, 'from google.appengine.api import mail\n'), ((5460, 5495), 'datetime.datetime', 'datetime.datetime', (['(1970)', '(1)', '(1)', '(0)', '(0)'], {}), '(1970, 1, 1, 0, 0)\n', (5477, 5495), False, 'import datetime\n'), ((6739, 6762), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (6760, 6762), False, 'import datetime\n'), ((11464, 11499), 'id_model.User.get_by_id', 'User.get_by_id', (['referenced_user_ids'], {}), '(referenced_user_ids)\n', (11478, 11499), False, 'from id_model import Activity, Cohort, User\n'), ((17628, 17697), 'logging.info', 'logging.info', (['"""Using normal classroom query for classroom activities"""'], {}), "('Using normal classroom query for classroom activities')\n", (17640, 17697), False, 'import logging\n'), ((17852, 17928), 'logging.info', 'logging.info', (['"""> 30 classrooms: Using cohort query for classroom activities"""'], {}), "('> 30 classrooms: Using cohort query for classroom activities')\n", (17864, 17928), False, 'import logging\n'), ((19684, 19747), 'logging.info', 'logging.info', (['"""Using normal classroom query for activity 
users"""'], {}), "('Using normal classroom query for activity users')\n", (19696, 19747), False, 'import logging\n'), ((19886, 19956), 'logging.info', 'logging.info', (['"""> 30 classrooms: using cohort query for activity users"""'], {}), "('> 30 classrooms: using cohort query for activity users')\n", (19898, 19956), False, 'import logging\n'), ((31007, 31072), 'itertools.islice', 'itertools.islice', (['r.app_logs', '(0)', 'self.maximum_entries_per_request'], {}), '(r.app_logs, 0, self.maximum_entries_per_request)\n', (31023, 31072), False, 'import itertools\n'), ((34831, 34842), 'google.appengine.ext.db.get', 'db.get', (['key'], {}), '(key)\n', (34837, 34842), False, 'from google.appengine.ext import db\n'), ((34966, 34981), 'google.appengine.ext.db.delete', 'db.delete', (['link'], {}), '(link)\n', (34975, 34981), False, 'from google.appengine.ext import db\n'), ((35540, 35601), 'logging.error', 'logging.error', (['"""Out of links! Returning default survey link!"""'], {}), "('Out of links! 
Returning default survey link!')\n", (35553, 35601), False, 'import logging\n'), ((35964, 36029), 'logging.error', 'logging.error', (['"""Something went wrong with Qualtrics association!"""'], {}), "('Something went wrong with Qualtrics association!')\n", (35977, 36029), False, 'import logging\n'), ((36042, 36087), 'logging.error', 'logging.error', (['"""Assigning user default link!"""'], {}), "('Assigning user default link!')\n", (36055, 36087), False, 'import logging\n'), ((8475, 8523), 'copy.deepcopy', 'copy.deepcopy', (['student.aggregation_data_template'], {}), '(student.aggregation_data_template)\n', (8488, 8523), False, 'import copy\n'), ((11061, 11084), 'core.get_kind', 'core.get_kind', (['pd.scope'], {}), '(pd.scope)\n', (11074, 11084), False, 'import core\n'), ((12927, 12964), 'util.list_by', 'util.list_by', (['pds', '"""activity_ordinal"""'], {}), "(pds, 'activity_ordinal')\n", (12939, 12964), False, 'import util\n'), ((17234, 17248), 'id_model.Activity.all', 'Activity.all', ([], {}), '()\n', (17246, 17248), False, 'from id_model import Activity, Cohort, User\n'), ((19294, 19304), 'id_model.User.all', 'User.all', ([], {}), '()\n', (19302, 19304), False, 'from id_model import Activity, Cohort, User\n'), ((23988, 24002), 'id_model.Activity.all', 'Activity.all', ([], {}), '()\n', (24000, 24002), False, 'from id_model import Activity, Cohort, User\n'), ((24904, 24951), 'copy.deepcopy', 'copy.deepcopy', (['cohort.aggregation_data_template'], {}), '(cohort.aggregation_data_template)\n', (24917, 24951), False, 'import copy\n'), ((26703, 26713), 'google.appengine.ext.db.metadata.Kind.all', 'Kind.all', ([], {}), '()\n', (26711, 26713), False, 'from google.appengine.ext.db.metadata import Kind\n'), ((27026, 27047), 'core.kind_to_class', 'core.kind_to_class', (['k'], {}), '(k)\n', (27044, 27047), False, 'import core\n'), ((34894, 34921), 'google.appengine.ext.db.TransactionFailedError', 'db.TransactionFailedError', ([], {}), '()\n', (34919, 34921), False, 'from 
google.appengine.ext import db\n'), ((13657, 13702), 'copy.deepcopy', 'copy.deepcopy', (['user.aggregation_data_template'], {}), '(user.aggregation_data_template)\n', (13670, 13702), False, 'import copy\n')]
|
#!/usr/bin/env python3
from __future__ import print_function
from __future__ import absolute_import
import click
from taw.util import *
from taw.taw import * # This must be the end of imports
# =======================
# bash completion stuff
# =======================
click_global_dns_record_types = click.Choice(['A', 'AAAA', 'ALIAS', 'CNAME', 'MX', 'NS', 'PTR', 'SOA', 'SRV', 'TXT'])
def click_complete_for_zones(ctx, args, incomplete):
profile, _ = click_bash_completion_parse_profile_and_region(args)
possible_zones = look_for_completion_keywords(profile, "zone", r'^zone$')
return possible_zones
# ==============
# ZONE COMMAND
# ==============
@taw.group("zone")
@pass_global_parameters
def zone_group(params):
""" manage zones """
def complete_subdomain_name(possibly_subdomain, domain_name):
""" Complete a full domain name from a possibly subdomain name.
eg)
complete_subdomain_name("abc", "example.com.") => "abc.example.com."
complete_subdomain_name("abc.ab.", "example.com.") => "abc.ab."
complete_subdomain_name("abc.ab.example.com", "example.com.") => "abc.ab.example.com."
"""
if not domain_name.endswith('.'): domain_name += '.'
if possibly_subdomain.endswith('.' + domain_name): return possibly_subdomain
if possibly_subdomain.endswith('.' + domain_name[:-1]): return possibly_subdomain + '.'
if possibly_subdomain.endswith('.'): return possibly_subdomain
return possibly_subdomain + '.' + domain_name
@zone_group.command("add")
@click.argument('zonename', metavar='<zone name)>', autocompletion=click_complete_for_zones)
@click.argument('name', metavar='<subdomain name>')
@click.argument('type_str', metavar='<A|NS|TXT|...>', type=click_global_dns_record_types)
@click.argument('values', nargs=-1, required=True, metavar='<value (eg: 192.168.3.11)>')
@click.option('--ttl', metavar='TTL', type=int, default=3600)
@click.option('--weight', type=int, default=100)
@pass_global_parameters
def add_zonecmd(params, zonename, name, type_str, values, ttl, weight):
""" add a record to zone """
r53 = get_r53_connection()
zone_id = convert_zone_name_to_zone_id(zonename)
name = complete_subdomain_name(name, zonename)
resource_record_set = {
'Name': name,
'Type': type_str,
'TTL': ttl,
'ResourceRecords': [{'Value': v} for v in list(values)],
}
result = r53.change_resource_record_sets(
HostedZoneId=zone_id,
ChangeBatch={
'Comment': 'taw.py',
'Changes': [{
'Action': 'UPSERT',
'ResourceRecordSet': resource_record_set,
}]
}
)
change_info = result['ChangeInfo']
print("ID: %s, Status: %s" % (change_info['Id'], change_info['Status']))
@zone_group.command("name")
@click.argument('zonename', metavar='<zone name)>', autocompletion=click_complete_for_zones)
@click.argument('name', metavar='<subdomain name>')
@click.argument('targetname', metavar='<instance name|instance ID|bucket name:>')
@click.option('--ttl', metavar='TTL', type=int, default=3600)
@click.option('--weight', type=int, default=100)
@pass_global_parameters
def name_zonecmd(params, zonename, name, targetname, ttl, weight):
""" add an A record to the specified zone for a given EC2 instance or a given bucket.
\b
eg) Give an A record of 'db.example.com' for an instance with name 'db003'.
taw name example.com db db003
eg) Give a CNAME record of S3 Bucket 'static.example.com' for an S3 Bucket 'static.example.com'
taw name example.com static static.example.com:
"""
r53 = get_r53_connection()
zone_id = convert_zone_name_to_zone_id(zonename)
name = complete_subdomain_name(name, zonename)
if targetname.endswith(":"):
s3 = get_s3_connection()
bucket_name = targetname[:-1]
region_name_of_the_bucket = s3.meta.client.get_bucket_location(Bucket=bucket_name)["LocationConstraint"]
# see http://docs.aws.amazon.com/AmazonS3/latest/dev/WebsiteEndpoints.html
endpoint_url_domain = bucket_name + ".s3-website-" + region_name_of_the_bucket + ".amazonaws.com"
if bucket_name != name[:-1]:
error_exit("The bucket name (%s) must be the same as the (sub)domain name (%s).\nThis is a requirement by Amazon S3." % (bucket_name, name[:-1]))
resource_record_set = {
'Name': name,
'Type': 'CNAME',
'TTL': ttl,
'ResourceRecords': [{'Value': endpoint_url_domain}],
}
else:
instance = convert_host_name_to_instance(targetname)
if instance.public_ip_address is None:
error_exit("The instance '%s' (%s) has no public IP address" % (extract_name_from_tags(instance.tags), instance.id))
values = [instance.public_ip_address]
resource_record_set = {
'Name': name,
'Type': 'A',
'TTL': ttl,
'ResourceRecords': [{'Value': v} for v in list(values)],
}
result = r53.change_resource_record_sets(
HostedZoneId=zone_id,
ChangeBatch={
'Comment': 'taw.py',
'Changes': [{
'Action': 'UPSERT',
'ResourceRecordSet': resource_record_set,
}]
}
)
change_info = result['ChangeInfo']
print("ID: %s, Status: %s" % (change_info['Id'], change_info['Status']))
@zone_group.command("rm")
@click.argument('zonename', metavar='<zone name)>', autocompletion=click_complete_for_zones)
@click.argument('name', metavar='<subdomain name>')
@click.argument('type_str', default='A', metavar='[A(default)|NS|TXT|...]', type=click_global_dns_record_types)
@click.option('--force', is_flag=True, help='actually delete a record')
@pass_global_parameters
def rm_zonecmd(params, zonename, name, type_str, force):
""" delete a record from zone """
r53 = get_r53_connection()
zone_id = convert_zone_name_to_zone_id(zonename)
name = complete_subdomain_name(name, zonename)
zone_to_delete = None
for zone in r53.list_resource_record_sets(HostedZoneId=zone_id)['ResourceRecordSets']:
if zone['Name'] == name and zone['Type'] == type_str:
zone_to_delete = zone
break
else:
error_exit("No such record (name='%s', type='%s')" % (name, type_str))
if force:
result = r53.change_resource_record_sets(
HostedZoneId=zone_id,
ChangeBatch={
'Comment': 'taw.py',
'Changes': [{
'Action': 'DELETE',
'ResourceRecordSet': zone_to_delete,
}]
}
)
change_info = result['ChangeInfo']
print("ID: %s, Status: %s" % (change_info['Id'], change_info['Status']))
else:
print_info("The following record will be deleted from zone '%s' if run with '--force'" % zonename)
output_table(params, ['Name', 'Type', 'TTL', 'Value'],
[[zone_to_delete['Name'],
zone_to_delete['Type'],
zone_to_delete['TTL'],
[x['Value'] for x in zone_to_delete['ResourceRecords']]
]])
@zone_group.command("list")
@click.option('--verbose', '-v', is_flag=True, help='Verbose output.')
@click.option('--argdoc', is_flag=True, help='Show available attributes in a web browser')
@click.option('--attr', '-a', multiple=True, help='Attribute name(s).')
@click.option('--allregions', is_flag=True, help='List for all regions.')
@click.argument('zone_name_if_any', nargs=-1, metavar='<zone name>', autocompletion=click_complete_for_zones)
@pass_global_parameters
def list_zones(params, argdoc, verbose, attr, allregions, zone_name_if_any):
""" list all zones hosted by Route53 """
if argdoc:
click.launch('http://boto3.readthedocs.io/en/latest/reference/services/route53.html#Route53.Client.list_hosted_zones')
return
if allregions: error_exit("Route53 zones are all global, so --allregions option is pointless.")
if zone_name_if_any:
zone_name = zone_name_if_any[0]
zone_id = convert_zone_name_to_zone_id(zone_name)
def remove_trailing_domain_name(d):
if d.endswith('.' + zone_name): return d[:-len(zone_name) - 1]
if d.endswith('.' + zone_name + '.'): return d[:-len(zone_name) - 2]
return d
all_list_columns = [
(True, "Name" , "Name" , remove_trailing_domain_name) ,
(True, "TTL" , "TTL" , ident) ,
(True, "Type" , "Type" , ident) ,
(True, "ResourceRecords", "Value", lambda x: [y['Value'] for y in x]),
]
list_columns = [x for x in all_list_columns if verbose or x[0]]
for v in attr: list_columns.append((True, v, v, ident))
header = [x[2] for x in list_columns]; rows = []
r53 = get_r53_connection()
for zone in r53.list_resource_record_sets(HostedZoneId=zone_id)['ResourceRecordSets']:
row = [f(zone[i]) for _, i, _, f in list_columns]
rows.append(row)
else:
completion_keywords = []
r53 = get_r53_connection()
header = ['Name', 'Comment', 'IsPrivate', 'RecordSetCount']; rows = []
for zone in r53.list_hosted_zones()['HostedZones']:
config = zone['Config']
row = [zone['Name'], config['Comment'] if 'Comment' in config else '',
config['PrivateZone'] if 'PrivateZone' in config else '', zone['ResourceRecordSetCount']]
rows.append(row)
completion_keywords.append({"zone": zone['Name']})
update_completion_keywords(completion_keywords, "zone")
output_table(params, header, rows)
|
[
"click.Choice",
"click.option",
"click.argument",
"click.launch"
] |
[((305, 394), 'click.Choice', 'click.Choice', (["['A', 'AAAA', 'ALIAS', 'CNAME', 'MX', 'NS', 'PTR', 'SOA', 'SRV', 'TXT']"], {}), "(['A', 'AAAA', 'ALIAS', 'CNAME', 'MX', 'NS', 'PTR', 'SOA',\n 'SRV', 'TXT'])\n", (317, 394), False, 'import click\n'), ((1549, 1645), 'click.argument', 'click.argument', (['"""zonename"""'], {'metavar': '"""<zone name)>"""', 'autocompletion': 'click_complete_for_zones'}), "('zonename', metavar='<zone name)>', autocompletion=\n click_complete_for_zones)\n", (1563, 1645), False, 'import click\n'), ((1642, 1692), 'click.argument', 'click.argument', (['"""name"""'], {'metavar': '"""<subdomain name>"""'}), "('name', metavar='<subdomain name>')\n", (1656, 1692), False, 'import click\n'), ((1694, 1787), 'click.argument', 'click.argument', (['"""type_str"""'], {'metavar': '"""<A|NS|TXT|...>"""', 'type': 'click_global_dns_record_types'}), "('type_str', metavar='<A|NS|TXT|...>', type=\n click_global_dns_record_types)\n", (1708, 1787), False, 'import click\n'), ((1784, 1876), 'click.argument', 'click.argument', (['"""values"""'], {'nargs': '(-1)', 'required': '(True)', 'metavar': '"""<value (eg: 192.168.3.11)>"""'}), "('values', nargs=-1, required=True, metavar=\n '<value (eg: 192.168.3.11)>')\n", (1798, 1876), False, 'import click\n'), ((1873, 1933), 'click.option', 'click.option', (['"""--ttl"""'], {'metavar': '"""TTL"""', 'type': 'int', 'default': '(3600)'}), "('--ttl', metavar='TTL', type=int, default=3600)\n", (1885, 1933), False, 'import click\n'), ((1935, 1982), 'click.option', 'click.option', (['"""--weight"""'], {'type': 'int', 'default': '(100)'}), "('--weight', type=int, default=100)\n", (1947, 1982), False, 'import click\n'), ((2851, 2947), 'click.argument', 'click.argument', (['"""zonename"""'], {'metavar': '"""<zone name)>"""', 'autocompletion': 'click_complete_for_zones'}), "('zonename', metavar='<zone name)>', autocompletion=\n click_complete_for_zones)\n", (2865, 2947), False, 'import click\n'), ((2944, 2994), 'click.argument', 
'click.argument', (['"""name"""'], {'metavar': '"""<subdomain name>"""'}), "('name', metavar='<subdomain name>')\n", (2958, 2994), False, 'import click\n'), ((2996, 3081), 'click.argument', 'click.argument', (['"""targetname"""'], {'metavar': '"""<instance name|instance ID|bucket name:>"""'}), "('targetname', metavar='<instance name|instance ID|bucket name:>'\n )\n", (3010, 3081), False, 'import click\n'), ((3078, 3138), 'click.option', 'click.option', (['"""--ttl"""'], {'metavar': '"""TTL"""', 'type': 'int', 'default': '(3600)'}), "('--ttl', metavar='TTL', type=int, default=3600)\n", (3090, 3138), False, 'import click\n'), ((3140, 3187), 'click.option', 'click.option', (['"""--weight"""'], {'type': 'int', 'default': '(100)'}), "('--weight', type=int, default=100)\n", (3152, 3187), False, 'import click\n'), ((5519, 5615), 'click.argument', 'click.argument', (['"""zonename"""'], {'metavar': '"""<zone name)>"""', 'autocompletion': 'click_complete_for_zones'}), "('zonename', metavar='<zone name)>', autocompletion=\n click_complete_for_zones)\n", (5533, 5615), False, 'import click\n'), ((5612, 5662), 'click.argument', 'click.argument', (['"""name"""'], {'metavar': '"""<subdomain name>"""'}), "('name', metavar='<subdomain name>')\n", (5626, 5662), False, 'import click\n'), ((5664, 5778), 'click.argument', 'click.argument', (['"""type_str"""'], {'default': '"""A"""', 'metavar': '"""[A(default)|NS|TXT|...]"""', 'type': 'click_global_dns_record_types'}), "('type_str', default='A', metavar='[A(default)|NS|TXT|...]',\n type=click_global_dns_record_types)\n", (5678, 5778), False, 'import click\n'), ((5776, 5846), 'click.option', 'click.option', (['"""--force"""'], {'is_flag': '(True)', 'help': '"""actually delete a record"""'}), "('--force', is_flag=True, help='actually delete a record')\n", (5788, 5846), False, 'import click\n'), ((7294, 7363), 'click.option', 'click.option', (['"""--verbose"""', '"""-v"""'], {'is_flag': '(True)', 'help': '"""Verbose output."""'}), 
"('--verbose', '-v', is_flag=True, help='Verbose output.')\n", (7306, 7363), False, 'import click\n'), ((7365, 7459), 'click.option', 'click.option', (['"""--argdoc"""'], {'is_flag': '(True)', 'help': '"""Show available attributes in a web browser"""'}), "('--argdoc', is_flag=True, help=\n 'Show available attributes in a web browser')\n", (7377, 7459), False, 'import click\n'), ((7456, 7526), 'click.option', 'click.option', (['"""--attr"""', '"""-a"""'], {'multiple': '(True)', 'help': '"""Attribute name(s)."""'}), "('--attr', '-a', multiple=True, help='Attribute name(s).')\n", (7468, 7526), False, 'import click\n'), ((7528, 7600), 'click.option', 'click.option', (['"""--allregions"""'], {'is_flag': '(True)', 'help': '"""List for all regions."""'}), "('--allregions', is_flag=True, help='List for all regions.')\n", (7540, 7600), False, 'import click\n'), ((7602, 7714), 'click.argument', 'click.argument', (['"""zone_name_if_any"""'], {'nargs': '(-1)', 'metavar': '"""<zone name>"""', 'autocompletion': 'click_complete_for_zones'}), "('zone_name_if_any', nargs=-1, metavar='<zone name>',\n autocompletion=click_complete_for_zones)\n", (7616, 7714), False, 'import click\n'), ((7880, 8008), 'click.launch', 'click.launch', (['"""http://boto3.readthedocs.io/en/latest/reference/services/route53.html#Route53.Client.list_hosted_zones"""'], {}), "(\n 'http://boto3.readthedocs.io/en/latest/reference/services/route53.html#Route53.Client.list_hosted_zones'\n )\n", (7892, 8008), False, 'import click\n')]
|
from django.http import HttpResponse
def home_page_view(request):
return HttpResponse('Hello, Worlds!!')
|
[
"django.http.HttpResponse"
] |
[((78, 109), 'django.http.HttpResponse', 'HttpResponse', (['"""Hello, Worlds!!"""'], {}), "('Hello, Worlds!!')\n", (90, 109), False, 'from django.http import HttpResponse\n')]
|
from django.contrib import admin
from .models import Hero
admin.site.register(Hero)
|
[
"django.contrib.admin.site.register"
] |
[((59, 84), 'django.contrib.admin.site.register', 'admin.site.register', (['Hero'], {}), '(Hero)\n', (78, 84), False, 'from django.contrib import admin\n')]
|
'''
Shows the grid world using pygame.
Globecom Tutorial - December 7, 2021
Tutorial 29: Machine Learning for MIMO Systems with Large Arrays
<NAME> (NCSU),
<NAME> (UFPA) and
<NAME>. (NCSU)
'''
import time
import matplotlib.pyplot as plt
from matplotlib import colors
import numpy as np
import pygame as pg
import pyscreenshot as ImageGrab
import imageio
#from beamforming_calculation import AnalogBeamformer
SLEEP_TIME = 0.3 #time to sleep and allow visualizing the world :)
class Mimo_RL_render:
def __init__(self, analogBeamformer):
self.should_save_images_as_gif = False #Not working: enables saving images in the end
self.analogBeamformer = analogBeamformer
self.Rx_position = (0,0)
self.Rx2_position = (5,5)
self.scheduled_user = 0
self.beam_index = 0
#Fixed objects, which do not move
self.Tx = [1,2]
self.wall1 = [3,4]
self.wall2 = [4,4]
# create discrete colormap
cmap = colors.ListedColormap(['gray','red', 'green', 'blue'])
cmap.set_bad(color='w', alpha=0)
fig = plt.figure()
self.pg = pg
self.pg.init()
self.screen = pg.display.set_mode((600,600))
clock = pg.time.Clock()
back = pg.image.load("./figs/grid6x6.png")
self.back = pg.transform.scale(back, (600,600))
antenna = pg.image.load("./figs/antenna.png").convert_alpha()
self.antenna = pg.transform.scale(antenna, (40,80))
wall_image = pg.image.load("./figs/wall.png").convert_alpha()
self.wall_image = pg.transform.scale(wall_image, (90,90))
carro1 = pg.image.load("./figs/carro1.png").convert_alpha()
self.carro1 = pg.transform.scale(carro1, (80,80))
carro2 = pg.image.load("./figs/carro2.png").convert_alpha()
self.carro2 = pg.transform.scale(carro2, (80,80))
if self.should_save_images_as_gif:
self.images = []
def set_positions(self, positions, scheduled_user, beam_index):
#positions = self.mimo_RL_Environment.get_UE_positions()
self.Rx_position = positions[0]
self.Rx2_position = positions[1]
self.scheduled_user = scheduled_user
self.beam_index = beam_index
def plot_beam(self, scheduled_user, beam_index):
fig, ax2 = plt.subplots(subplot_kw={'projection': 'polar'})
if(scheduled_user==0):
colorCar = 'blue'
else: #scheduled_user==1
colorCar = 'red'
angles = self.analogBeamformer.angles_for_plotting
beam_values = self.analogBeamformer.beams_for_plotting[:,beam_index]
beam_values = np.abs(beam_values) #take absolute values
ax2.plot(angles, beam_values, color=colorCar)
ax2.set_axis_off()
ax2.grid(False)
plt.savefig('chosen_beam.png', transparent=True, bbox_inches='tight')
def render_back(self):
self.screen.fill([255,255,255])
self.screen.blit(self.back,(0,0))
def render_antenna(self):
self.screen.blit(self.antenna,(self.Tx[0]*100+30,abs(self.Tx[1]-5)*100+16))
def render_beams(self):
bestBeam = pg.image.load("chosen_beam.png").convert_alpha()
bestBeam = pg.transform.scale(bestBeam, (300,300))
self.screen.blit(bestBeam,(self.Tx[0]*100-100,abs(self.Tx[1]-5)*100-100))
def render_wall1(self):
self.screen.blit(self.wall_image,(self.wall1[0]*100 + 5,abs(self.wall1[1]-5)*100 + 9))
def render_wall2(self):
self.screen.blit(self.wall_image,(self.wall2[0]*100 + 5,abs(self.wall2[1]-5)*100 + 9))
def render_Rx(self):
self.screen.blit(self.carro1,(self.Rx_position[0]*100 + 10, abs(self.Rx_position[1]-5)*100 + 10))
def render_Rx2(self):
self.screen.blit(self.carro2,(self.Rx2_position[0]*100 + 10, abs(self.Rx2_position[1]-5)*100 + 10))
def render(self):
time.sleep(SLEEP_TIME)
#plot beam
self.plot_beam(self.scheduled_user, self.beam_index)
self.render_back()
self.render_Rx()
self.render_Rx2()
self.render_antenna()
self.render_wall1()
self.render_wall2()
self.render_beams()
#plt.pause(1)
if self.should_save_images_as_gif:
raise NotImplementedError()
self.images.append(ImageGrab.grab(bbox=(1960, 1030, 2760, 1830)))
self.pg.display.update()
def save_images_as_gif(self, file_name, duration=3):
raise NotImplementedError()
gif = imageio.mimsave(file_name, self.images, 'GIF', duration=duration)
#print(gif)
print('Wrote file', file_name)
|
[
"imageio.mimsave",
"numpy.abs",
"pygame.display.set_mode",
"pyscreenshot.grab",
"matplotlib.pyplot.subplots",
"time.sleep",
"pygame.transform.scale",
"matplotlib.pyplot.figure",
"pygame.image.load",
"pygame.time.Clock",
"matplotlib.colors.ListedColormap",
"matplotlib.pyplot.savefig"
] |
[((983, 1038), 'matplotlib.colors.ListedColormap', 'colors.ListedColormap', (["['gray', 'red', 'green', 'blue']"], {}), "(['gray', 'red', 'green', 'blue'])\n", (1004, 1038), False, 'from matplotlib import colors\n'), ((1093, 1105), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1103, 1105), True, 'import matplotlib.pyplot as plt\n'), ((1172, 1203), 'pygame.display.set_mode', 'pg.display.set_mode', (['(600, 600)'], {}), '((600, 600))\n', (1191, 1203), True, 'import pygame as pg\n'), ((1219, 1234), 'pygame.time.Clock', 'pg.time.Clock', ([], {}), '()\n', (1232, 1234), True, 'import pygame as pg\n'), ((1250, 1285), 'pygame.image.load', 'pg.image.load', (['"""./figs/grid6x6.png"""'], {}), "('./figs/grid6x6.png')\n", (1263, 1285), True, 'import pygame as pg\n'), ((1306, 1342), 'pygame.transform.scale', 'pg.transform.scale', (['back', '(600, 600)'], {}), '(back, (600, 600))\n', (1324, 1342), True, 'import pygame as pg\n'), ((1435, 1472), 'pygame.transform.scale', 'pg.transform.scale', (['antenna', '(40, 80)'], {}), '(antenna, (40, 80))\n', (1453, 1472), True, 'import pygame as pg\n'), ((1568, 1608), 'pygame.transform.scale', 'pg.transform.scale', (['wall_image', '(90, 90)'], {}), '(wall_image, (90, 90))\n', (1586, 1608), True, 'import pygame as pg\n'), ((1698, 1734), 'pygame.transform.scale', 'pg.transform.scale', (['carro1', '(80, 80)'], {}), '(carro1, (80, 80))\n', (1716, 1734), True, 'import pygame as pg\n'), ((1824, 1860), 'pygame.transform.scale', 'pg.transform.scale', (['carro2', '(80, 80)'], {}), '(carro2, (80, 80))\n', (1842, 1860), True, 'import pygame as pg\n'), ((2302, 2350), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'subplot_kw': "{'projection': 'polar'}"}), "(subplot_kw={'projection': 'polar'})\n", (2314, 2350), True, 'import matplotlib.pyplot as plt\n'), ((2632, 2651), 'numpy.abs', 'np.abs', (['beam_values'], {}), '(beam_values)\n', (2638, 2651), True, 'import numpy as np\n'), ((2788, 2857), 'matplotlib.pyplot.savefig', 'plt.savefig', 
(['"""chosen_beam.png"""'], {'transparent': '(True)', 'bbox_inches': '"""tight"""'}), "('chosen_beam.png', transparent=True, bbox_inches='tight')\n", (2799, 2857), True, 'import matplotlib.pyplot as plt\n'), ((3199, 3239), 'pygame.transform.scale', 'pg.transform.scale', (['bestBeam', '(300, 300)'], {}), '(bestBeam, (300, 300))\n', (3217, 3239), True, 'import pygame as pg\n'), ((3867, 3889), 'time.sleep', 'time.sleep', (['SLEEP_TIME'], {}), '(SLEEP_TIME)\n', (3877, 3889), False, 'import time\n'), ((4521, 4586), 'imageio.mimsave', 'imageio.mimsave', (['file_name', 'self.images', '"""GIF"""'], {'duration': 'duration'}), "(file_name, self.images, 'GIF', duration=duration)\n", (4536, 4586), False, 'import imageio\n'), ((1360, 1395), 'pygame.image.load', 'pg.image.load', (['"""./figs/antenna.png"""'], {}), "('./figs/antenna.png')\n", (1373, 1395), True, 'import pygame as pg\n'), ((1493, 1525), 'pygame.image.load', 'pg.image.load', (['"""./figs/wall.png"""'], {}), "('./figs/wall.png')\n", (1506, 1525), True, 'import pygame as pg\n'), ((1625, 1659), 'pygame.image.load', 'pg.image.load', (['"""./figs/carro1.png"""'], {}), "('./figs/carro1.png')\n", (1638, 1659), True, 'import pygame as pg\n'), ((1751, 1785), 'pygame.image.load', 'pg.image.load', (['"""./figs/carro2.png"""'], {}), "('./figs/carro2.png')\n", (1764, 1785), True, 'import pygame as pg\n'), ((3131, 3163), 'pygame.image.load', 'pg.image.load', (['"""chosen_beam.png"""'], {}), "('chosen_beam.png')\n", (3144, 3163), True, 'import pygame as pg\n'), ((4332, 4377), 'pyscreenshot.grab', 'ImageGrab.grab', ([], {'bbox': '(1960, 1030, 2760, 1830)'}), '(bbox=(1960, 1030, 2760, 1830))\n', (4346, 4377), True, 'import pyscreenshot as ImageGrab\n')]
|
#!/usr/bin/env python
import asyncio
import websockets
import datetime
import random
class SocketClient:
def __init__(self):
self.socket = websockets.connect('ws://localhost:9000')
# asyncio.get_event_loop().run_until_complete()
async def send(self, message):
await self.socket.send(message)
client = SocketClient()
await client.send("moi")
# async def hello():
# async with websockets.connect('ws://localhost:9000') as websocket:
# while True:
# now = datetime.datetime.utcnow().isoformat() + 'Z'
# await websocket.send(now)
# await asyncio.sleep(random.random() * 3)
# asyncio.get_event_loop().run_until_complete(hello())
# print("Mock bot client started, sending timestamps")
# asyncio.get_event_loop().run_forever()
|
[
"websockets.connect"
] |
[((153, 194), 'websockets.connect', 'websockets.connect', (['"""ws://localhost:9000"""'], {}), "('ws://localhost:9000')\n", (171, 194), False, 'import websockets\n')]
|
import time
import pleasehold
if __name__ == '__main__':
print('before')
before = time.time()
with pleasehold.hold('starting', 'complete') as holding:
time.sleep(2)
holding.push('1')
holding.push('2')
holding.push('3')
holding.push('4')
time.sleep(2)
holding.push('another push')
time.sleep(2)
with pleasehold.hold(begin_msg='props changed', delay=0.1, symbol='#') as holding:
time.sleep(2)
holding.push('pushed something')
time.sleep(2)
print(f'time elapsed: {time.time() - before}')
|
[
"time.sleep",
"pleasehold.hold",
"time.time"
] |
[((91, 102), 'time.time', 'time.time', ([], {}), '()\n', (100, 102), False, 'import time\n'), ((113, 152), 'pleasehold.hold', 'pleasehold.hold', (['"""starting"""', '"""complete"""'], {}), "('starting', 'complete')\n", (128, 152), False, 'import pleasehold\n'), ((173, 186), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (183, 186), False, 'import time\n'), ((299, 312), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (309, 312), False, 'import time\n'), ((358, 371), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (368, 371), False, 'import time\n'), ((382, 447), 'pleasehold.hold', 'pleasehold.hold', ([], {'begin_msg': '"""props changed"""', 'delay': '(0.1)', 'symbol': '"""#"""'}), "(begin_msg='props changed', delay=0.1, symbol='#')\n", (397, 447), False, 'import pleasehold\n'), ((468, 481), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (478, 481), False, 'import time\n'), ((531, 544), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (541, 544), False, 'import time\n'), ((573, 584), 'time.time', 'time.time', ([], {}), '()\n', (582, 584), False, 'import time\n')]
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2016-10-11 18:31
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Teacher',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.TextField(verbose_name='Nome')),
('emails', models.TextField(blank=True, verbose_name='Emails')),
('avatar_url', models.URLField(blank=True, null=True, verbose_name='Avatar URL')),
('position', models.TextField(blank=True, verbose_name='Cargo')),
],
options={
'verbose_name': 'Professor',
'verbose_name_plural': 'Professores',
},
),
]
|
[
"django.db.models.TextField",
"django.db.models.URLField",
"django.db.models.AutoField"
] |
[((368, 461), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (384, 461), False, 'from django.db import migrations, models\n'), ((485, 522), 'django.db.models.TextField', 'models.TextField', ([], {'verbose_name': '"""Nome"""'}), "(verbose_name='Nome')\n", (501, 522), False, 'from django.db import migrations, models\n'), ((552, 603), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'verbose_name': '"""Emails"""'}), "(blank=True, verbose_name='Emails')\n", (568, 603), False, 'from django.db import migrations, models\n'), ((637, 702), 'django.db.models.URLField', 'models.URLField', ([], {'blank': '(True)', 'null': '(True)', 'verbose_name': '"""Avatar URL"""'}), "(blank=True, null=True, verbose_name='Avatar URL')\n", (652, 702), False, 'from django.db import migrations, models\n'), ((734, 784), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'verbose_name': '"""Cargo"""'}), "(blank=True, verbose_name='Cargo')\n", (750, 784), False, 'from django.db import migrations, models\n')]
|
# -*- coding: utf-8 -*-
import random
from collections import deque
from .actions import JoinAction
from .actions import VoteAction
from .phases import Signup
from .roles import Townie
from .roles import Cop
from .roles import Mafioso
next_phase = {'signup': 'day',
'day': 'night',
'night': 'day'}
legal_roles = {'t': Townie,
'm': Mafioso,
'c': Cop}
class Player(object):
_role = None
_status = None
@property
def role(self):
return self._role
def __init__(self, contact, nickname):
self.contact = contact
self.nickname = nickname
self._status = 'alive'
self.night_message = None
def is_dead(self):
return self._status == 'dead'
def is_alive(self):
return self._status == 'alive'
def lynch(self):
self._status = 'dead'
def kill(self):
self._status = 'dead'
def set_role(self, role):
self._role = role
def get_role(self):
return self._role
def __eq__(self, other):
return isinstance(other, Player) and other.contact == self.contact
def __ne__(self, other):
return not self.__eq__(other)
def resolve_action(self, target):
self.role.resolve_action(self, target)
def append_to_night_message(self, message: str):
if self.night_message:
self.night_message = self.night_message + ' ' + message
else:
self.night_message = message
def get_night_message(self):
return self.night_message
def clear_night_message(self):
self.night_message = None
class Game:
phase = 'signup'
day_number = 0
players = []
roles = []
randomized_roles = []
votes = {}
actions = {}
public_message = None
def __init__(self, game_setup: str, messenger):
assert Game.is_valid_setup(game_setup), (
'Please validate the setup before creating a game.')
self.roles = Game.parse_roles(game_setup)
self.randomized_roles = random.sample(self.roles, len(self.roles))
self.phases = [Signup()]
self.day_number = 0
self.players = []
self.messenger = messenger
self.messenger.message_all_players(
'A new game has started, join with "!join"')
def join(self, player):
"""Returns True if the player was able to join the game."""
self.add_action(JoinAction(self, player))
def old_join(self, player):
#TODO delete me
if self.phase != "signup":
return False
if player not in self.players:
self.players.append(player)
self.messenger.message_all_players(
'{name} has joined the fray!'.format(name=player.nickname))
if len(self.players) == len(self.roles):
self.assign_roles()
self.advance_phase()
return True
def vote(self, player_name: str, target_name: str):
player = self.get_player(player_name)
target = self.get_player(target_name)
assert player, "Player {name} isn't playing.".format(name=player_name)
assert target, "Target {name} isn't playing.".format(name=player_name)
self.add_action(VoteAction(self, player, target))
def old_vote(self, player_name: str, target_name: str) -> bool:
"""Returns True if the vote was cast."""
player = self.get_player(player_name)
target = self.get_player(target_name)
if self.phase != 'day':
return False
if player in self.players and target in self.players:
self.votes[player.nickname] = target
vote_count = 0
for vote_target in self.votes.values():
if target == vote_target:
vote_count += 1
self.messenger.message_all_players(
'{name} has voted for {target}, '
'bringing the total to {count}'
.format(name=player.nickname,
target=target.nickname,
count=vote_count))
if vote_count > len(self.players)/2:
target.kill()
self.messenger.message_all_players(
'The crowd has spoken and {name} has been lynched! '
'They were a {role}.'.format(name=target.nickname,
role=target.role.name()))
self.advance_phase()
return True
return False
def action(self, player_name: str, *args):
player = self.get_player(player_name)
action = player.get_role().get_action()
self.add_action(action(self, player, *args))
def target(self, player_name: str, target_name: str) -> bool:
'''Returns True if the action was accepted.'''
player = self.get_player(player_name)
target = self.get_player(target_name)
if (self.phase != 'night'
or player.is_dead()
or not player.role.has_night_action()):
return False
if player in self.players and target in self.players:
self.actions[player.nickname] = target
if self.all_actions_submitted():
self.resolve_actions()
self.send_night_messages()
self.advance_phase()
return True
return False
def check_phase_end():
if self.get_current_phase().is_over():
self.phase.append(self.get_current_phase().advance_phase())
def is_valid_setup(game_setup: str) -> bool:
roles = game_setup.split(',')
for role in roles:
if role not in legal_roles:
return False
return True
def parse_roles(game_setup: str):
roles = []
for role in game_setup.split(','):
roles.append(legal_roles[role])
return roles
def get_next_role(self):
return self.randomized_roles.pop()
def assign_roles(self):
assert len(self.roles) == len(self.players), (
'Cannot assign roles if unequal to number of players')
randomized_roles = random.sample(self.roles, len(self.players))
for player in self.players:
player.set_role(randomized_roles.pop())
for player in self.players:
self.messenger.message_player(player,
player.role.get_role_message(self))
def _assign_players_for_testing(self, *players):
'''Add players to the game and assign roles deterministically.'''
assert not self.players, 'Use this function instead of "join".'
assert len(self.roles) == len(players), (
'{role_count} roles, but {player_count} players'
.format(role_count=len(self.roles), player_count=len(players)))
players = deque(players)
for role in self.roles:
player = players.popleft()
player.set_role(role)
self.players.append(player)
self.messenger.message_player(player,
player.role.get_role_message(self))
def unrecognized_roles(roles: str) -> list:
roles = roles.split(',')
unrecognized_roles = []
for role in roles:
if role not in legal_roles:
unrecognized_roles.append(role)
return unrecognized_roles
def advance_phase(self):
self.votes = {}
self.actions = {}
self.phase = next_phase[self.phase]
if self.public_message:
self.messenger.message_all_players(self.public_message)
self.public_message = None
if self.phase == 'day':
self.day_number += 1
if self.is_game_end():
self.phase = None
else:
self.messenger.message_all_players(
'It is now {phase} {number}'.format(
phase=self.phase, number=self.day_number))
def is_game_end(self):
for player in self.players:
game_end_message = player.role.game_ending_message(self)
if game_end_message:
self.messenger.message_all_players(game_end_message)
return True
return False
def get_current_phase(self):
return self.phases[-1]
def player_count(self):
return len(self.players)
def add_player(self, player):
self.players.append(player)
def role_count(self):
return len(self.roles)
def vote_count(self, player):
votes = self.get_current_phase().compile_votes()
if player.nickname not in votes:
return 0
return votes[player.nickname]
def add_action(self, action):
phase = self.get_current_phase()
phase.add_action(action)
if (not phase.is_checking()
and not phase.is_ended()
and phase.is_phase_end(self)):
next_phase = phase.advance_phase()
self.phases.append(next_phase)
def get_player(self, name: str):
for player in self.players:
if player.nickname.lower() == name.lower():
return player
if player.contact == name:
return player
def count_faction(self, faction: str):
count = 0
for player in self.players:
if player.role.get_faction() == faction:
count += 1
return count
def count_living(self):
count = 0
for player in self.players:
if player.is_alive():
count += 1
return count
def get_team(self, faction: str):
team = []
for player in self.players:
if player.role.get_faction() == faction:
team.append(player.nickname)
return team
def all_actions_submitted(self):
for player in self.players:
if (player.is_alive()
and player.role.has_night_action()
and player.nickname not in self.actions):
return False
return True
def resolve_actions(self):
mafia_votes = {}
mafia_target = None
mafia_count = 0
for player in self.players:
if player.nickname in self.actions and player.role == Mafioso:
target = self.actions[player.nickname]
if target.nickname in mafia_votes:
mafia_votes[target.nickname] += 1
else:
mafia_votes[target.nickname] = 1
if mafia_votes[target.nickname] > mafia_count:
mafia_target = target
mafia_count += 1
if mafia_target:
mafia_target.kill()
self.public_message = ('You all awake, but there is someone missing... '
'looks like {name} has been killed during '
'the night!'
.format(name=mafia_target.nickname))
mafia_target.append_to_night_message('You have been killed! =(')
for player in self.players:
if player.role.get_faction() == 'Mafia':
player.append_to_night_message(
'You recieve word that {target} has been dispatched.'
.format(target=mafia_target.nickname))
for player in self.players:
if (player.is_alive()
and player.role.has_night_action()
and player.role != Mafioso):
player.resolve_action(self.actions[player.nickname])
def send_night_messages(self):
for player in self.players:
night_message = player.get_night_message()
if night_message:
self.messenger.message_player(player, night_message)
player.clear_night_message()
elif player.is_alive():
self.messenger.message_player(player,
'You have an uneventful night.')
def message_player(self, player, message):
self.messenger.message_player(player, message)
def message_all_players(self, message):
self.messenger.message_all_players(message)
|
[
"collections.deque"
] |
[((6892, 6906), 'collections.deque', 'deque', (['players'], {}), '(players)\n', (6897, 6906), False, 'from collections import deque\n')]
|
#!/usr/bin/env python
"""
Creates a commit that increments the versionCode in the build.gradle file.
We usually run this before releasing a new beta version to the store.
Does the following things:
Step 1: (run without arguments)
- Bump versionCode
- Make a new commit
After this run 'git review' and + the commit.
Requires the Python module 'sh' to run.
"""
import sh
import os
import re
import sys
VERSION_CODE_REGEX = r'(?P<key>versionCode) (?P<value>\d+)'
script_dir = sys.path[0]
parent_dir = os.path.join(script_dir, os.pardir)
path_prefix = os.path.abspath(parent_dir)
version_code_pattern = re.compile(VERSION_CODE_REGEX, re.MULTILINE)
def set_version_code(data):
"""
Utility function to set new versionCode
"""
match = version_code_pattern.search(data)
if not match:
raise ValueError('Version code not found')
version_code = int(match.group('value'))
next_version_code = '\g<key> {}'.format(version_code + 1)
return version_code_pattern.sub(next_version_code, data)
def transform_file(file_path, *funcs):
"""
Transforms the file given in file_path by passing it
serially through all the functions in *func and then
writing it back out to file_path
"""
with open(file_path, 'r+') as f:
data = f.read()
f.seek(0)
for func in funcs:
data = func(data)
f.write(data)
print(file_path)
def bump(file_path):
transform_file(file_path, set_version_code)
sh.cd(path_prefix)
sh.git.add('-u', file_path)
sh.git.commit('-m', 'Bump versionCode')
if __name__ == '__main__':
bump('app/build.gradle')
print('BUMP NOTICE! Run git review with bumped version and +2 if appropriate,')
|
[
"sh.git.add",
"os.path.abspath",
"sh.cd",
"sh.git.commit",
"os.path.join",
"re.compile"
] |
[((511, 546), 'os.path.join', 'os.path.join', (['script_dir', 'os.pardir'], {}), '(script_dir, os.pardir)\n', (523, 546), False, 'import os\n'), ((561, 588), 'os.path.abspath', 'os.path.abspath', (['parent_dir'], {}), '(parent_dir)\n', (576, 588), False, 'import os\n'), ((613, 657), 're.compile', 're.compile', (['VERSION_CODE_REGEX', 're.MULTILINE'], {}), '(VERSION_CODE_REGEX, re.MULTILINE)\n', (623, 657), False, 'import re\n'), ((1497, 1515), 'sh.cd', 'sh.cd', (['path_prefix'], {}), '(path_prefix)\n', (1502, 1515), False, 'import sh\n'), ((1520, 1547), 'sh.git.add', 'sh.git.add', (['"""-u"""', 'file_path'], {}), "('-u', file_path)\n", (1530, 1547), False, 'import sh\n'), ((1552, 1591), 'sh.git.commit', 'sh.git.commit', (['"""-m"""', '"""Bump versionCode"""'], {}), "('-m', 'Bump versionCode')\n", (1565, 1591), False, 'import sh\n')]
|
from flask import current_app as app
from flask import render_template
@app.route('/')
def home():
"""Landing page."""
nav = [
{'name': 'Live', 'url': 'live.html'},
{'name': 'Demo', 'url': 'demo.html'}
]
return render_template(
'index.html',
title="Stonk Scraper",
description="A natural language processing web app that evaluates sentiment of Elon's twitter feed to determine if stonk will go up or down."
)
@app.route('/live', methods=['GET', 'POST', 'PUT'])
def live():
"""Live page."""
nav = [
{'name': 'Live', 'url': 'live.html'},
{'name': 'Demo', 'url': 'demo.html'}
]
return render_template(
'live.html',
title="Stonk Scraper",
description="Live scraper for Elon's twitter feed."
)
@app.route('/demo', method=['GET', 'POST', 'PUT'])
def demo():
"""Demo page."""
nav = [
{'name': 'Live', 'url': 'live.html'},
{'name': 'Demo', 'url': 'demo.html'}
]
return render_template(
'demo.html',
title="Stonk Scraper",
description="Demo scraper for Elon's twitter feed."
)
|
[
"flask.current_app.route",
"flask.render_template"
] |
[((73, 87), 'flask.current_app.route', 'app.route', (['"""/"""'], {}), "('/')\n", (82, 87), True, 'from flask import current_app as app\n'), ((472, 522), 'flask.current_app.route', 'app.route', (['"""/live"""'], {'methods': "['GET', 'POST', 'PUT']"}), "('/live', methods=['GET', 'POST', 'PUT'])\n", (481, 522), True, 'from flask import current_app as app\n'), ((813, 862), 'flask.current_app.route', 'app.route', (['"""/demo"""'], {'method': "['GET', 'POST', 'PUT']"}), "('/demo', method=['GET', 'POST', 'PUT'])\n", (822, 862), True, 'from flask import current_app as app\n'), ((244, 449), 'flask.render_template', 'render_template', (['"""index.html"""'], {'title': '"""Stonk Scraper"""', 'description': '"""A natural language processing web app that evaluates sentiment of Elon\'s twitter feed to determine if stonk will go up or down."""'}), '(\'index.html\', title=\'Stonk Scraper\', description=\n "A natural language processing web app that evaluates sentiment of Elon\'s twitter feed to determine if stonk will go up or down."\n )\n', (259, 449), False, 'from flask import render_template\n'), ((676, 785), 'flask.render_template', 'render_template', (['"""live.html"""'], {'title': '"""Stonk Scraper"""', 'description': '"""Live scraper for Elon\'s twitter feed."""'}), '(\'live.html\', title=\'Stonk Scraper\', description=\n "Live scraper for Elon\'s twitter feed.")\n', (691, 785), False, 'from flask import render_template\n'), ((1016, 1125), 'flask.render_template', 'render_template', (['"""demo.html"""'], {'title': '"""Stonk Scraper"""', 'description': '"""Demo scraper for Elon\'s twitter feed."""'}), '(\'demo.html\', title=\'Stonk Scraper\', description=\n "Demo scraper for Elon\'s twitter feed.")\n', (1031, 1125), False, 'from flask import render_template\n')]
|
from typing import Optional
from fastapi.encoders import jsonable_encoder
from .models import Application, ApplicationCreate, ApplicationUpdate
def get(*, db_session, app_id: int) -> Optional[Application]:
return db_session.query(Application).filter(Application.id == app_id).one_or_none()
def get_by_name(*, db_session, name: str) -> Optional[Application]:
return db_session.query(Application).filter(Application.name == name).one_or_none()
def get_all(*, db_session):
return db_session.query(Application)
def create(*, db_session, app_in: ApplicationCreate) -> Application:
app = Application(**app_in.dict())
db_session.add(app)
db_session.commit()
return app
def update(*, db_session, app: Application, app_in: ApplicationUpdate) -> Application:
app_data = jsonable_encoder(app)
update_data = app_in.dict(skip_defaults=True)
for field in app_data:
if field in update_data:
setattr(app, field, update_data[field])
db_session.add(app)
db_session.commit()
return app
def delete(*, db_session, app_id: int):
app = db_session.query(Application).filter(Application.id == app_id).one_or_none()
db_session.delete(app)
db_session.commit()
|
[
"fastapi.encoders.jsonable_encoder"
] |
[((803, 824), 'fastapi.encoders.jsonable_encoder', 'jsonable_encoder', (['app'], {}), '(app)\n', (819, 824), False, 'from fastapi.encoders import jsonable_encoder\n')]
|
import calendar
import numpy as np
import pandas as pd
import os
import shutil
import tables
import tempfile
import unittest
from datetime import datetime
from phildb.log_handler import LogHandler
class LogHandlerTest(unittest.TestCase):
def setUp(self):
self.tmp_dir = tempfile.mkdtemp()
self.log_file = os.path.join(self.tmp_dir, "log_file.hdf5")
self.create_datetime = calendar.timegm(
datetime(2015, 6, 28, 15, 25, 00).utctimetuple()
)
log_entries = {"C": [(1388620800, np.nan, 0), (1388707200, 3.0, 0)], "U": []}
with LogHandler(self.log_file, "w") as writer:
writer.create_skeleton()
with LogHandler(self.log_file, "a") as writer:
writer.write(log_entries, self.create_datetime)
self.update_datetime = calendar.timegm(
datetime(2015, 8, 1, 16, 25, 00).utctimetuple()
)
log_entries = {"C": [(1388707200, 4.0, 0)], "U": []}
with LogHandler(self.log_file, "a") as writer:
writer.write(log_entries, self.update_datetime)
self.second_update_datetime = calendar.timegm(
datetime(2015, 8, 10, 16, 25, 00).utctimetuple()
)
log_entries = {"C": [(1388707200, 5.0, 0)], "U": []}
with LogHandler(self.log_file, "a") as writer:
writer.write(log_entries, self.second_update_datetime)
def tearDown(self):
try:
shutil.rmtree(self.tmp_dir)
except OSError as e:
if e.errno != 2: # Code 2: No such file or directory.
raise
def test_logging(self):
log_file = os.path.join(self.tmp_dir, "log_file.hdf5")
create_datetime = calendar.timegm(
datetime(2015, 6, 28, 15, 25, 00).utctimetuple()
)
log_entries = {"C": [(1388620800, 2.0, 0), (1388707200, 3.0, 0)], "U": []}
with LogHandler(log_file, "w") as writer:
writer.create_skeleton()
with LogHandler(log_file, "a") as writer:
writer.write(log_entries, create_datetime)
update_datetime = calendar.timegm(
datetime(2015, 6, 28, 16, 25, 00).utctimetuple()
)
log_entries = {"C": [(1388620800, 3.0, 0), (1388707200, 4.0, 0)], "U": []}
with LogHandler(log_file, "a") as writer:
writer.write(log_entries, update_datetime)
with tables.open_file(log_file, "r") as hdf5_file:
log_grp = hdf5_file.get_node("/data")
self.assertEqual(len(log_grp.log), 4)
self.assertSequenceEqual(
log_grp.log[0], (1388620800, 2.0, 0, create_datetime)
)
self.assertSequenceEqual(
log_grp.log[1], (1388707200, 3.0, 0, create_datetime)
)
self.assertSequenceEqual(
log_grp.log[2], (1388620800, 3.0, 0, update_datetime)
)
self.assertSequenceEqual(
log_grp.log[3], (1388707200, 4.0, 0, update_datetime)
)
def test_nan_logging(self):
# Note: The write code under test is part of the setUp method.
with tables.open_file(self.log_file, "r") as hdf5_file:
log_grp = hdf5_file.get_node("/data")
self.assertSequenceEqual(
log_grp.log[0], (1388620800, -9999, 9999, self.create_datetime)
)
self.assertSequenceEqual(
log_grp.log[1], (1388707200, 3.0, 0, self.create_datetime)
)
self.assertSequenceEqual(
log_grp.log[2], (1388707200, 4.0, 0, self.update_datetime)
)
self.assertEqual(len(log_grp.log), 4)
def test_read_log(self):
data = {}
with LogHandler(self.log_file, "r") as reader:
data["original_data"] = reader.read(self.create_datetime)
data["middle_data"] = reader.read(self.update_datetime)
data["last_data"] = reader.read(self.second_update_datetime)
for k in data.keys():
self.assertEqual(
data[k].index[0],
pd.Timestamp("2014-01-02 00:00:00"),
"Incorrect start date in {0}".format(k),
)
self.assertEqual(
data[k].index[1],
pd.Timestamp("2014-01-03 00:00:00"),
"Incorrect end date in {0}".format(k),
)
self.assertEqual(len(data[k]), 2, "Incorrect length of '{0}'.".format(k))
self.assertTrue(
np.isnan(data[k].value[0]), "Incorrect first value for '{0}'.".format(k)
)
self.assertEqual(data["original_data"].value[1], 3.0)
self.assertEqual(data["middle_data"].value[1], 4.0)
self.assertEqual(data["last_data"].value[1], 5.0)
|
[
"pandas.Timestamp",
"numpy.isnan",
"datetime.datetime",
"tempfile.mkdtemp",
"shutil.rmtree",
"tables.open_file",
"os.path.join",
"phildb.log_handler.LogHandler"
] |
[((285, 303), 'tempfile.mkdtemp', 'tempfile.mkdtemp', ([], {}), '()\n', (301, 303), False, 'import tempfile\n'), ((328, 371), 'os.path.join', 'os.path.join', (['self.tmp_dir', '"""log_file.hdf5"""'], {}), "(self.tmp_dir, 'log_file.hdf5')\n", (340, 371), False, 'import os\n'), ((1638, 1681), 'os.path.join', 'os.path.join', (['self.tmp_dir', '"""log_file.hdf5"""'], {}), "(self.tmp_dir, 'log_file.hdf5')\n", (1650, 1681), False, 'import os\n'), ((592, 622), 'phildb.log_handler.LogHandler', 'LogHandler', (['self.log_file', '"""w"""'], {}), "(self.log_file, 'w')\n", (602, 622), False, 'from phildb.log_handler import LogHandler\n'), ((685, 715), 'phildb.log_handler.LogHandler', 'LogHandler', (['self.log_file', '"""a"""'], {}), "(self.log_file, 'a')\n", (695, 715), False, 'from phildb.log_handler import LogHandler\n'), ((981, 1011), 'phildb.log_handler.LogHandler', 'LogHandler', (['self.log_file', '"""a"""'], {}), "(self.log_file, 'a')\n", (991, 1011), False, 'from phildb.log_handler import LogHandler\n'), ((1285, 1315), 'phildb.log_handler.LogHandler', 'LogHandler', (['self.log_file', '"""a"""'], {}), "(self.log_file, 'a')\n", (1295, 1315), False, 'from phildb.log_handler import LogHandler\n'), ((1444, 1471), 'shutil.rmtree', 'shutil.rmtree', (['self.tmp_dir'], {}), '(self.tmp_dir)\n', (1457, 1471), False, 'import shutil\n'), ((1894, 1919), 'phildb.log_handler.LogHandler', 'LogHandler', (['log_file', '"""w"""'], {}), "(log_file, 'w')\n", (1904, 1919), False, 'from phildb.log_handler import LogHandler\n'), ((1982, 2007), 'phildb.log_handler.LogHandler', 'LogHandler', (['log_file', '"""a"""'], {}), "(log_file, 'a')\n", (1992, 2007), False, 'from phildb.log_handler import LogHandler\n'), ((2286, 2311), 'phildb.log_handler.LogHandler', 'LogHandler', (['log_file', '"""a"""'], {}), "(log_file, 'a')\n", (2296, 2311), False, 'from phildb.log_handler import LogHandler\n'), ((2392, 2423), 'tables.open_file', 'tables.open_file', (['log_file', '"""r"""'], {}), "(log_file, 'r')\n", 
(2408, 2423), False, 'import tables\n'), ((3147, 3183), 'tables.open_file', 'tables.open_file', (['self.log_file', '"""r"""'], {}), "(self.log_file, 'r')\n", (3163, 3183), False, 'import tables\n'), ((3748, 3778), 'phildb.log_handler.LogHandler', 'LogHandler', (['self.log_file', '"""r"""'], {}), "(self.log_file, 'r')\n", (3758, 3778), False, 'from phildb.log_handler import LogHandler\n'), ((4112, 4147), 'pandas.Timestamp', 'pd.Timestamp', (['"""2014-01-02 00:00:00"""'], {}), "('2014-01-02 00:00:00')\n", (4124, 4147), True, 'import pandas as pd\n'), ((4301, 4336), 'pandas.Timestamp', 'pd.Timestamp', (['"""2014-01-03 00:00:00"""'], {}), "('2014-01-03 00:00:00')\n", (4313, 4336), True, 'import pandas as pd\n'), ((4540, 4566), 'numpy.isnan', 'np.isnan', (['data[k].value[0]'], {}), '(data[k].value[0])\n', (4548, 4566), True, 'import numpy as np\n'), ((432, 464), 'datetime.datetime', 'datetime', (['(2015)', '(6)', '(28)', '(15)', '(25)', '(0)'], {}), '(2015, 6, 28, 15, 25, 0)\n', (440, 464), False, 'from datetime import datetime\n'), ((848, 879), 'datetime.datetime', 'datetime', (['(2015)', '(8)', '(1)', '(16)', '(25)', '(0)'], {}), '(2015, 8, 1, 16, 25, 0)\n', (856, 879), False, 'from datetime import datetime\n'), ((1151, 1183), 'datetime.datetime', 'datetime', (['(2015)', '(8)', '(10)', '(16)', '(25)', '(0)'], {}), '(2015, 8, 10, 16, 25, 0)\n', (1159, 1183), False, 'from datetime import datetime\n'), ((1737, 1769), 'datetime.datetime', 'datetime', (['(2015)', '(6)', '(28)', '(15)', '(25)', '(0)'], {}), '(2015, 6, 28, 15, 25, 0)\n', (1745, 1769), False, 'from datetime import datetime\n'), ((2130, 2162), 'datetime.datetime', 'datetime', (['(2015)', '(6)', '(28)', '(16)', '(25)', '(0)'], {}), '(2015, 6, 28, 16, 25, 0)\n', (2138, 2162), False, 'from datetime import datetime\n')]
|
from xml.etree.ElementTree import SubElement
from elifecrossref import mime_type, resource_url, tags
def set_component_list(parent, poa_article, crossref_config):
"""
Set the component_list from the article object component_list objects
"""
if not poa_article.component_list:
return
component_list_tag = SubElement(parent, "component_list")
# ignore excluded components based on the configuration settings
component_list = [
comp
for comp in poa_article.component_list
if comp.type not in crossref_config.get("component_exclude_types", [])
]
for comp in component_list:
set_component(component_list_tag, poa_article, comp, crossref_config)
def set_component(parent, poa_article, comp, crossref_config):
component_tag = SubElement(parent, "component")
component_tag.set("parent_relation", "isPartOf")
set_component_titles(component_tag, comp, crossref_config)
set_component_mime_type(component_tag, comp)
set_component_permissions(component_tag, comp, crossref_config)
set_component_doi(component_tag, poa_article, comp, crossref_config)
def set_component_titles(parent, comp, crossref_config):
titles_tag = SubElement(parent, "titles")
title_tag = SubElement(titles_tag, "title")
title_tag.text = comp.title
if comp.subtitle:
set_subtitle(titles_tag, comp, crossref_config.get("face_markup"))
def set_component_mime_type(parent, comp):
if comp.mime_type:
# Convert to allowed mime types for Crossref, if found
if mime_type.crossref_mime_type(comp.mime_type):
format_tag = SubElement(parent, "format")
format_tag.set("mime_type", mime_type.crossref_mime_type(comp.mime_type))
def set_component_permissions(parent, comp, crossref_config):
"""Specific license for the component"""
license_href = crossref_config.get("component_license_ref")
# First check if a license should be added
if not license_href or not comp.permissions:
return
if do_set_component_permissions(comp):
component_ai_program_tag = SubElement(parent, "ai:program")
component_ai_program_tag.set("name", "AccessIndicators")
license_ref_tag = SubElement(component_ai_program_tag, "ai:license_ref")
license_ref_tag.text = license_href
def do_set_component_permissions(comp):
"""decide whether to set a component permissions"""
if not comp.permissions:
return None
for permission in comp.permissions:
# set the component permissions if it has any copyright statement or license value
if permission.get("copyright_statement") or permission.get("license"):
return True
return False
def set_component_doi(parent, poa_article, comp, crossref_config):
if not comp.doi:
return
# Try generating a resource value then continue
resource = resource_url.generate_resource_url(comp, poa_article, crossref_config)
if resource:
doi_data_tag = SubElement(parent, "doi_data")
doi_tag_tag = SubElement(doi_data_tag, "doi")
doi_tag_tag.text = comp.doi
resource_tag = SubElement(doi_data_tag, "resource")
resource_tag.text = resource
def set_subtitle(parent, component, face_markup=None):
tag_name = "subtitle"
# Use <i> tags, not <italic> tags, <b> tags not <bold>
if component.subtitle:
if face_markup is True:
tags.add_inline_tag(parent, tag_name, component.subtitle)
else:
tags.add_clean_tag(parent, tag_name, component.subtitle)
|
[
"elifecrossref.tags.add_inline_tag",
"elifecrossref.tags.add_clean_tag",
"xml.etree.ElementTree.SubElement",
"elifecrossref.resource_url.generate_resource_url",
"elifecrossref.mime_type.crossref_mime_type"
] |
[((335, 371), 'xml.etree.ElementTree.SubElement', 'SubElement', (['parent', '"""component_list"""'], {}), "(parent, 'component_list')\n", (345, 371), False, 'from xml.etree.ElementTree import SubElement\n'), ((804, 835), 'xml.etree.ElementTree.SubElement', 'SubElement', (['parent', '"""component"""'], {}), "(parent, 'component')\n", (814, 835), False, 'from xml.etree.ElementTree import SubElement\n'), ((1219, 1247), 'xml.etree.ElementTree.SubElement', 'SubElement', (['parent', '"""titles"""'], {}), "(parent, 'titles')\n", (1229, 1247), False, 'from xml.etree.ElementTree import SubElement\n'), ((1264, 1295), 'xml.etree.ElementTree.SubElement', 'SubElement', (['titles_tag', '"""title"""'], {}), "(titles_tag, 'title')\n", (1274, 1295), False, 'from xml.etree.ElementTree import SubElement\n'), ((2908, 2978), 'elifecrossref.resource_url.generate_resource_url', 'resource_url.generate_resource_url', (['comp', 'poa_article', 'crossref_config'], {}), '(comp, poa_article, crossref_config)\n', (2942, 2978), False, 'from elifecrossref import mime_type, resource_url, tags\n'), ((1567, 1611), 'elifecrossref.mime_type.crossref_mime_type', 'mime_type.crossref_mime_type', (['comp.mime_type'], {}), '(comp.mime_type)\n', (1595, 1611), False, 'from elifecrossref import mime_type, resource_url, tags\n'), ((2115, 2147), 'xml.etree.ElementTree.SubElement', 'SubElement', (['parent', '"""ai:program"""'], {}), "(parent, 'ai:program')\n", (2125, 2147), False, 'from xml.etree.ElementTree import SubElement\n'), ((2239, 2293), 'xml.etree.ElementTree.SubElement', 'SubElement', (['component_ai_program_tag', '"""ai:license_ref"""'], {}), "(component_ai_program_tag, 'ai:license_ref')\n", (2249, 2293), False, 'from xml.etree.ElementTree import SubElement\n'), ((3019, 3049), 'xml.etree.ElementTree.SubElement', 'SubElement', (['parent', '"""doi_data"""'], {}), "(parent, 'doi_data')\n", (3029, 3049), False, 'from xml.etree.ElementTree import SubElement\n'), ((3072, 3103), 
'xml.etree.ElementTree.SubElement', 'SubElement', (['doi_data_tag', '"""doi"""'], {}), "(doi_data_tag, 'doi')\n", (3082, 3103), False, 'from xml.etree.ElementTree import SubElement\n'), ((3163, 3199), 'xml.etree.ElementTree.SubElement', 'SubElement', (['doi_data_tag', '"""resource"""'], {}), "(doi_data_tag, 'resource')\n", (3173, 3199), False, 'from xml.etree.ElementTree import SubElement\n'), ((1638, 1666), 'xml.etree.ElementTree.SubElement', 'SubElement', (['parent', '"""format"""'], {}), "(parent, 'format')\n", (1648, 1666), False, 'from xml.etree.ElementTree import SubElement\n'), ((3450, 3507), 'elifecrossref.tags.add_inline_tag', 'tags.add_inline_tag', (['parent', 'tag_name', 'component.subtitle'], {}), '(parent, tag_name, component.subtitle)\n', (3469, 3507), False, 'from elifecrossref import mime_type, resource_url, tags\n'), ((3534, 3590), 'elifecrossref.tags.add_clean_tag', 'tags.add_clean_tag', (['parent', 'tag_name', 'component.subtitle'], {}), '(parent, tag_name, component.subtitle)\n', (3552, 3590), False, 'from elifecrossref import mime_type, resource_url, tags\n'), ((1707, 1751), 'elifecrossref.mime_type.crossref_mime_type', 'mime_type.crossref_mime_type', (['comp.mime_type'], {}), '(comp.mime_type)\n', (1735, 1751), False, 'from elifecrossref import mime_type, resource_url, tags\n')]
|
"""
"""
from django.contrib import admin
from challenges.models import Challenge
from challenges.models import FilmChallenge
class ChallengeAdmin(admin.ModelAdmin):
list_display = ["task"]
class FilmChallengeAdmin(admin.ModelAdmin):
list_display = ["film"]
admin.site.register(Challenge, ChallengeAdmin)
admin.site.register(FilmChallenge, FilmChallengeAdmin)
|
[
"django.contrib.admin.site.register"
] |
[((268, 314), 'django.contrib.admin.site.register', 'admin.site.register', (['Challenge', 'ChallengeAdmin'], {}), '(Challenge, ChallengeAdmin)\n', (287, 314), False, 'from django.contrib import admin\n'), ((315, 369), 'django.contrib.admin.site.register', 'admin.site.register', (['FilmChallenge', 'FilmChallengeAdmin'], {}), '(FilmChallenge, FilmChallengeAdmin)\n', (334, 369), False, 'from django.contrib import admin\n')]
|
# Copyright 2021 <NAME>. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pandas as pd
from typing import List, Union
from loguru import logger
from readability_transformers.file_utils import CachedDataset
from readability_transformers.dataset import Dataset
DATASET_ID = "commonlit"
DATASET_ZIP_URL = 'https://1theta-readability-transformers.s3.us-east-2.amazonaws.com/commonlit.tar.gz'
DATAFILES_META = {
"train": "data/train.csv",
"test": "data/test.csv",
"sample": "data/sample_submission.csv"
}
class CommonLitDataset(Dataset):
def __init__(self, label: str):
"""Loads the CommonLit dataset.
Args:
label (str): CommonLit dataset consists of the "train" dataset and the
"test" dataset used for Kaggle evaluation.
cache (bool): if set to True, caches the train-valid-test split when called. Usually we train the
SentenceTransformer first then train the ReadabilityPrediction model. We usually want to use
the same splitted train-valid-test throughout the whole process (unless doing some sort of ablation component study).
Returns:
data (pd.DataFrame): .csv -> pd.DataFrame instance of the dataset.
"""
super().__init__(DATASET_ID, DATASET_ZIP_URL, DATAFILES_META)
self.cached_dataset = CachedDataset(DATASET_ID, DATASET_ZIP_URL, DATAFILES_META)
data_url = self.cached_dataset.get_datafile_from_id(label)
self.data = pd.read_csv(data_url)
for idx, row in self.data.iterrows():
self.data.loc[idx, "excerpt"] = self.basic_preprocess_text(row["excerpt"])
def basic_preprocess_text(self, text_input: Union[str, List[str]]) -> str:
text = text_input
if isinstance(text_input, str):
text = [text_input]
collect = []
for one_text in text:
one_text = one_text.replace("\n", " ").replace("\t", " ").replace(" ", " ")
one_text = one_text.replace("‘", "'").replace(" – ","—")
one_text = one_text.strip()
collect.append(one_text)
if isinstance(text_input, str):
return collect[0]
else:
return collect
|
[
"pandas.read_csv",
"readability_transformers.file_utils.CachedDataset"
] |
[((1888, 1946), 'readability_transformers.file_utils.CachedDataset', 'CachedDataset', (['DATASET_ID', 'DATASET_ZIP_URL', 'DATAFILES_META'], {}), '(DATASET_ID, DATASET_ZIP_URL, DATAFILES_META)\n', (1901, 1946), False, 'from readability_transformers.file_utils import CachedDataset\n'), ((2036, 2057), 'pandas.read_csv', 'pd.read_csv', (['data_url'], {}), '(data_url)\n', (2047, 2057), True, 'import pandas as pd\n')]
|
from django.urls import path
from . import views
from django.conf.urls import url
from django.contrib.auth.views import LoginView, LogoutView
urlpatterns = [
path('',views.profile, name='profile'),
path('login/',LoginView.as_view(template_name='accounts/login.html'),name='login'),
path('logout/',LogoutView.as_view(template_name='accounts/logout.html')),
path('register/',views.register,name = 'register'),
path('profile/',views.profile, name='profile'),
path('profile/edit/',views.edit_profile,name='edit_profile'),
path('change-password/',views.change_password,name='change_password'),
path('admin/',views.admin_login,name='admin_login'),
path('validate',views.validation,name='validate'),
# path('profile/<int:id>/',views.view_profile,name="profile-detail"),
# path('subscribe/<int:id>/',views.subscribe,name='subscribe'),
# path('sub-success/',views.sub_success,name='sub-success'),
# path('test/',views.test_view,name = 'test'),
]
|
[
"django.contrib.auth.views.LogoutView.as_view",
"django.contrib.auth.views.LoginView.as_view",
"django.urls.path"
] |
[((163, 202), 'django.urls.path', 'path', (['""""""', 'views.profile'], {'name': '"""profile"""'}), "('', views.profile, name='profile')\n", (167, 202), False, 'from django.urls import path\n'), ((373, 423), 'django.urls.path', 'path', (['"""register/"""', 'views.register'], {'name': '"""register"""'}), "('register/', views.register, name='register')\n", (377, 423), False, 'from django.urls import path\n'), ((429, 476), 'django.urls.path', 'path', (['"""profile/"""', 'views.profile'], {'name': '"""profile"""'}), "('profile/', views.profile, name='profile')\n", (433, 476), False, 'from django.urls import path\n'), ((481, 543), 'django.urls.path', 'path', (['"""profile/edit/"""', 'views.edit_profile'], {'name': '"""edit_profile"""'}), "('profile/edit/', views.edit_profile, name='edit_profile')\n", (485, 543), False, 'from django.urls import path\n'), ((547, 618), 'django.urls.path', 'path', (['"""change-password/"""', 'views.change_password'], {'name': '"""change_password"""'}), "('change-password/', views.change_password, name='change_password')\n", (551, 618), False, 'from django.urls import path\n'), ((622, 675), 'django.urls.path', 'path', (['"""admin/"""', 'views.admin_login'], {'name': '"""admin_login"""'}), "('admin/', views.admin_login, name='admin_login')\n", (626, 675), False, 'from django.urls import path\n'), ((679, 730), 'django.urls.path', 'path', (['"""validate"""', 'views.validation'], {'name': '"""validate"""'}), "('validate', views.validation, name='validate')\n", (683, 730), False, 'from django.urls import path\n'), ((221, 275), 'django.contrib.auth.views.LoginView.as_view', 'LoginView.as_view', ([], {'template_name': '"""accounts/login.html"""'}), "(template_name='accounts/login.html')\n", (238, 275), False, 'from django.contrib.auth.views import LoginView, LogoutView\n'), ((310, 366), 'django.contrib.auth.views.LogoutView.as_view', 'LogoutView.as_view', ([], {'template_name': '"""accounts/logout.html"""'}), 
"(template_name='accounts/logout.html')\n", (328, 366), False, 'from django.contrib.auth.views import LoginView, LogoutView\n')]
|
__author__ = 'royrusso'
import json
import logging
import jmespath
import pytest
LOGGER = logging.getLogger(__name__)
pytest_plugins = ["docker_compose"]
@pytest.mark.es_versions
def test_get_cluster_summary(session_scoped_container_getter, fixture):
fixture.add_all_clusters(session_scoped_container_getter, clear_first=True)
response = fixture.app.get('/api/clusters/%s/_summary' % fixture.cluster_name)
assert 200 == response.status_code
res = fixture.get_response_data(response)
assert fixture.has_all_keys(fixture.config.KEYS_CLUSTER_SUMMARY, res['data'][0].keys()) is True
@pytest.mark.es_versions
def test_get_cluster_health(fixture):
response = fixture.app.get('/api/clusters/%s/_health' % fixture.cluster_name)
assert 200 == response.status_code
res = fixture.get_response_data(response)
assert fixture.has_all_keys(fixture.config.KEYS_CLUSTER_HEALTH, res['data'][0].keys()) is True
@pytest.mark.es_versions
def test_get_cluster_state(fixture):
response = fixture.app.get('/api/clusters/%s/_state' % fixture.cluster_name)
assert 200 == response.status_code
res = fixture.get_response_data(response)
assert fixture.has_all_keys(fixture.config.KEYS_CLUSTER_STATE, res['data'][0].keys()) is True
@pytest.mark.es_versions
def test_get_cluster_stats(fixture):
response = fixture.app.get('/api/clusters/%s/_stats' % fixture.cluster_name)
assert 200 == response.status_code
res = fixture.get_response_data(response)
assert fixture.has_all_keys(fixture.config.KEYS_CLUSTER_STATS, res['data'][0].keys()) is True
@pytest.mark.es_versions
def test_get_cluster_pending_tasks(fixture):
response = fixture.app.get('/api/clusters/%s/_pending_tasks' % fixture.cluster_name)
assert 200 == response.status_code
res = fixture.get_response_data(response)
assert fixture.has_all_keys(fixture.config.KEYS_CLUSTER_PENDING_TASKS, res['data'][0].keys()) is True
@pytest.mark.es_versions
def test_get_cluster_settings(fixture):
response = fixture.app.get('/api/clusters/%s/_settings' % fixture.cluster_name)
assert 200 == response.status_code
res = fixture.get_response_data(response)
assert fixture.has_all_keys(fixture.config.KEYS_CLUSTER_SETTINGS, res['data'][0].keys()) is True
@pytest.mark.es_versions
def test_put_cluster_settings(fixture):
body = {
"transient": {
"discovery.zen.minimum_master_nodes": 1
}
}
response = fixture.app.put('/api/clusters/%s/_settings' % fixture.cluster_name, data=json.dumps(body))
assert 200 == response.status_code
response = fixture.app.get('/api/clusters/%s/_settings' % fixture.cluster_name)
assert 200 == response.status_code
res = fixture.get_response_data(response)
assert jmespath.search("transient.discovery.zen.minimum_master_nodes", res['data'][0]) == "1"
|
[
"jmespath.search",
"logging.getLogger",
"json.dumps"
] |
[((93, 120), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (110, 120), False, 'import logging\n'), ((2786, 2865), 'jmespath.search', 'jmespath.search', (['"""transient.discovery.zen.minimum_master_nodes"""', "res['data'][0]"], {}), "('transient.discovery.zen.minimum_master_nodes', res['data'][0])\n", (2801, 2865), False, 'import jmespath\n'), ((2548, 2564), 'json.dumps', 'json.dumps', (['body'], {}), '(body)\n', (2558, 2564), False, 'import json\n')]
|
import bincopy
import struct
# goal is to extend the functionality and update the default creation parameter of a BinFile object.
class MemFile(bincopy.BinFile):
def __init__(self, filenames=None, overwrite=True, word_size_bits=16, header_encoding='utf-8'):
super().__init__(filenames=filenames, overwrite=overwrite, word_size_bits=word_size_bits, header_encoding=header_encoding)
@staticmethod
def read_evb_bin_file(evb_bin_file, at_address=0x2400):
# EVB BIN file format is little-endian, while bin_copy; the hex file package expects big-endian!
# so here we do the trick with unpack/pack!
with open(evb_bin_file, mode='rb') as file: # b is important -> binary
fileContent = file.read()
fmt_in = "<{:d}H".format (int(len(fileContent)/2))
fmt_out = ">{:d}H".format (int(len(fileContent)/2))
data = list(struct.unpack(fmt_in, fileContent))
bin_data = struct.pack(fmt_out, *data)
bin_file = MemFile()
bin_file.add_binary(bin_data, address=at_address, overwrite=True)
return bin_file
def add_32(self, address, int_data):
if type(int_data) is list:
byte_array = struct.pack('>{}L'.format(len(int_data)), *int_data)
else:
byte_array = struct.pack('>L', int_data)
# silly mistake for 90632 in the eeprom map... LWORD is before HWORD
for i in range(0, len(byte_array), 4):
self.add_binary(byte_array[i+2:i+4], address=address, overwrite=True)
self.add_binary(byte_array[i+0:i+2], address=address+1, overwrite=True)
#
# for new design use this:
# self.add_binary(byte_array, address=address, overwrite=True)
def add_32s(self, address, int_data):
if type(int_data) is list:
byte_array = struct.pack('>{}l'.format(len(int_data)), *int_data)
else:
byte_array = struct.pack('>l', int_data)
# silly mistake for 90632 in the eeprom map... LWORD is before HWORD
for i in range(0, len(byte_array), 4):
self.add_binary(byte_array[i+2:i+4], address=address, overwrite=True)
self.add_binary(byte_array[i+0:i+2], address=address+1, overwrite=True)
#
# for new design use this:
# self.add_binary(byte_array, address=address, overwrite=True)
def add_16(self, address, int_data):
if type(int_data) is list:
byte_array = struct.pack('>{}H'.format(len(int_data)), *int_data)
else:
byte_array = struct.pack('>H', int_data)
self.add_binary(byte_array, address=address, overwrite=True)
def add_16s(self, address, int_data):
if type(int_data) is list:
byte_array = struct.pack('>{}h'.format(len(int_data)), *int_data)
else:
byte_array = struct.pack('>h', int_data)
self.add_binary(byte_array, address=address, overwrite=True)
def write_evb_bin_file(self, evb_bin_file_name = "evb90632.bin"):
if evb_bin_file_name is None:
return None
if not evb_bin_file_name.endswith('.bin'):
evb_bin_file_name += '.bin'
bin_data_in = self.as_binary()
fmt_in = ">{:d}H".format (int(len(bin_data_in)/2))
fmt_out = "<{:d}H".format (int(len(bin_data_in)/2))
data = list(struct.unpack(fmt_in, bin_data_in))
bin_data = struct.pack(fmt_out, *data)
with open(evb_bin_file_name, "wb") as out_file:
out_file.write(bin_data)
return evb_bin_file_name
def write_hex_file(self, hex_file_name = "evb90632.hex"):
if hex_file_name is None:
return self.as_ihex()
if not hex_file_name.endswith('.hex'):
hex_file_name += '.hex'
with open(hex_file_name, "w") as out_file:
out_file.write(self.as_ihex())
return hex_file_name
def get_address_data_pairs(self):
pairs = []
for seg in self.segments:
for addr in range(int((seg.maximum_address-seg.minimum_address) / seg._word_size_bytes)):
addr += int(seg.minimum_address / seg._word_size_bytes)
pairs.append ((addr, self[addr]))
return pairs
|
[
"struct.unpack",
"struct.pack"
] |
[((944, 971), 'struct.pack', 'struct.pack', (['fmt_out', '*data'], {}), '(fmt_out, *data)\n', (955, 971), False, 'import struct\n'), ((3405, 3432), 'struct.pack', 'struct.pack', (['fmt_out', '*data'], {}), '(fmt_out, *data)\n', (3416, 3432), False, 'import struct\n'), ((889, 923), 'struct.unpack', 'struct.unpack', (['fmt_in', 'fileContent'], {}), '(fmt_in, fileContent)\n', (902, 923), False, 'import struct\n'), ((1295, 1322), 'struct.pack', 'struct.pack', (['""">L"""', 'int_data'], {}), "('>L', int_data)\n", (1306, 1322), False, 'import struct\n'), ((1925, 1952), 'struct.pack', 'struct.pack', (['""">l"""', 'int_data'], {}), "('>l', int_data)\n", (1936, 1952), False, 'import struct\n'), ((2554, 2581), 'struct.pack', 'struct.pack', (['""">H"""', 'int_data'], {}), "('>H', int_data)\n", (2565, 2581), False, 'import struct\n'), ((2847, 2874), 'struct.pack', 'struct.pack', (['""">h"""', 'int_data'], {}), "('>h', int_data)\n", (2858, 2874), False, 'import struct\n'), ((3350, 3384), 'struct.unpack', 'struct.unpack', (['fmt_in', 'bin_data_in'], {}), '(fmt_in, bin_data_in)\n', (3363, 3384), False, 'import struct\n')]
|
import numpy as np
import pymc as pm
challenger_data = np.genfromtxt(
"../../Chapter2_MorePyMC/data/challenger_data.csv",
skip_header=1, usecols=[1, 2], missing_values="NA", delimiter=",")
# drop the NA values
challenger_data = challenger_data[~np.isnan(challenger_data[:, 1])]
temperature = challenger_data[:, 0]
D = challenger_data[:, 1] # defect or not?
beta = pm.Normal("beta", 0, 0.001, value=0)
alpha = pm.Normal("alpha", 0, 0.001, value=0)
@pm.deterministic
def p(temp=temperature, alpha=alpha, beta=beta):
return 1.0 / (1. + np.exp(beta * temperature + alpha))
observed = pm.Bernoulli("bernoulli_obs", p, value=D, observed=True)
model = pm.Model([observed, beta, alpha])
# mysterious code to be explained in Chapter 3
map_ = pm.MAP(model)
map_.fit()
mcmc = pm.MCMC(model)
mcmc.sample(260000, 220000, 2)
|
[
"pymc.MAP",
"pymc.Model",
"numpy.genfromtxt",
"pymc.MCMC",
"numpy.isnan",
"pymc.Bernoulli",
"numpy.exp",
"pymc.Normal"
] |
[((57, 193), 'numpy.genfromtxt', 'np.genfromtxt', (['"""../../Chapter2_MorePyMC/data/challenger_data.csv"""'], {'skip_header': '(1)', 'usecols': '[1, 2]', 'missing_values': '"""NA"""', 'delimiter': '""","""'}), "('../../Chapter2_MorePyMC/data/challenger_data.csv',\n skip_header=1, usecols=[1, 2], missing_values='NA', delimiter=',')\n", (70, 193), True, 'import numpy as np\n'), ((378, 414), 'pymc.Normal', 'pm.Normal', (['"""beta"""', '(0)', '(0.001)'], {'value': '(0)'}), "('beta', 0, 0.001, value=0)\n", (387, 414), True, 'import pymc as pm\n'), ((423, 460), 'pymc.Normal', 'pm.Normal', (['"""alpha"""', '(0)', '(0.001)'], {'value': '(0)'}), "('alpha', 0, 0.001, value=0)\n", (432, 460), True, 'import pymc as pm\n'), ((602, 658), 'pymc.Bernoulli', 'pm.Bernoulli', (['"""bernoulli_obs"""', 'p'], {'value': 'D', 'observed': '(True)'}), "('bernoulli_obs', p, value=D, observed=True)\n", (614, 658), True, 'import pymc as pm\n'), ((668, 701), 'pymc.Model', 'pm.Model', (['[observed, beta, alpha]'], {}), '([observed, beta, alpha])\n', (676, 701), True, 'import pymc as pm\n'), ((757, 770), 'pymc.MAP', 'pm.MAP', (['model'], {}), '(model)\n', (763, 770), True, 'import pymc as pm\n'), ((789, 803), 'pymc.MCMC', 'pm.MCMC', (['model'], {}), '(model)\n', (796, 803), True, 'import pymc as pm\n'), ((255, 286), 'numpy.isnan', 'np.isnan', (['challenger_data[:, 1]'], {}), '(challenger_data[:, 1])\n', (263, 286), True, 'import numpy as np\n'), ((553, 587), 'numpy.exp', 'np.exp', (['(beta * temperature + alpha)'], {}), '(beta * temperature + alpha)\n', (559, 587), True, 'import numpy as np\n')]
|
from cs50 import get_float
while True:
owed = round(get_float("Change owed: ") * 100)
if owed > 0:
break
coins = [25, 10, 5, 1]
change = 0
for c in coins:
if owed == 0:
break
change += owed // c
owed = owed % c
print(change)
|
[
"cs50.get_float"
] |
[((57, 83), 'cs50.get_float', 'get_float', (['"""Change owed: """'], {}), "('Change owed: ')\n", (66, 83), False, 'from cs50 import get_float\n')]
|
import datetime
from django.test import TestCase
from django.urls import include, path, reverse
from rest_framework.test import APITestCase
from rest_framework.test import APIRequestFactory, URLPatternsTestCase
from rest_framework import status
from rest_framework.routers import DefaultRouter
from log_api.models import User, Application, Execution, Event, Machine
from log_api import views
# Create your tests here.
class TestUserAPI(APITestCase, URLPatternsTestCase):
router = DefaultRouter()
router.register(r"users", views.UserViewSet)
urlpatterns = [path("", include(router.urls))]
def test_create_user(self):
user_data = dict(name="Murilo", email="<EMAIL>", password="<PASSWORD>")
response = self.client.post("/users/", user_data, format="json", follow=True)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.assertEqual(User.objects.count(), 1)
self.assertEqual(User.objects.get().name, "Murilo")
def test_retrieve_user_by_id(self):
pass
def test_delete_user(self):
pass
def test_edit_user(self):
pass
|
[
"log_api.models.User.objects.count",
"log_api.models.User.objects.get",
"rest_framework.routers.DefaultRouter",
"django.urls.include"
] |
[((487, 502), 'rest_framework.routers.DefaultRouter', 'DefaultRouter', ([], {}), '()\n', (500, 502), False, 'from rest_framework.routers import DefaultRouter\n'), ((581, 601), 'django.urls.include', 'include', (['router.urls'], {}), '(router.urls)\n', (588, 601), False, 'from django.urls import include, path, reverse\n'), ((900, 920), 'log_api.models.User.objects.count', 'User.objects.count', ([], {}), '()\n', (918, 920), False, 'from log_api.models import User, Application, Execution, Event, Machine\n'), ((950, 968), 'log_api.models.User.objects.get', 'User.objects.get', ([], {}), '()\n', (966, 968), False, 'from log_api.models import User, Application, Execution, Event, Machine\n')]
|
###########################################################################################
################ Python script to create a nested JSON object per study ###################
############################## <NAME> 13/12/2021 #################################
# import libraries
import pandas as pd
import os
import json
# Change the working directory
working_directory_path='C:/Users/tkuijpe1/OneDrive - TU Eindhoven/Documents/05_MDR/02_BiomaterialsAtlas/01_TopoChip/01_Studies/01_ALPScreen'
os.chdir(working_directory_path)
# Files should have the format for studyDescription, studyDesign, and studyResults
studyDescription=pd.read_csv('StudyDescription.txt',sep='\t')
studyDesign=pd.read_csv('StudyDesign.txt',sep='\t')
studyResults=pd.read_csv('StudyResults.txt',sep='\t')
study_data=dict()
study_data['Description']={}
study_data['Design']={}
study_data['Results']={}
i=0
for x in studyDescription.Key:
study_data['Description'][x]=studyDescription.Value[i]
i=i+1
j=0
for y in studyDesign.Key:
study_data['Design'][y]=studyDesign.Value[j]
j=j+1
k=0
for z in studyResults.Key:
study_data['Results'][z]=studyResults.Value[k]
k=k+1
with open('ALP_BMA.json', 'w') as json_file:
json.dump(study_data, json_file)
|
[
"pandas.read_csv",
"json.dump",
"os.chdir"
] |
[((502, 534), 'os.chdir', 'os.chdir', (['working_directory_path'], {}), '(working_directory_path)\n', (510, 534), False, 'import os\n'), ((636, 681), 'pandas.read_csv', 'pd.read_csv', (['"""StudyDescription.txt"""'], {'sep': '"""\t"""'}), "('StudyDescription.txt', sep='\\t')\n", (647, 681), True, 'import pandas as pd\n'), ((693, 733), 'pandas.read_csv', 'pd.read_csv', (['"""StudyDesign.txt"""'], {'sep': '"""\t"""'}), "('StudyDesign.txt', sep='\\t')\n", (704, 733), True, 'import pandas as pd\n'), ((746, 787), 'pandas.read_csv', 'pd.read_csv', (['"""StudyResults.txt"""'], {'sep': '"""\t"""'}), "('StudyResults.txt', sep='\\t')\n", (757, 787), True, 'import pandas as pd\n'), ((1220, 1252), 'json.dump', 'json.dump', (['study_data', 'json_file'], {}), '(study_data, json_file)\n', (1229, 1252), False, 'import json\n')]
|
import unittest
import uuid
import datetime
from boto.mturk.question import ExternalQuestion
from _init_environment import SetHostMTurkConnection, external_url, \
config_environment
class Test(unittest.TestCase):
def setUp(self):
config_environment()
def test_create_hit_external(self):
q = ExternalQuestion(external_url=external_url, frame_height=800)
conn = SetHostMTurkConnection()
keywords=['boto', 'test', 'doctest']
create_hit_rs = conn.create_hit(question=q, lifetime=60*65, max_assignments=2, title="Boto External Question Test", keywords=keywords, reward = 0.05, duration=60*6, approval_delay=60*60, annotation='An annotation from boto external question test', response_groups=['Minimal', 'HITDetail', 'HITQuestion', 'HITAssignmentSummary',])
assert(create_hit_rs.status == True)
if __name__ == "__main__":
unittest.main()
|
[
"unittest.main",
"boto.mturk.question.ExternalQuestion",
"_init_environment.config_environment",
"_init_environment.SetHostMTurkConnection"
] |
[((948, 963), 'unittest.main', 'unittest.main', ([], {}), '()\n', (961, 963), False, 'import unittest\n'), ((264, 284), '_init_environment.config_environment', 'config_environment', ([], {}), '()\n', (282, 284), False, 'from _init_environment import SetHostMTurkConnection, external_url, config_environment\n'), ((350, 411), 'boto.mturk.question.ExternalQuestion', 'ExternalQuestion', ([], {'external_url': 'external_url', 'frame_height': '(800)'}), '(external_url=external_url, frame_height=800)\n', (366, 411), False, 'from boto.mturk.question import ExternalQuestion\n'), ((435, 459), '_init_environment.SetHostMTurkConnection', 'SetHostMTurkConnection', ([], {}), '()\n', (457, 459), False, 'from _init_environment import SetHostMTurkConnection, external_url, config_environment\n')]
|
import os
import time
from gtts import gTTS
import pygame
from pygame import USEREVENT
from pygame import mixer
from pygame._sdl2 import get_num_audio_devices, get_audio_device_name
language = input("Language [en, ar... etc]:\n")
accent = input("Accent: [com.au, co.uk, com, ca, co.in, ie, co.za, ca, fr, com.br, pt, com.mx, es]:\n")
if accent == "":
accent = "com"
os.system("cls")
os.system("title Google Text-to-Speech")
def main():
mytext = input("Message: ")
myobj = gTTS(text=mytext, tld=accent, lang=language)
myobj.save("voice.mp3")
mixer.init(devicename="CABLE Input (VB-Audio Virtual Cable)")
mixer.music.load("voice.mp3")
mixer.music.play()
while mixer.music.get_busy():
pass
mixer.music.unload()
os.remove("voice.mp3")
os.system("cls")
while True:
main()
|
[
"os.remove",
"gtts.gTTS",
"pygame.mixer.init",
"pygame.mixer.music.play",
"os.system",
"pygame.mixer.music.unload",
"pygame.mixer.music.get_busy",
"pygame.mixer.music.load"
] |
[((389, 405), 'os.system', 'os.system', (['"""cls"""'], {}), "('cls')\n", (398, 405), False, 'import os\n'), ((407, 447), 'os.system', 'os.system', (['"""title Google Text-to-Speech"""'], {}), "('title Google Text-to-Speech')\n", (416, 447), False, 'import os\n'), ((509, 553), 'gtts.gTTS', 'gTTS', ([], {'text': 'mytext', 'tld': 'accent', 'lang': 'language'}), '(text=mytext, tld=accent, lang=language)\n', (513, 553), False, 'from gtts import gTTS\n'), ((590, 651), 'pygame.mixer.init', 'mixer.init', ([], {'devicename': '"""CABLE Input (VB-Audio Virtual Cable)"""'}), "(devicename='CABLE Input (VB-Audio Virtual Cable)')\n", (600, 651), False, 'from pygame import mixer\n'), ((659, 688), 'pygame.mixer.music.load', 'mixer.music.load', (['"""voice.mp3"""'], {}), "('voice.mp3')\n", (675, 688), False, 'from pygame import mixer\n'), ((694, 712), 'pygame.mixer.music.play', 'mixer.music.play', ([], {}), '()\n', (710, 712), False, 'from pygame import mixer\n'), ((724, 746), 'pygame.mixer.music.get_busy', 'mixer.music.get_busy', ([], {}), '()\n', (744, 746), False, 'from pygame import mixer\n'), ((767, 787), 'pygame.mixer.music.unload', 'mixer.music.unload', ([], {}), '()\n', (785, 787), False, 'from pygame import mixer\n'), ((793, 815), 'os.remove', 'os.remove', (['"""voice.mp3"""'], {}), "('voice.mp3')\n", (802, 815), False, 'import os\n'), ((821, 837), 'os.system', 'os.system', (['"""cls"""'], {}), "('cls')\n", (830, 837), False, 'import os\n')]
|
import unittest
from Graph import GFD
class Test_GFD(unittest.TestCase):
def setUp(self):
self.gfd = GFD('person')
def test_initial(self):
self.assertEqual(1, len(self.gfd.nodes))
self.assertEqual(0, len(self.gfd.edges))
self.assertEqual('person', self.gfd.nodes[0].type)
def test_add_relation(self):
self.gfd, id1, id2 = self.gfd.add_relation('person', 1, 'product', None, 'create')
self.assertEqual(2, len(self.gfd.nodes))
self.assertEqual(1, len(self.gfd.edges))
self.assertEqual('person', self.gfd.nodes[0].type)
self.assertEqual('product', self.gfd.nodes[1].type)
self.assertEqual('create', self.gfd.edges[0].relation)
self.gfd = self.gfd.add_relation('person', None, 'person', 1, 'father')[0]
self.assertEqual(3, len(self.gfd.nodes))
self.assertEqual(2, len(self.gfd.edges))
self.assertEqual('person', self.gfd.nodes[0].type)
self.assertEqual('product', self.gfd.nodes[1].type)
self.assertEqual('person', self.gfd.nodes[2].type)
self.assertEqual('create', self.gfd.edges[0].relation)
self.assertEqual('father', self.gfd.edges[1].relation)
self.gfd = self.gfd.add_relation('person', 3, 'product', 2, 'create')[0]
self.assertEqual(3, len(self.gfd.nodes))
self.assertEqual(3, len(self.gfd.edges))
self.assertEqual('person', self.gfd.nodes[0].type)
self.assertEqual('product', self.gfd.nodes[1].type)
self.assertEqual('person', self.gfd.nodes[2].type)
self.assertEqual('create', self.gfd.edges[0].relation)
self.assertEqual('father', self.gfd.edges[1].relation)
self.assertEqual('create', self.gfd.edges[2].relation)
def test_has_realtion(self):
self.gfd = self.gfd.add_relation('person', 1, 'product', None, 'create')[0]
self.gfd = self.gfd.add_relation('person', None, 'person', 1, 'father')[0]
self.gfd = self.gfd.add_relation('person', 3, 'product', 2, 'create')[0]
self.assertTrue(self.gfd.has_relation('create', 1, 2))
self.assertTrue(self.gfd.has_relation('father', 3, 1))
self.assertTrue(self.gfd.has_relation('create', 3, 2))
self.assertFalse(self.gfd.has_relation('create', 2, 1))
self.assertFalse(self.gfd.has_relation('father', 1, 2))
self.assertFalse(self.gfd.has_relation('create', 2, 4))
self.assertFalse(self.gfd.has_relation('create', 2, 2))
if __name__ == '__main__':
unittest.main()
|
[
"unittest.main",
"Graph.GFD"
] |
[((2524, 2539), 'unittest.main', 'unittest.main', ([], {}), '()\n', (2537, 2539), False, 'import unittest\n'), ((115, 128), 'Graph.GFD', 'GFD', (['"""person"""'], {}), "('person')\n", (118, 128), False, 'from Graph import GFD\n')]
|
"""
Implements agents for TD3 algorithms
"""
from typing import Tuple, Dict, List
import tensorflow as tf
import tensorflow_probability as tfp
import numpy as np
import gym
from tf2rl.common.models import MLPFeatureExtractor
from tf2rl.common.base_class import BasePolicy
tfd = tfp.distributions
class TD3Policy(BasePolicy):
"""
Defines policy object for TD3-like actor critic agent using
a feedforward network
"""
def __init__(self,
obs_space: gym.spaces,
act_space: gym.spaces,
layers: List[int] = [64, 64],
activation: str = "relu",
policy_kwargs=None,
name="DDPGPolicy"):
super(TD3Policy, self).__init__(obs_space,
act_space,
name=name)
self.policy = None
self.q1_fn = None
self.q2_fn = None
self.layers = layers
self.activation = activation
self.policy_kwargs = policy_kwargs
def setup(self, create_pi=True, create_qf=True):
"""Creates actor and critic models"""
if create_pi:
pi_net = MLPFeatureExtractor(self.layers,
self.activation,
name="pi_net")
policy_layer = tf.keras.layers.Dense(self.act_space.shape[0],
activation="tanh",
name="pi_net/pi")
self.policy = tf.keras.Sequential(pi_net.layers+[policy_layer])
if create_qf:
q1_net = MLPFeatureExtractor(self.layers,
self.activation,
name="q1_net")
q1_out = tf.keras.layers.Dense(1,
activation=None,
name="q1_net/qval")
self.q1_fn = tf.keras.Sequential(q1_net.layers+[q1_out])
q2_net = MLPFeatureExtractor(self.layers,
self.activation,
name="q2_net")
q2_out = tf.keras.layers.Dense(1,
activation=None,
name="q2_net/qval")
self.q2_fn = tf.keras.Sequential(q2_net.layers+[q2_out])
return self.policy, self.q1_fn, self.q2_fn
@tf.function
def step(self, obs: np.ndarray, noise_std=None) -> Tuple[tf.Tensor, Dict]:
"""
Return a deterministic action bounded between -1 and 1
"""
action = self.policy(obs)
if noise_std is not None:
noise = tf.random.normal(shape=action.shape, stddev=noise_std)
action += noise
# we need to clip again to -1 and 1 after applying the noise
action = tf.clip_by_value(action, -1, 1)
q1val = self.q1_fn(tf.concat([obs, action], axis=-1))
q2val = self.q2_fn(tf.concat([obs, action], axis=-1))
return action, q1val, q2val
def act(self, state: np.ndarray, noise_std=None) -> tf.Tensor:
"""
Return an action for a given state
"""
return self.step(state, noise_std=noise_std)[0]
@property
def policy_vars(self) -> List[tf.Tensor]:
"""
Returns actor trainable variables
"""
return self.policy.trainable_variables
@property
def critic_vars(self) -> List[tf.Tensor]:
"""
Returns critic trainable variables
"""
return self.q1_fn.trainable_variables+self.q2_fn.trainable_variables
|
[
"tensorflow.clip_by_value",
"tensorflow.random.normal",
"tensorflow.keras.layers.Dense",
"tensorflow.concat",
"tf2rl.common.models.MLPFeatureExtractor",
"tensorflow.keras.Sequential"
] |
[((1193, 1257), 'tf2rl.common.models.MLPFeatureExtractor', 'MLPFeatureExtractor', (['self.layers', 'self.activation'], {'name': '"""pi_net"""'}), "(self.layers, self.activation, name='pi_net')\n", (1212, 1257), False, 'from tf2rl.common.models import MLPFeatureExtractor\n'), ((1368, 1456), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['self.act_space.shape[0]'], {'activation': '"""tanh"""', 'name': '"""pi_net/pi"""'}), "(self.act_space.shape[0], activation='tanh', name=\n 'pi_net/pi')\n", (1389, 1456), True, 'import tensorflow as tf\n'), ((1576, 1627), 'tensorflow.keras.Sequential', 'tf.keras.Sequential', (['(pi_net.layers + [policy_layer])'], {}), '(pi_net.layers + [policy_layer])\n', (1595, 1627), True, 'import tensorflow as tf\n'), ((1670, 1734), 'tf2rl.common.models.MLPFeatureExtractor', 'MLPFeatureExtractor', (['self.layers', 'self.activation'], {'name': '"""q1_net"""'}), "(self.layers, self.activation, name='q1_net')\n", (1689, 1734), False, 'from tf2rl.common.models import MLPFeatureExtractor\n'), ((1839, 1900), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(1)'], {'activation': 'None', 'name': '"""q1_net/qval"""'}), "(1, activation=None, name='q1_net/qval')\n", (1860, 1900), True, 'import tensorflow as tf\n'), ((2012, 2057), 'tensorflow.keras.Sequential', 'tf.keras.Sequential', (['(q1_net.layers + [q1_out])'], {}), '(q1_net.layers + [q1_out])\n', (2031, 2057), True, 'import tensorflow as tf\n'), ((2078, 2142), 'tf2rl.common.models.MLPFeatureExtractor', 'MLPFeatureExtractor', (['self.layers', 'self.activation'], {'name': '"""q2_net"""'}), "(self.layers, self.activation, name='q2_net')\n", (2097, 2142), False, 'from tf2rl.common.models import MLPFeatureExtractor\n'), ((2247, 2308), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(1)'], {'activation': 'None', 'name': '"""q2_net/qval"""'}), "(1, activation=None, name='q2_net/qval')\n", (2268, 2308), True, 'import tensorflow as tf\n'), ((2420, 2465), 
'tensorflow.keras.Sequential', 'tf.keras.Sequential', (['(q2_net.layers + [q2_out])'], {}), '(q2_net.layers + [q2_out])\n', (2439, 2465), True, 'import tensorflow as tf\n'), ((2789, 2843), 'tensorflow.random.normal', 'tf.random.normal', ([], {'shape': 'action.shape', 'stddev': 'noise_std'}), '(shape=action.shape, stddev=noise_std)\n', (2805, 2843), True, 'import tensorflow as tf\n'), ((2967, 2998), 'tensorflow.clip_by_value', 'tf.clip_by_value', (['action', '(-1)', '(1)'], {}), '(action, -1, 1)\n', (2983, 2998), True, 'import tensorflow as tf\n'), ((3027, 3060), 'tensorflow.concat', 'tf.concat', (['[obs, action]'], {'axis': '(-1)'}), '([obs, action], axis=-1)\n', (3036, 3060), True, 'import tensorflow as tf\n'), ((3089, 3122), 'tensorflow.concat', 'tf.concat', (['[obs, action]'], {'axis': '(-1)'}), '([obs, action], axis=-1)\n', (3098, 3122), True, 'import tensorflow as tf\n')]
|
from collections import deque
m, n = map(int, input().split())
graph = []
queue = deque([])
for i in range(n):
graph.append(list(map(int, input().split())))
for j in range(m): # 익은 토마토 큐에 저장
if graph[i][j] == 1:
queue.append([i, j])
dx = [-1, 1, 0, 0]
dy = [0, 0, -1, 1]
def bfs():
while queue:
x, y = queue.popleft()
for i in range(4):
a = x + dx[i]
b = y + dy[i]
if 0 <= a < n and 0 <= b < m and graph[a][b] == 0:
queue.append([a, b])
graph[a][b] = graph[x][y] + 1
bfs()
answ = 0
for i in graph:
for j in i:
if j == 0:
print(-1)
exit(0)
answ = max(answ, max(i))
print(answ - 1)
|
[
"collections.deque"
] |
[((83, 92), 'collections.deque', 'deque', (['[]'], {}), '([])\n', (88, 92), False, 'from collections import deque\n')]
|
import json
import logging
import time
import numpy as np
from sklearn.svm import OneClassSVM
from sklearn.metrics import roc_auc_score
from sklearn.metrics.pairwise import pairwise_distances
from base.base_dataset import BaseADDataset
from networks.main import build_network
class OCSVM(object):
"""A class for One-Class SVM models."""
def __init__(self, kernel='linear', nu=0.1):
"""Init OCSVM instance."""
self.kernel = kernel
self.nu = nu
self.rho = None
self.gamma = None
self.model = OneClassSVM(kernel=kernel, nu=nu)
self.embedding = None
self.results = {
'train_time': None,
'test_time': None,
'test_auc': None,
'test_scores': None
}
def set_embedding(self, dataset, embedding_size=100, pretrained_word_vectors=None, embedding_reduction='mean',
use_tfidf_weights=False, normalize_embedding=False, device: str = 'cpu'):
"""Sets the word embedding for the text data."""
self.embedding = build_network('embedding',
dataset,
embedding_size=embedding_size,
pretrained_model=pretrained_word_vectors,
update_embedding=False,
embedding_reduction=embedding_reduction,
use_tfidf_weights=use_tfidf_weights,
normalize_embedding=normalize_embedding)
self.embedding = self.embedding.to(device)
def train(self, dataset: BaseADDataset, device: str = 'cpu', n_jobs_dataloader: int = 0):
"""Trains the OC-SVM model on the training data."""
logger = logging.getLogger()
train_loader, _ = dataset.loaders(batch_size=64, num_workers=n_jobs_dataloader)
# Training
logger.info('Starting training...')
X = ()
for data in train_loader:
_, text, _, weights = data
text, weights = text.to(device), weights.to(device)
X_batch = self.embedding(text, weights) # X_batch.shape = (batch_size, embedding_size)
X += (X_batch.cpu().data.numpy(),)
X = np.concatenate(X)
# if rbf-kernel, re-initialize svm with gamma minimizing the numerical error
if self.kernel == 'rbf':
self.gamma = 1 / (np.max(pairwise_distances(X)) ** 2)
self.model = OneClassSVM(kernel='rbf', nu=self.nu, gamma=self.gamma)
start_time = time.time()
self.model.fit(X)
self.results['train_time'] = time.time() - start_time
logger.info('Training Time: {:.3f}s'.format(self.results['train_time']))
logger.info('Finished training.')
def test(self, dataset: BaseADDataset, device: str = 'cpu', n_jobs_dataloader: int = 0):
"""Tests the OC-SVM model on the test data."""
logger = logging.getLogger()
_, test_loader = dataset.loaders(batch_size=64, num_workers=n_jobs_dataloader)
# Testing
logger.info('Starting testing...')
idx_label_score = []
X = ()
idxs = []
labels = []
for data in test_loader:
idx, text, label_batch, weights = data
text = text.to(device)
label_batch = label_batch.to(device)
weights = weights.to(device)
X_batch = self.embedding(text, weights) # X_batch.shape = (batch_size, embedding_size)
X += (X_batch.cpu().data.numpy(),)
idxs += idx
labels += label_batch.cpu().data.numpy().astype(np.int64).tolist()
X = np.concatenate(X)
start_time = time.time()
scores = (-1.0) * self.model.decision_function(X)
self.results['test_time'] = time.time() - start_time
scores = scores.flatten()
self.rho = -self.model.intercept_[0]
# Save triples of (idx, label, score) in a list
idx_label_score += list(zip(idxs, labels, scores.tolist()))
self.results['test_scores'] = idx_label_score
# Compute AUC
_, labels, scores = zip(*idx_label_score)
labels = np.array(labels)
scores = np.array(scores)
self.results['test_auc'] = roc_auc_score(labels, scores)
# Log results
logger.info('Test AUC: {:.2f}%'.format(100. * self.results['test_auc']))
logger.info('Test Time: {:.3f}s'.format(self.results['test_time']))
logger.info('Finished testing.')
def save_model(self, export_path):
"""Save OC-SVM model to export_path."""
pass
def load_model(self, import_path, device: str = 'cpu'):
"""Load OC-SVM model from import_path."""
pass
def save_results(self, export_json):
"""Save results dict to a JSON-file."""
with open(export_json, 'w') as fp:
json.dump(self.results, fp)
|
[
"json.dump",
"networks.main.build_network",
"sklearn.metrics.pairwise.pairwise_distances",
"logging.getLogger",
"time.time",
"sklearn.metrics.roc_auc_score",
"numpy.array",
"sklearn.svm.OneClassSVM",
"numpy.concatenate"
] |
[((552, 585), 'sklearn.svm.OneClassSVM', 'OneClassSVM', ([], {'kernel': 'kernel', 'nu': 'nu'}), '(kernel=kernel, nu=nu)\n', (563, 585), False, 'from sklearn.svm import OneClassSVM\n'), ((1071, 1335), 'networks.main.build_network', 'build_network', (['"""embedding"""', 'dataset'], {'embedding_size': 'embedding_size', 'pretrained_model': 'pretrained_word_vectors', 'update_embedding': '(False)', 'embedding_reduction': 'embedding_reduction', 'use_tfidf_weights': 'use_tfidf_weights', 'normalize_embedding': 'normalize_embedding'}), "('embedding', dataset, embedding_size=embedding_size,\n pretrained_model=pretrained_word_vectors, update_embedding=False,\n embedding_reduction=embedding_reduction, use_tfidf_weights=\n use_tfidf_weights, normalize_embedding=normalize_embedding)\n", (1084, 1335), False, 'from networks.main import build_network\n'), ((1819, 1838), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (1836, 1838), False, 'import logging\n'), ((2306, 2323), 'numpy.concatenate', 'np.concatenate', (['X'], {}), '(X)\n', (2320, 2323), True, 'import numpy as np\n'), ((2612, 2623), 'time.time', 'time.time', ([], {}), '()\n', (2621, 2623), False, 'import time\n'), ((3002, 3021), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (3019, 3021), False, 'import logging\n'), ((3728, 3745), 'numpy.concatenate', 'np.concatenate', (['X'], {}), '(X)\n', (3742, 3745), True, 'import numpy as np\n'), ((3768, 3779), 'time.time', 'time.time', ([], {}), '()\n', (3777, 3779), False, 'import time\n'), ((4248, 4264), 'numpy.array', 'np.array', (['labels'], {}), '(labels)\n', (4256, 4264), True, 'import numpy as np\n'), ((4282, 4298), 'numpy.array', 'np.array', (['scores'], {}), '(scores)\n', (4290, 4298), True, 'import numpy as np\n'), ((4334, 4363), 'sklearn.metrics.roc_auc_score', 'roc_auc_score', (['labels', 'scores'], {}), '(labels, scores)\n', (4347, 4363), False, 'from sklearn.metrics import roc_auc_score\n'), ((2534, 2589), 'sklearn.svm.OneClassSVM', 
'OneClassSVM', ([], {'kernel': '"""rbf"""', 'nu': 'self.nu', 'gamma': 'self.gamma'}), "(kernel='rbf', nu=self.nu, gamma=self.gamma)\n", (2545, 2589), False, 'from sklearn.svm import OneClassSVM\n'), ((2687, 2698), 'time.time', 'time.time', ([], {}), '()\n', (2696, 2698), False, 'import time\n'), ((3874, 3885), 'time.time', 'time.time', ([], {}), '()\n', (3883, 3885), False, 'import time\n'), ((4955, 4982), 'json.dump', 'json.dump', (['self.results', 'fp'], {}), '(self.results, fp)\n', (4964, 4982), False, 'import json\n'), ((2480, 2501), 'sklearn.metrics.pairwise.pairwise_distances', 'pairwise_distances', (['X'], {}), '(X)\n', (2498, 2501), False, 'from sklearn.metrics.pairwise import pairwise_distances\n')]
|
from os import environ
from hashlib import md5
from aiogram.types import (Message, ReplyKeyboardMarkup, KeyboardButton)
from objects import globals
from objects.globals import dp, config
from db_models.AuthUser import AuthUser
from keyboards.keyboards import MENU_BUTTONS
@dp.message_handler(commands="start")
async def start(message:Message):
globals.state_type = "" #Reset state type
user_data = await AuthUser.objects.filter(user_id=message.from_user.id).all()
if not user_data:
username = message.from_user.username if message.from_user.username is not None else "None"
last_name = message.from_user.last_name if message.from_user.last_name is not None else "None"
first_name = message.from_user.first_name if message.from_user.first_name is not None else "None"
await AuthUser.objects.create(password=(md5(str(message.from_user.id).encode("utf-8")).hexdigest())[:10],
user_id=message.from_user.id, username=username,
last_name=last_name, first_name=first_name)
buttons = list(zip([KeyboardButton(MENU_BUTTONS[k]) for k in range(len(MENU_BUTTONS)) if k % 2 == 0],
[KeyboardButton(MENU_BUTTONS[k]) for k in range(len(MENU_BUTTONS)) if k % 2 != 0]))
if message.from_user.id == int(eval(environ.get("ADMIN_ID"))):
buttons.append([KeyboardButton("📊Статистика")])
buttons = ReplyKeyboardMarkup(resize_keyboard=True, keyboard=buttons)
await message.answer(text=f"🤖Приветствую! Я бот.", reply_markup=buttons)
|
[
"aiogram.types.ReplyKeyboardMarkup",
"aiogram.types.KeyboardButton",
"db_models.AuthUser.AuthUser.objects.filter",
"os.environ.get",
"objects.globals.dp.message_handler"
] |
[((286, 322), 'objects.globals.dp.message_handler', 'dp.message_handler', ([], {'commands': '"""start"""'}), "(commands='start')\n", (304, 322), False, 'from objects.globals import dp, config\n'), ((1481, 1540), 'aiogram.types.ReplyKeyboardMarkup', 'ReplyKeyboardMarkup', ([], {'resize_keyboard': '(True)', 'keyboard': 'buttons'}), '(resize_keyboard=True, keyboard=buttons)\n', (1500, 1540), False, 'from aiogram.types import Message, ReplyKeyboardMarkup, KeyboardButton\n'), ((432, 485), 'db_models.AuthUser.AuthUser.objects.filter', 'AuthUser.objects.filter', ([], {'user_id': 'message.from_user.id'}), '(user_id=message.from_user.id)\n', (455, 485), False, 'from db_models.AuthUser import AuthUser\n'), ((1146, 1177), 'aiogram.types.KeyboardButton', 'KeyboardButton', (['MENU_BUTTONS[k]'], {}), '(MENU_BUTTONS[k])\n', (1160, 1177), False, 'from aiogram.types import Message, ReplyKeyboardMarkup, KeyboardButton\n'), ((1254, 1285), 'aiogram.types.KeyboardButton', 'KeyboardButton', (['MENU_BUTTONS[k]'], {}), '(MENU_BUTTONS[k])\n', (1268, 1285), False, 'from aiogram.types import Message, ReplyKeyboardMarkup, KeyboardButton\n'), ((1380, 1403), 'os.environ.get', 'environ.get', (['"""ADMIN_ID"""'], {}), "('ADMIN_ID')\n", (1391, 1403), False, 'from os import environ\n'), ((1432, 1461), 'aiogram.types.KeyboardButton', 'KeyboardButton', (['"""📊Статистика"""'], {}), "('📊Статистика')\n", (1446, 1461), False, 'from aiogram.types import Message, ReplyKeyboardMarkup, KeyboardButton\n')]
|
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Chat protocols.
"""
from incremental import Version
from twisted.python.deprecate import deprecatedModuleAttribute
deprecatedModuleAttribute(
Version("Twisted", 16, 2, 0),
"There is no replacement for this module.",
"twisted.words.protocols",
"oscar")
|
[
"incremental.Version"
] |
[((226, 254), 'incremental.Version', 'Version', (['"""Twisted"""', '(16)', '(2)', '(0)'], {}), "('Twisted', 16, 2, 0)\n", (233, 254), False, 'from incremental import Version\n')]
|
from cryptography.fernet import Fernet
import os
def write_key():
"""
Generates a key and save it into a file
"""
key = Fernet.generate_key()
with open("key.key", "wb") as key_file:
key_file.write(key)
def load_key():
"""
Loads the key from the current directory named `key.key`
"""
return open("key.key", "rb").read()
def encrypt(filename, key):
"""
Given a filename (str) and key (bytes), it encrypts the file and write it
"""
f = Fernet(key)
with open(filename, "rb") as file:
# read all file data
file_data = file.read()
# encrypt data
encrypted_data = f.encrypt(file_data)
# write the encrypted file
with open(filename, "wb") as file:
file.write(encrypted_data)
def decrypt(filename, key):
"""
Given a filename (str) and key (bytes), it decrypts the file and write it
"""
f = Fernet(key)
with open(filename, "rb") as file:
# read the encrypted data
encrypted_data = file.read()
# decrypt data
decrypted_data = f.decrypt(encrypted_data)
# write the original file
with open(filename, "wb") as file:
file.write(decrypted_data)
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser(description="Simple File Encryptor Script")
parser.add_argument("file", help="File to encrypt/decrypt")
parser.add_argument("-g", "--generate-key", dest="generate_key", action="store_true",
help="Whether to generate a new key or use existing")
parser.add_argument("-e", "--encrypt", action="store_true",
help="Whether to encrypt the file, only -e or -d can be specified.")
parser.add_argument("-d", "--decrypt", action="store_true",
help="Whether to decrypt the file, only -e or -d can be specified.")
args = parser.parse_args()
file = args.file
generate_key = args.generate_key
if generate_key:
write_key()
# load the key
key = load_key()
encrypt_ = args.encrypt
decrypt_ = args.decrypt
if encrypt_ and decrypt_:
raise TypeError("Please specify whether you want to encrypt the file or decrypt it.")
elif encrypt_:
encrypt(file, key)
elif decrypt_:
decrypt(file, key)
else:
raise TypeError("Please specify whether you want to encrypt the file or decrypt it.")
|
[
"cryptography.fernet.Fernet",
"cryptography.fernet.Fernet.generate_key",
"argparse.ArgumentParser"
] |
[((138, 159), 'cryptography.fernet.Fernet.generate_key', 'Fernet.generate_key', ([], {}), '()\n', (157, 159), False, 'from cryptography.fernet import Fernet\n'), ((498, 509), 'cryptography.fernet.Fernet', 'Fernet', (['key'], {}), '(key)\n', (504, 509), False, 'from cryptography.fernet import Fernet\n'), ((908, 919), 'cryptography.fernet.Fernet', 'Fernet', (['key'], {}), '(key)\n', (914, 919), False, 'from cryptography.fernet import Fernet\n'), ((1262, 1329), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Simple File Encryptor Script"""'}), "(description='Simple File Encryptor Script')\n", (1285, 1329), False, 'import argparse\n')]
|
from pypom import Page, Region
from selenium.webdriver.common.by import By
from selenium.webdriver.common.action_chains import ActionChains
class Base(Page):
_url = '{base_url}/{locale}'
_amo_header = (By.CLASS_NAME, 'Header-title')
def __init__(self, selenium, base_url, locale='en-US', **kwargs):
super(Base, self).__init__(
selenium, base_url, locale=locale, timeout=30, **kwargs)
def wait_for_page_to_load(self):
self.wait.until(
lambda _: self.find_element(*self._amo_header).is_displayed())
return self
@property
def header(self):
return Header(self)
@property
def logged_in(self):
"""Returns True if a user is logged in"""
return self.is_element_displayed(*self.header._user_locator)
def search_for(self, term):
return self.header.search_for(term)
def login(self, email, password):
login_page = self.header.click_login()
login_page.login(email, password)
self.wait.until(lambda _: self.logged_in)
def logout(self):
self.header.click_logout()
class Header(Region):
    """Region object for the AMO page header (navigation, auth, search)."""

    _root_locator = (By.CLASS_NAME, 'Header')
    _header_title_locator = (By.CLASS_NAME, 'Header-title')
    _explore_locator = (By.CSS_SELECTOR, '.SectionLinks > li:nth-child(1) \
                                          > a:nth-child(1)')
    _firefox_logo_locator = (By.CLASS_NAME, 'Header-title')
    _extensions_locator = (By.CSS_SELECTOR, '.SectionLinks \
                         > li:nth-child(2) > a:nth-child(1)')
    _login_locator = (By.CLASS_NAME, 'Header-authenticate-button')
    _logout_locator = (By.CSS_SELECTOR, '.DropdownMenu-items .Header-logout-button')
    _themes_locator = (By.CSS_SELECTOR, '.SectionLinks > li:nth-child(3) > \
        a:nth-child(1)')
    _user_locator = (By.CSS_SELECTOR,
                     '.Header-user-and-external-links .DropdownMenu-button-text')
    _search_textbox_locator = (By.CLASS_NAME, 'AutoSearchInput-query')

    def click_explore(self):
        """Navigate to Explore by clicking the Firefox logo/title."""
        logo = self.find_element(*self._firefox_logo_locator)
        logo.click()

    def click_extensions(self):
        """Open the Extensions section and wait for it to load."""
        self.find_element(*self._extensions_locator).click()
        from pages.desktop.extensions import Extensions
        page = Extensions(self.selenium, self.page.base_url)
        return page.wait_for_page_to_load()

    def click_themes(self):
        """Open the Themes section and wait for it to load."""
        self.find_element(*self._themes_locator).click()
        from pages.desktop.themes import Themes
        page = Themes(self.selenium, self.page.base_url)
        return page.wait_for_page_to_load()

    def click_login(self):
        """Click the login button and return the Login page object."""
        self.find_element(*self._login_locator).click()
        from pages.desktop.login import Login
        return Login(self.selenium, self.page.base_url)

    def click_logout(self):
        """Log out via the user dropdown, waiting until the login button shows."""
        user_menu = self.find_element(*self._user_locator)
        logout_item = self.find_element(*self._logout_locator)
        # ActionChains methods return the chain itself, so the whole
        # hover -> pause -> click sequence can be expressed as one chain.
        (ActionChains(self.selenium)
            .move_to_element(user_menu)
            .click()
            .pause(2)
            .move_to_element(logout_item)
            .pause(2)
            .click(logout_item)
            .perform())
        self.wait.until(lambda s: self.is_element_displayed(
            *self._login_locator))

    def search_for(self, term):
        """Type *term* into the search box and submit it."""
        box = self.find_element(*self._search_textbox_locator)
        box.click()
        box.send_keys(term)
        # Send 'enter' since the mobile page does not have a submit button
        box.send_keys(u'\ue007')
        from pages.desktop.search import Search
        return Search(self.selenium, self.page).wait_for_page_to_load()
|
[
"pages.desktop.themes.Themes",
"selenium.webdriver.common.action_chains.ActionChains",
"pages.desktop.login.Login",
"pages.desktop.extensions.Extensions",
"pages.desktop.search.Search"
] |
[((2724, 2764), 'pages.desktop.login.Login', 'Login', (['self.selenium', 'self.page.base_url'], {}), '(self.selenium, self.page.base_url)\n', (2729, 2764), False, 'from pages.desktop.login import Login\n'), ((2923, 2950), 'selenium.webdriver.common.action_chains.ActionChains', 'ActionChains', (['self.selenium'], {}), '(self.selenium)\n', (2935, 2950), False, 'from selenium.webdriver.common.action_chains import ActionChains\n'), ((2268, 2313), 'pages.desktop.extensions.Extensions', 'Extensions', (['self.selenium', 'self.page.base_url'], {}), '(self.selenium, self.page.base_url)\n', (2278, 2313), False, 'from pages.desktop.extensions import Extensions\n'), ((2500, 2541), 'pages.desktop.themes.Themes', 'Themes', (['self.selenium', 'self.page.base_url'], {}), '(self.selenium, self.page.base_url)\n', (2506, 2541), False, 'from pages.desktop.themes import Themes\n'), ((3579, 3611), 'pages.desktop.search.Search', 'Search', (['self.selenium', 'self.page'], {}), '(self.selenium, self.page)\n', (3585, 3611), False, 'from pages.desktop.search import Search\n')]
|
# Generated by Django 3.2 on 2021-04-23 22:50
from django.db import migrations, models
class Migration(migrations.Migration):
    """Create the ``Partner`` model in the archives app."""

    dependencies = [
        ('archives', '0014_alter_contact_message'),
    ]

    operations = [
        migrations.CreateModel(
            name='Partner',
            fields=[
                # Auto-created surrogate primary key.
                ('id', models.BigAutoField(
                    auto_created=True,
                    primary_key=True,
                    serialize=False,
                    verbose_name='ID')),
                ('name', models.CharField(max_length=70)),
                # Optional logo/image for the partner.
                ('image', models.FileField(null=True, upload_to='patron_images')),
                ('location', models.CharField(max_length=70)),
            ],
        ),
    ]
|
[
"django.db.models.BigAutoField",
"django.db.models.CharField",
"django.db.models.FileField"
] |
[((333, 429), 'django.db.models.BigAutoField', 'models.BigAutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (352, 429), False, 'from django.db import migrations, models\n'), ((453, 484), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(70)'}), '(max_length=70)\n', (469, 484), False, 'from django.db import migrations, models\n'), ((513, 567), 'django.db.models.FileField', 'models.FileField', ([], {'null': '(True)', 'upload_to': '"""patron_images"""'}), "(null=True, upload_to='patron_images')\n", (529, 567), False, 'from django.db import migrations, models\n'), ((599, 630), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(70)'}), '(max_length=70)\n', (615, 630), False, 'from django.db import migrations, models\n')]
|
# Generated by Django 2.1.9 on 2019-10-03 13:27
from django.db import migrations, models
import image_cropping.fields
class Migration(migrations.Migration):
    """Add an image field plus its cropping ratio field to ``compage``."""
    dependencies = [
        ("com", "0004_compage_is_interest_group"),
    ]
    operations = [
        migrations.AddField(
            model_name="compage",
            name="cropping",
            # Stores the user-selected crop region of the "picture" field
            # at a fixed 770x300 aspect ratio (django-image-cropping).
            field=image_cropping.fields.ImageRatioField(
                "picture",
                "770x300",
                adapt_rotation=False,
                allow_fullsize=False,
                free_crop=False,
                help_text=None,
                hide_image_field=False,
                size_warning=False,
                verbose_name="Beskjæring",
            ),
        ),
        migrations.AddField(
            model_name="compage",
            name="picture",
            # Optional picture; help text (Norwegian) advises images of at
            # least 770x300 px, croppable after upload.
            field=models.ImageField(
                blank=True,
                help_text="Bilder som er større enn 770x300 px ser best ut. Du kan beskjære bildet etter opplasting.",
                null=True,
                upload_to="uploads/news_pictures",
                verbose_name="Bilde",
            ),
        ),
    ]
|
[
"django.db.models.ImageField"
] |
[((858, 1068), 'django.db.models.ImageField', 'models.ImageField', ([], {'blank': '(True)', 'help_text': '"""Bilder som er større enn 770x300 px ser best ut. Du kan beskjære bildet etter opplasting."""', 'null': '(True)', 'upload_to': '"""uploads/news_pictures"""', 'verbose_name': '"""Bilde"""'}), "(blank=True, help_text=\n 'Bilder som er større enn 770x300 px ser best ut. Du kan beskjære bildet etter opplasting.'\n , null=True, upload_to='uploads/news_pictures', verbose_name='Bilde')\n", (875, 1068), False, 'from django.db import migrations, models\n')]
|
import json
import os
from cond_stmt import CondStmt
from trace import Trace
import base64
class Importer:
    """Loads fuzzing inputs and their JSON trace files from a folder.

    Inputs are files named ``id_<N>``; each is expected to have a matching
    ``track_id_<N>.json`` trace file in the same folder.
    """

    INPUT_NAME = "id_"

    def __init__(self, folder):
        """
        :param folder: directory holding the ``id_*`` input files and
            ``track_id_*.json`` trace files. A trailing path separator is
            tolerated but no longer required (paths are built with
            ``os.path.join``).
        """
        self.folder = folder
        # Instance-level state: a class-level ``set()`` would be shared
        # between all Importer instances (mutable class attribute bug).
        self.found_conditions = set()
        self.files = self.get_files()

    def get_files(self):
        """Return sorted ``(input_file, trace_file)`` name pairs."""
        response = []
        for input_file in sorted(os.listdir(self.folder)):
            if input_file.startswith(self.INPUT_NAME):
                input_id = input_file[len(self.INPUT_NAME):]
                response.append((input_file, "track_id_" + input_id + ".json"))
        return response

    def get_reachableness(self, offsets, reachableness):
        """Sum the reachableness counters covered by *offsets*.

        Each offset is a dict with ``begin``/``end`` byte indices.
        Returns 0 for an empty offset list.
        """
        return sum(
            sum(reachableness[offset['begin']:offset['end']])
            for offset in offsets)

    def update_reachableness(self, offsets, reachableness):
        """Increment, in place, the counter for every byte in *offsets*.

        The list is grown with zeros as needed to cover the largest
        ``end`` index, and is also returned for convenience.
        """
        for offset in offsets:
            begin_index = offset['begin']
            end_index = offset['end']
            if len(reachableness) < end_index:
                # += on a list extends in place, preserving identity for
                # callers that hold a reference to the same list.
                reachableness += [0] * (end_index - len(reachableness))
            for i in range(begin_index, end_index):
                reachableness[i] += 1
        return reachableness

    def read_input_file(self, fileLocation):
        """Return the raw bytes of an input file in the import folder."""
        with open(os.path.join(self.folder, fileLocation), 'rb') as input_file:
            return input_file.read()

    def read_fuzz_file(self, fileLocation):
        """Parse a JSON trace file into a list of :class:`CondStmt`.

        Each condition is annotated with its depth (position within the
        trace) and a reachableness score over the input bytes it touches.
        """
        with open(os.path.join(self.folder, fileLocation), 'r') as input_file:
            jsonData = json.load(input_file)
        reachableness = []
        response = []
        for depth, item in enumerate(jsonData):
            item['depth'] = depth
            condition = CondStmt.fromJson(item)
            reachableness = self.update_reachableness(condition.offsets, reachableness)
            condition.reachableness = self.get_reachableness(condition.offsets, reachableness)
            response.append(condition)
        return response

    def get_traces_iterator(self):
        """Yield a :class:`Trace` per input, skipping hangs and bad traces.

        Processing hangs can take up to a week per trace, so any input
        whose bytes match a file in ``../hangs`` is skipped.
        """
        hangs_dir = os.path.join(self.folder, '..', 'hangs')
        hangs = []
        print("Collecting hangs")
        for hang_file in os.listdir(hangs_dir):
            with open(os.path.join(hangs_dir, hang_file), 'rb') as hang:
                hangs.append(hang.read())
        print("Collected hangs")
        total_files = len(self.files)
        for number_of_files, (input_file, trace_file) in enumerate(self.files, start=1):
            # BUG FIX: reset per iteration. Previously a failed read left
            # trace_content unbound (first file) or silently reused the
            # previous file's trace for the wrong input.
            trace_content = None
            try:
                trace_content = self.read_fuzz_file(trace_file)
            except Exception:
                # Best-effort: an unreadable/malformed trace file is
                # treated like an empty one and skipped below.
                pass
            if not trace_content:
                print("Skipped %d/%d files due to empty trace" % (number_of_files, total_files))
                continue
            trace = Trace(self.read_input_file(input_file), trace_content)
            if trace.getInput() in hangs:
                print("Skipped %d/%d files" % (number_of_files, total_files))
                continue
            print("Processed %d/%d files" % (number_of_files, total_files))
            yield trace

    def get_traces_length(self):
        """Number of ``(input, trace)`` pairs discovered in the folder."""
        return len(self.files)
|
[
"cond_stmt.CondStmt.fromJson",
"os.listdir",
"json.loads"
] |
[((336, 359), 'os.listdir', 'os.listdir', (['self.folder'], {}), '(self.folder)\n', (346, 359), False, 'import os\n'), ((1810, 1829), 'json.loads', 'json.loads', (['content'], {}), '(content)\n', (1820, 1829), False, 'import json\n'), ((2392, 2429), 'os.listdir', 'os.listdir', (["(self.folder + '/../hangs')"], {}), "(self.folder + '/../hangs')\n", (2402, 2429), False, 'import os\n'), ((1987, 2010), 'cond_stmt.CondStmt.fromJson', 'CondStmt.fromJson', (['item'], {}), '(item)\n', (2004, 2010), False, 'from cond_stmt import CondStmt\n')]
|