content stringlengths 1 1.04M | input_ids listlengths 1 774k | ratio_char_token float64 0.38 22.9 | token_count int64 1 774k |
|---|---|---|---|
# Copyright (c) 2020, FADA-CATEC
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This module contains utility classes and methods to run a projections visualizer."""
import rospy
import math
import numpy as np
from math import sin, cos, pi, radians
from scipy.spatial.transform import Rotation
from z_laser_zlp1.zlp_keyboard import KeyboardParameters
from z_laser_zlp1.zlp_utils import CoordinateSystemParameters, ProjectionElementParameters
from geometry_msgs.msg import Point, Quaternion, Vector3, Pose, Quaternion
from visualization_msgs.msg import Marker, MarkerArray
from std_srvs.srv import Trigger, TriggerResponse
from z_laser_msgs.msg import Figure
from z_laser_msgs.srv import CoordinateSystem, CoordinateSystemResponse
from z_laser_msgs.srv import CoordinateSystemName, CoordinateSystemNameResponse
from z_laser_msgs.srv import CoordinateSystemShow, CoordinateSystemShowResponse
from z_laser_msgs.srv import CoordinateSystemList, CoordinateSystemListResponse
from z_laser_msgs.srv import ProjectionElement, ProjectionElementResponse
class ZLPVisualizer(object):
"""This class implement the functions related with projection elements.
Attributes:
cs_marker_array (list): coordinate systems' markers list (origin axes and frame of each system)
pe_marker_array (list): markers list of projection elements
active_cs (str): name of active reference system
cs_reference (str): auxiliar variable to differentiate and find the origin axes and frames markers
STD_WAIT_TIME (int): predefined number of projection seconds in reference system definition
figures_list (list): list with the figures' identificator names
"""
def __init__(self):
"""Initialize the ZLPVisualizer object."""
self.cs_marker_array = MarkerArray()
self.pe_marker_array = MarkerArray()
self.active_cs = ""
self.cs_reference = ""
self.STD_WAIT_TIME = CoordinateSystemParameters().DEFAULT_SHOW_TIME
self.figures_list = ProjectionElementParameters().figures_list
self.scale_factor = 1
def open_services(self):
"""Open ROS services for visualizer."""
self.start_proj = rospy.Service('projection_start', Trigger, self.projection_start_cb)
self.stop_proj = rospy.Service('projection_stop', Trigger, self.projection_stop_cb)
self.manual_cs = rospy.Service('define_coordinate_system', CoordinateSystem, self.manual_define_coord_sys_cb)
self.set_cs = rospy.Service('set_coordinate_system', CoordinateSystemName, self.set_coord_sys_cb)
self.rem_cs = rospy.Service('remove_coordinate_system', CoordinateSystemName, self.remove_coord_sys_cb)
self.show_cs = rospy.Service('show_active_coordinate_system', CoordinateSystemShow, self.show_coord_sys_cb)
self.hide_proj_elem = rospy.Service('hide_projection_element', ProjectionElement, self.hide_proj_elem_cb)
self.unhide_proj_elem = rospy.Service('unhide_projection_element', ProjectionElement, self.unhide_proj_elem_cb)
self.remove_proj_elem = rospy.Service('remove_projection_element', ProjectionElement, self.remove_proj_elem_cb)
self.add_proj_elem = rospy.Subscriber("add_projection_element", Figure, self.add_fig_cb)
self.monit_proj_elem = rospy.Subscriber("monitor_projection_element", Figure, self.init_keyboard_listener_cb)
def projection_start_cb(self, req):
"""Callback of ROS service to start projection of elements related to the active reference system on the surface.
Args:
req (object): trigger request ROS service object
Returns:
tuple[bool, str]: the first value in the returned tuple is a bool success value and the second value in the tuple
is an information message string
"""
if not self.active_cs:
return TriggerResponse(False, "No Coordinate System set as active.")
for i, marker in enumerate(self.pe_marker_array.markers):
if marker.ns.find(self.active_cs)>-1:
self.pe_marker_array.markers[i].action = Marker.ADD
return TriggerResponse(True, "Projection started.")
def projection_stop_cb(self, req):
"""Callback of ROS service to stop projection of all elements.
Args:
req (object): trigger request ROS service object
Returns:
tuple[bool, str]: the first value in the returned tuple is a bool success value and the second value in the tuple
is an information message string
"""
for i in range(len(self.pe_marker_array.markers)):
self.pe_marker_array.markers[i].action = Marker.DELETE
return TriggerResponse(True, "Projection stopped.")
def manual_define_coord_sys_cb(self, req):
"""Callback of ROS service to define a new reference system, stating the points coordinates manually by the user.
Args:
req (object): object with the necessary info to define a new coordinate system
Returns:
tuple[list, bool, str]: the first value in the returned tuple is a list of the user reference points T0, T1, T2, T3,
the second is a bool success value and the third s an information message string
"""
for marker in self.cs_marker_array.markers:
if req.name in marker.ns:
return CoordinateSystemResponse([], False, "Coordinate System already exists.")
self.active_cs = req.name
axis_x_marker, axis_y_marker = self.coord_sys_axes(req)
self.cs_marker_array.markers.append(axis_x_marker)
self.cs_marker_array.markers.append(axis_y_marker)
self.cs_marker_array.markers.append(self.coord_sys_frame(req))
self.cs_reference = "_origin"
self.timer_secs = self.STD_WAIT_TIME
self.update_cs_markers()
return CoordinateSystemResponse([], True, "Coordinate System added manually.")
def timer_cb(self, timer):
"""Timer for controlling the projection pause between the reference systems's different markers."""
for i in range(len(self.cs_marker_array.markers)):
self.cs_marker_array.markers[i].action = Marker.DELETE
self.update_cs_markers()
def update_cs_markers(self):
"""Change projection between origin axes and frame markers."""
for marker in self.cs_marker_array.markers:
if (self.active_cs + self.cs_reference) in marker.ns:
marker.action = Marker.ADD
if self.cs_reference in ["_origin","_frame"]:
rospy.Timer(rospy.Duration(self.timer_secs), self.timer_cb, oneshot=True)
self.cs_reference = "_frame" if self.cs_reference == "_origin" else "empty"
def base_marker(self, cs_name):
"""Initialize the common and basic parameters of a marker.
Args:
cs_name (object): name of the reference system with which the marker is associated
Returns:
object: marker initialized
"""
# define marker common fields
marker = Marker()
marker.type = Marker.LINE_STRIP
marker.action = Marker.DELETE
marker.scale.x = 0.01 # Vector3(0.01, 0.01, 0)
marker.color.g = 1.0
marker.color.a = 1.0
marker.header.frame_id = cs_name
marker.pose.orientation = Quaternion(0,0,0,1)
return marker
def coord_sys_axes(self, cs_points):
"""Create the origin axes markers.
Args:
cs_points (object): object with the x,y,z position of the reference points from the reference system
Returns:
tuple[object, object]: the first value in the returned tuple is the x-axis marker and
the second is the y-axis marker
"""
# read axes points
orig = Point() # axes origin point
orig.x = cs_points.P[0].x * 0.001
orig.y = cs_points.P[0].y * 0.001
axis_x = Point() # axis x line end point
axis_x.x = cs_points.P[1].x * 0.001
axis_x.y = orig.y
axis_y = Point() # axis y line end point
axis_y.x = orig.x
axis_y.y = cs_points.P[2].y * 0.001
# create one marker for each axis line
# and append the correspondent points
axis_x_marker = self.base_marker("[P]")
axis_y_marker = self.base_marker("[P]")
axis_x_marker.points.append(orig)
axis_x_marker.points.append(axis_x)
axis_y_marker.points.append(orig)
axis_y_marker.points.append(axis_y)
# update frame and namespace
axis_x_marker.ns = cs_points.name + "_origin/polyline/axis_x"
axis_y_marker.ns = cs_points.name + "_origin/polyline/axis_y"
return axis_x_marker, axis_y_marker
def coord_sys_frame(self, cs_points):
"""Create the frame marker.
Args:
cs_points (object): object with the x,y,z position of the reference points from the reference system
Returns:
object: frame marker
"""
frame = self.base_marker("[P]")
# read frame points
for i in [0,1,2,3,0]:
point = Point()
point.x = cs_points.P[i].x * 0.001
point.y = cs_points.P[i].y * 0.001
frame.points.append(point)
frame.ns = cs_points.name + "_frame/polyline/T1_T2_T3_T4"
return frame
def set_coord_sys_cb(self, req):
"""Callback of ROS service to set the indicated reference system as 'active reference system'.
Args:
req (object): object with the necessary parameters to identify a coordinate system
Returns:
tuple[bool, str]: the first value in the returned tuple is a bool success value and the second value in the tuple
is an information message string
"""
self.active_cs = req.name
return CoordinateSystemNameResponse(True, "Coordinate System set as active.")
def show_coord_sys_cb(self, req):
"""Callback of ROS service to project reference points, origin axes and frame of the active reference system.
Args:
req (object): object with the necessary parameters to identify a reference system
Returns:
tuple[bool, str]: the first value in the returned tuple is a bool success value and the second value in the tuple
is an information message string
"""
if not self.active_cs:
return CoordinateSystemShowResponse(False, "None Coordinate System is set.")
if not req.secs > 0:
return CoordinateSystemShowResponse(False, "Seconds projection is set to 0.")
self.timer_secs = req.secs
self.cs_reference = "_origin"
self.update_cs_markers()
return CoordinateSystemShowResponse(True, "Active Coordinate System showed correctly.")
def remove_coord_sys_cb(self, req):
"""Callback of ROS service to remove a reference system.
Args:
req (object): object with the necessary parameters to identify a reference system
Returns:
tuple[bool, str]: the first value in the returned tuple is a bool success value and the second value in the tuple
is an information message string
"""
if any(req.name in cs.ns for cs in self.cs_marker_array.markers):
self.cs_marker_array.markers = [cs for cs in self.cs_marker_array.markers if cs.ns.find(req.name)==-1]
self.pe_marker_array.markers = [pe for pe in self.pe_marker_array.markers if pe.ns.find(req.name)==-1]
if req.name == self.active_cs:
self.active_cs = ""
return CoordinateSystemNameResponse(True, "Coordinate System removed.")
else:
return CoordinateSystemNameResponse(False, "Coordinate System does not exist.")
def add_fig_cb(self, msg):
"""Callback of ROS topic to define a new projection element.
Args:
msg (object): object with the necessary parameters to define a new projection element
"""
# define marker common fields
marker = self.base_marker(self.active_cs)
step = self.compute_step()
marker.pose.position.x = msg.position.x * step
marker.pose.position.y = msg.position.y * step
if msg.figure_type == Figure.POLYLINE:
length = msg.size[0] * step
angle = radians(msg.angle[0])
# middle point for polyline ()
marker.pose.position.x += length/2*cos(angle)
marker.pose.position.y += length/2*sin(angle)
figure = self.line_eq(length, angle)
elif msg.figure_type == Figure.CIRCLE:
radius = msg.size[0] * step
figure = self.circle_eq(radius, 0.0, 2*pi)
elif msg.figure_type == Figure.ARC:
radius = msg.size[0] * step
start_angle = radians(msg.angle[0])
end_angle = radians(msg.angle[1])
figure = self.circle_eq(radius, start_angle, end_angle)
elif msg.figure_type == Figure.OVAL:
wide_size = msg.size[0] * step
height_size = msg.size[1] * step
angle = radians(msg.angle[0])
figure = self.oval_eq(wide_size, height_size, angle)
elif msg.figure_type == Figure.TEXT:
angle = radians(msg.angle[0])
marker.type = Marker.TEXT_VIEW_FACING
marker.scale.z = msg.size[0] * step
# overwrite some marker fields for text
rotation = Rotation.from_euler('xyz', [0, 0, angle], degrees=False)
marker.pose.orientation = Quaternion(*rotation.as_quat())
marker.text = msg.text
if msg.figure_type != Figure.TEXT:
marker.points = figure
marker.ns = self.active_cs+ "/" + msg.projection_group + self.figures_list[msg.figure_type] + msg.figure_name
self.pe_marker_array.markers.append(marker)
def line_eq(self, length, ang):
"""Calculate points array of a new line from its parametrics equation.
Args:
length (float): line length
ang (float): line angle slope
Returns:
list: list of calculated points
"""
line_points = []
delta_th = 0.01
for th in np.arange(-length/2, length/2, delta_th):
point = Point()
point.x = th * cos(ang)
point.y = th * sin(ang)
line_points.append(point)
return line_points
def circle_eq(self, radius, start_ang, end_ang):
"""Calculate points array of a new circle or arc from its parametrics equation.
Args:
radius (float): circle or arc radius
start_ang (float): arc start angle
end_ang (float): arc end angle
Returns:
list: list of calculated points
"""
circle_points = []
delta_th = 0.01
for th in np.arange(start_ang, end_ang, delta_th):
point = Point()
point.x = radius * sin(th)
point.y = radius * cos(th)
circle_points.append(point)
return circle_points
def oval_eq(self, a, b, angle):
"""Calculate points array of a new ellipse from its parametrics equation.
Args:
a (float): ellipse width
b (float): ellipse height
angle (float): rotation angle
Returns:
list: list of calculated points
"""
oval_points = []
delta_th = 0.01
for th in np.arange(0.0, 2*pi+delta_th, delta_th):
point = Point()
point.x = a * cos(th)*cos(angle) - b * sin(th)*sin(angle)
point.y = a * cos(th)*sin(angle) + b * sin(th)*cos(angle)
oval_points.append(point)
return oval_points
def hide_proj_elem_cb(self, req):
"""Callback of ROS service to hide specific projection element from active reference system.
Args:
req (object): object with the necessary parameters to identify a projection element
Returns:
tuple[bool, str]: the first value in the returned tuple is a bool success value and the second value in the tuple
is an information message string
"""
for i, marker in enumerate(self.pe_marker_array.markers):
if marker.ns.find(req.projection_group)>-1 and marker.ns.find(req.figure_name)>-1:
self.pe_marker_array.markers[i].color.a = 0
return ProjectionElementResponse(True, "Figure hidden correctly.")
return ProjectionElementResponse(False, "Figure not found.")
def unhide_proj_elem_cb(self, req):
"""Callback of ROS service to unhide specific projection element from active reference system.
Args:
req (object): object with the necessary parameters to identify a projection element
Returns:
tuple[bool, str]: the first value in the returned tuple is a bool success value and the second value in the tuple
is an information message string
"""
for i, marker in enumerate(self.pe_marker_array.markers):
if marker.ns.find(req.projection_group)>-1 and marker.ns.find(req.figure_name)>-1:
self.pe_marker_array.markers[i].color.a = 1
return ProjectionElementResponse(True, "Figure unhidden correctly.")
return ProjectionElementResponse(False, "Figure not found.")
def remove_proj_elem_cb(self, req):
"""Callback of ROS service to remove specific figure from active reference system.
Args:
req (object): object with the necessary parameters to identify a projection element
Returns:
tuple[bool, str]: the first value in the returned tuple is a bool success value and the second value in the tuple
is an information message string
"""
for i, marker in enumerate(self.pe_marker_array.markers):
if marker.ns.find(req.projection_group)>-1 and marker.ns.find(req.figure_name)>-1:
self.pe_marker_array.markers.pop(i)
return ProjectionElementResponse(True, "Figure removed correctly.")
return ProjectionElementResponse(False, "Figure not found.")
def translate(self, marker, dx=0, dy=0, dz=0):
"""Translate marker from one position to another.
Args:
marker (object): marker object to translate
dx (float): offset in x direction
dy (float): offset in y direction
dz (float): offset in z direction
"""
marker.action = Marker.DELETE
marker.pose.position.x += dx
marker.pose.position.y += dy
marker.pose.position.z += dz
marker.action = Marker.ADD
def compute_step(self):
"""Calculate the resolution step of the active reference system.
Returns:
float: resolution step (real dimension system {P} in mm / user dimension system {T})
"""
res = rospy.get_param('/zlaser/coordinate_system_resolution', 1000)
P0_x = rospy.get_param('/zlaser/P0/x', 1000) * 0.001
P1_x = rospy.get_param('/zlaser/P1/x', 1000) * 0.001
step = (P1_x - P0_x)/res
return step
def rotate(self, marker, angle):
"""Rotate marker an angle.
Args:
marker (object): marker object to rotate
angle (float): rotation angle [degrees]
"""
marker.action = Marker.DELETE
q = marker.pose.orientation
rotation = Rotation.from_euler('xyz', [0, 0, angle], degrees=True)
q_rot = Quaternion(*rotation.as_quat())
marker.pose.orientation = self.quat_multiply(q_rot, q)
marker.action = Marker.ADD
def quat_multiply(self, q1, q0):
"""Calculate the product of two quaternions.
Returns:
object: object with the x,y,z,w values of the result quaternion
"""
return Quaternion( q1.x*q0.w + q1.y*q0.z - q1.z*q0.y + q1.w*q0.x,
-q1.x*q0.z + q1.y*q0.w + q1.z*q0.x + q1.w*q0.y,
q1.x*q0.y - q1.y*q0.x + q1.z*q0.w + q1.w*q0.z,
-q1.x*q0.x - q1.y*q0.y - q1.z*q0.z + q1.w*q0.w)
def scale(self, marker, factor, proj_elem_params):
"""Scale size of marker by redefining figure equation.
Args:
marker (object): marker object to scale
factor (float): scale factor
proj_elem_params (object): object with the parameters of the projection element to transform
"""
marker.action = Marker.DELETE
self.scale_factor *= factor # update factor
size = proj_elem_params.size[0]*0.001 * self.scale_factor
angle = radians(proj_elem_params.angle[0])
if proj_elem_params.figure_type == Figure.POLYLINE:
figure = self.line_eq(size, angle) # size is line length
elif proj_elem_params.figure_type == Figure.CIRCLE:
figure = self.circle_eq(size, 0.0, 2*pi) # size is circle radius
elif proj_elem_params.figure_type == Figure.ARC:
end_ang = radians(proj_elem_params.angle[1])
figure = self.circle_eq(size, angle, end_ang) # size is arc radius
elif proj_elem_params.figure_type == Figure.OVAL:
height_size = proj_elem_params.size[1]*0.001 * self.scale_factor
figure = self.oval_eq(size, height_size, angle) # size is oval width
elif proj_elem_params.figure_type == Figure.TEXT:
marker.scale.z = marker.scale.z*0.001 * self.scale_factor
figure = []
marker.points = figure
marker.action = Marker.ADD
def on_press(self, key, marker, proj_elem_params):
"""Check if the key pressed if one of the list and execute the respective tasks.
Args:
key (enum): key pressed
marker (object): monitored marker object
proj_elem_params (object): object with the parameters of the projection element to monitor
"""
if any([key in COMBO for COMBO in self.keyboard_params.COMBINATIONS]):
self.current.add(key)
if self.current == self.keyboard_params.KEY_UP:
rospy.loginfo("VIZ_KEY_UP")
self.translate(marker, dy=self.compute_step())
elif self.current == self.keyboard_params.KEY_DOWN:
rospy.loginfo("VIZ_KEY_DOWN")
self.translate(marker, dy=-self.compute_step())
elif self.current == self.keyboard_params.KEY_LEFT:
rospy.loginfo("VIZ_KEY_LEFT")
self.translate(marker, dx=-self.compute_step())
elif self.current == self.keyboard_params.KEY_RIGHT:
rospy.loginfo("VIZ_KEY_RIGHT")
self.translate(marker, dx=self.compute_step())
elif self.current == self.keyboard_params.KEY_PLUS:
rospy.loginfo("VIZ_KEY_PLUS")
self.scale(marker, 2, proj_elem_params)
elif self.current == self.keyboard_params.KEY_MINUS:
rospy.loginfo("VIZ_KEY_MINUS")
self.scale(marker, 0.5, proj_elem_params)
elif self.current == self.keyboard_params.CTRL_LEFT:
rospy.loginfo("VIZ_CTRL_LEFT")
self.rotate(marker, 1)
elif self.current == self.keyboard_params.CTRL_RIGHT:
rospy.loginfo("VIZ_CTRL_RIGHT")
self.rotate(marker, -1)
elif self.current == self.keyboard_params.ESC:
rospy.loginfo("VIZ_ESC")
marker.action = Marker.DELETE
def on_release(self, key):
"""Remove current stored key, on release.
Args:
key (enum): key pressed
"""
if any([key in COMBO for COMBO in self.keyboard_params.COMBINATIONS]):
if self.current == self.keyboard_params.ESC:
return False # stop listener
self.current.remove(key)
def marker_from_name(self, name):
"""Find marker object in the markers array with the name.
Args:
name (str): name of the marker
Returns:
object: marker found
"""
for marker in self.pe_marker_array.markers:
if name in marker.ns:
marker.action = Marker.ADD
return marker
return []
def init_keyboard_listener_cb(self, msg):
"""Start keyboard listener for monitoring key presses.
Args:
msg (object): object with the necessary parameters to identify a projection element
Returns:
tuple[bool, str]: the first value in the returned tuple is a bool success value and the second value in the tuple
is an information message string
"""
from pynput import keyboard
self.keyboard_params = KeyboardParameters()
self.current = set()
name = self.active_cs + "/" + msg.projection_group + self.figures_list[msg.figure_type] + msg.figure_name
marker = self.marker_from_name(name)
if not marker:
return ProjectionElementResponse(False, "Marker not found.")
try:
on_press_handler = lambda event: self.on_press(event, marker=marker, proj_elem_params=msg)
listener = keyboard.Listener(on_press = on_press_handler,
on_release = self.on_release)
listener.start()
return ProjectionElementResponse(True, "Viz monitor.")
except Exception as e:
rospy.logerr(e)
return ProjectionElementResponse(False, "Error viz monitor.") | [
2,
15069,
357,
66,
8,
12131,
11,
376,
26853,
12,
34,
1404,
2943,
198,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
... | 2.307126 | 11,549 |
from storage import db
from storage.user import User
| [
6738,
6143,
1330,
20613,
198,
6738,
6143,
13,
7220,
1330,
11787,
628
] | 4.5 | 12 |
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from .non_local import NonLocal_Direct
from .custom_activations import build_custom_activation
from .custom_norm import select_norm
| [
11748,
10688,
198,
11748,
28034,
198,
11748,
28034,
13,
20471,
355,
299,
77,
198,
11748,
28034,
13,
20471,
13,
45124,
355,
376,
198,
198,
6738,
764,
13159,
62,
12001,
1330,
8504,
14565,
62,
13470,
198,
6738,
764,
23144,
62,
15791,
602,
... | 3.616667 | 60 |
##############################################################################
# Copyright by The HDF Group. #
# All rights reserved. #
# #
# This file is part of H5Serv (HDF5 REST Server) Service, Libraries and #
# Utilities. The full HDF5 REST Server copyright notice, including #
# terms governing use, modification, and redistribution, is contained in #
# the file COPYING, which can be found at the root of the source code #
# distribution tree. If you do not have access to this file, you may #
# request a copy from help@hdfgroup.org. #
##############################################################################
"""
This class is used to map between HDF5 type representations and numpy types
"""
import numpy as np
from h5py.h5t import special_dtype
from h5py.h5t import check_dtype
from h5py.h5r import Reference
from h5py.h5r import RegionReference
"""
Convert the given type item to a predefined type string for
predefined integer and floating point types ("H5T_STD_I64LE", et. al).
For compound types, recursively iterate through the typeItem and do same
conversion for fields of the compound type.
"""
"""
Return type info.
For primitive types, return string with typename
For compound types return array of dictionary items
"""
"""
Get element type info - either a complete type or element of a compound type
Returns dictionary
Note: only getTypeItem should call this!
"""
"""
Get Base type info for given type element.
"""
| [
29113,
29113,
7804,
4242,
2235,
198,
2,
15069,
416,
383,
5572,
37,
4912,
13,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
2... | 2.436675 | 758 |
from pyarrow import Table
from pyarrow.parquet import ParquetWriter
import pyarrow as pa
import pandas as pd
from pyarrow import csv
include_columns = ['zincid', 'smiles', 'dockscore']
delimiter = str(',')
chunksize = 1048576*1000
file_stream = '/data/dockop_data/AmpC_screen_table.csv'
input_stream_reader = InputStreamReader(file_stream)
for i, batch in input_stream_reader.batches():
df = batch.to_pandas()
table = pa.Table.from_pandas(df)
schema = table.schema
print(f'Writing a total of {len(list(df['smiles']))} to disk.')
ParquetWriter(f'/data/newdockop/dockop/code/mod_code_base/parquet/AmpC_screen_table_part_{i}.parquet', schema).write_table(table) | [
6738,
12972,
6018,
1330,
8655,
198,
6738,
12972,
6018,
13,
1845,
21108,
1330,
2547,
21108,
34379,
198,
11748,
12972,
6018,
355,
14187,
198,
11748,
19798,
292,
355,
279,
67,
198,
6738,
12972,
6018,
1330,
269,
21370,
198,
198,
17256,
62,
... | 2.65625 | 256 |
# Copyright 2021 Hathor Labs
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from hathor.checkpoint import Checkpoint as cp
from hathor.conf.settings import HathorSettings
SETTINGS = HathorSettings(
P2PKH_VERSION_BYTE=b'\x49',
MULTISIG_VERSION_BYTE=b'\x87',
NETWORK_NAME='testnet-golf',
BOOTSTRAP_DNS=['golf.testnet.hathor.network'],
# Genesis stuff
GENESIS_OUTPUT_SCRIPT=bytes.fromhex('76a914a584cf48b161e4a49223ed220df30037ab740e0088ac'),
GENESIS_TIMESTAMP=1577836800,
GENESIS_BLOCK_NONCE=826272,
GENESIS_BLOCK_HASH=bytes.fromhex('0000033139d08176d1051fb3a272c3610457f0c7f686afbe0afe3d37f966db85'),
GENESIS_TX1_NONCE=190,
GENESIS_TX1_HASH=bytes.fromhex('00e161a6b0bee1781ea9300680913fb76fd0fac4acab527cd9626cc1514abdc9'),
GENESIS_TX2_NONCE=115,
GENESIS_TX2_HASH=bytes.fromhex('00975897028ceb037307327c953f5e7ad4d3f42402d71bd3d11ecb63ac39f01a'),
# tx weight parameters. With these settings, tx weight is always 8
MIN_TX_WEIGHT_K=0,
MIN_TX_WEIGHT_COEFFICIENT=0,
MIN_TX_WEIGHT=8,
CHECKPOINTS=[
cp(100_000, bytes.fromhex('0000007ece4c7830169f360ed11c51b776e1b72bf0060e6e5b325ca8be474ac5')),
cp(200_000, bytes.fromhex('00000113ecd4b666116abf3d3f05ad509d903d6b456a1e8c35e46a9e426af11a')),
cp(300_000, bytes.fromhex('000000e42df13e4e7490cee98f303cb3b0ca33f362af180c5f7df740c98699d9')),
cp(400_000, bytes.fromhex('000000e9a748b34fc4d662d88bb36ef2a033ba129960924208be14eccdac1a65')),
cp(500_000, bytes.fromhex('000000b5c4572d7b85e585849540ece44b73948c5cdbc6f17a9a3a77fbd0f29a')),
cp(600_000, bytes.fromhex('000000f6743ba3d67e51d7adc21821b8263726ce3bc86010d5e1a905bf2531dc')),
cp(700_000, bytes.fromhex('0000008fda01c9e5fd6f99a5461e6dbf1039cba38cc8d0fc738a097d71caa968')),
cp(800_000, bytes.fromhex('000000397af32fcc4eeb6985d96326c1ff4644792631872a00394688b1782af5')),
cp(900_000, bytes.fromhex('00000097ae405036614f4335ad0e631df8fc5f7434e82c3421627e2fea4e1830')),
cp(1_000_000, bytes.fromhex('000000145ba662cdee0d72034658f93a0a3a4568d5ba5083ff09013ca1e6556c')),
cp(1_100_000, bytes.fromhex('000000404e6ff6a23695a6ffe712ce1c4efc02e75bbc11c3129f4c2377b07743')),
cp(1_200_000, bytes.fromhex('0000003be5fae5bb2c9ceaed589d172bcd9e74ca6c8d7d2ca06567f65cea7c9b')),
cp(1_300_000, bytes.fromhex('0000000000007d39de6e781c377bc202213b0b5b60db14c13d0b16e06d6fd5ac')),
],
)
| [
2,
15069,
33448,
36556,
273,
23500,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
13789,
13,
198,
2,
921,
74... | 2.102509 | 1,395 |
# -*- coding: utf-8 -*-
import unittest
import time
from appium import webdriver
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions
if __name__ == '__main__':
unittest.main()
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
11748,
555,
715,
395,
198,
11748,
640,
198,
6738,
598,
1505,
1330,
3992,
26230,
198,
6738,
384,
11925,
1505,
13,
12384,
26230,
13,
11284,
13,
9019,
1330,
5313,
3210... | 2.847619 | 105 |
import os
from pathlib import Path
from zipfile import ZipFile
import boto3
| [
11748,
28686,
198,
6738,
3108,
8019,
1330,
10644,
198,
6738,
19974,
7753,
1330,
38636,
8979,
198,
11748,
275,
2069,
18,
628
] | 3.666667 | 21 |
"""
Copyright (c) 2015 Michael Bright and Bamboo HR LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# pylint: disable=broad-except
import logging
import datetime
try:
import simplejson as out_json
except ImportError:
import json as out_json
from flask import request
from flask.wrappers import Response
from sqlalchemy.ext.declarative import DeclarativeMeta
from sqlalchemy.sql.expression import asc
from rapid.lib.exceptions import InvalidObjectException
from rapid.lib.store_service import StoreService
from rapid.workflow.data.models import PipelineEvent
from rapid.lib import api_key_required, get_db_session
from rapid.lib.constants import StatusConstants, ModuleConstants
from rapid.lib.exceptions import VcsNotFoundException
from rapid.lib.framework.injectable import Injectable
from rapid.lib.modules import CiModule
from rapid.master.data.database.dal.general_dal import GeneralDal
from rapid.workflow.data.models import Action, Pipeline, Stage, Workflow, PipelineInstance, PipelineParameters
logger = logging.getLogger("rapid")
| [
37811,
198,
15069,
357,
66,
8,
1853,
3899,
17558,
290,
347,
27708,
15172,
11419,
628,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,... | 3.889169 | 397 |
from configDmanager import import_config, Config
from DemPipe.executor import SimplePipeExecutor
| [
6738,
4566,
35,
37153,
1330,
1330,
62,
11250,
11,
17056,
198,
198,
6738,
1897,
47,
3757,
13,
18558,
38409,
1330,
17427,
47,
3757,
23002,
38409,
628
] | 3.807692 | 26 |
from django.contrib import admin
from .models import Artist
# Register your models here.
admin.site.register(Artist)
admin.site.site_header = 'Bardhub Administration'
| [
6738,
42625,
14208,
13,
3642,
822,
1330,
13169,
198,
6738,
764,
27530,
1330,
18902,
198,
2,
17296,
534,
4981,
994,
13,
198,
198,
28482,
13,
15654,
13,
30238,
7,
43020,
8,
198,
28482,
13,
15654,
13,
15654,
62,
25677,
796,
705,
33,
44... | 3.574468 | 47 |
import os
import os.path
import sys
if __name__ == '__main__':
main()
| [
11748,
28686,
198,
11748,
28686,
13,
6978,
198,
11748,
25064,
198,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
197,
12417,
3419,
198
] | 2.666667 | 27 |
# Nama: Sofil Muna Aulia
# NIM: 0110120115
# Kelas: Sistem Informasi 05
# Mulai baris ini hingga baris paling bawah
# digunakan untuk mengetes fungsi yang telah dibuat.
# Tidak perlu mengubah bagian ini.
# Ketika dijalankan, program akan menampilkan contoh
# pemanggilan fungsi dan solusi yang seharusnya.
# Cocokkan hasil pemanggilan fungsi dengan solusi
# yang seharusnya.
if __name__ == '__main__':
test() | [
2,
399,
1689,
25,
1406,
10379,
337,
9613,
317,
43640,
198,
2,
399,
3955,
25,
5534,
8784,
1264,
1314,
198,
2,
15150,
292,
25,
311,
396,
368,
45255,
17053,
8870,
628,
198,
198,
2,
17996,
1872,
2318,
271,
287,
72,
289,
278,
4908,
231... | 2.404624 | 173 |
from collections import defaultdict
import torch
import jsonlines
class unified_emotion():
"""Class for the 'Unified Emotion Dataset'. Data from https://github.com/sarnthil/unify-emotion-datasets.
"""
def __init__(self, file_path, include=['grounded_emotions'], split_ratio=0.7, verbose=False, first_label_only=False):
"""
Class for the 'Unified Emotion Dataset'.
Data from https://github.com/sarnthil/unify-emotion-datasets.
Args:
file_path (str): path to the 'unified-dataset.jsonl' file
include (list, optional): if not None, will only use the datasets in the include list. Defaults to None
exclude (list, optional): tasks to exclude. Defaults to ['fb-valence-arousal-anon', 'emobank', 'affectivetext', 'emotion-cause', 'electoraltweets'].
split_ratio (float, optional): amount of data reserved for test sets. Defaults to 0.8.
"""
self.file_path = file_path
self.include = include
self.split_ratio = split_ratio
self.verbose = verbose
self.first_label_only = first_label_only
self.info = [row for row in unified_emotion_info() if row['source'] in self.include]
def prep(self, text_tokenizer=lambda x: x, text_tokenizer_kwargs=dict()):
"""Generates dataset from unified file.
Args:
text_tokenizer (callable, optional): function that processes a line of text. Defaults to identity (raw text).
"""
datasets = defaultdict(lambda: defaultdict(lambda: defaultdict(list)))
source_lengths = dict()
label_map = defaultdict()
with jsonlines.open(self.file_path) as file:
for i, line in enumerate(file.iter()):
# Skip line if not in include list
source = line['source']
if not source in self.include:
continue
# Give 'all' split if data doesn't have its own train/test split
split = 'all' if line.get('split', None) == None else line['split']
# Give line a data specific id
id = source_lengths.get(source, 0)
# Convert the labels
# Saves the mapping if this is the first line of a dataset
labels = {k: v for k, v in sorted(line['emotions'].items())
if v != None}
if id == 0:
label_map[source] = {k: i for i,
(k, _) in enumerate(labels.items())}
# All present emotions (labels > 1)
present_emotions = [emotion for emotion,
present in labels.items() if present > 0]
#text = text_tokenizer(line['text'], **text_tokenizer_kwargs)
text = line['text']
# Ensure proper encoding
try:
text = text.encode('latin-1').decode('utf8')
except (UnicodeEncodeError, UnicodeDecodeError):
if self.verbose:
print("Removed sentence for bad encoding")
continue
text = text_tokenizer(text, **text_tokenizer_kwargs)
# If the tokenizer removes the text, carry on
if text == None:
continue
if isinstance(text, list):
text = ' '.join(text)
# Ignore all remaining utf8 encodings and bring to 'plain' text
text = text.encode('ascii', 'ignore').decode('ascii')
# If more than 1 emotion is present, multiple examples are created
if (not self.first_label_only):
for i, emotion in enumerate(present_emotions):
label = label_map[source][emotion]
datasets[source][split][label].append(
{'idx': id, 'labels': label, 'text': text})
source_lengths[source] = id + i + 1
else:
label = label_map[source][present_emotions[0]]
datasets[source][split][label].append(
{'idx': id, 'labels': label, 'text': text})
source_lengths[source] = id + 1
for source in datasets.keys():
if len(datasets[source].keys()) == 1 and 'all' in datasets[source].keys():
class_lengths = {k: len(datasets[source]['all'][k])
for k in datasets[source]['all'].keys()}
for c, l in class_lengths.items():
train_l = int(self.split_ratio * l)
datasets[source]['train'][c] = datasets[source]['all'][c][:train_l]
val_l = train_l + int((1 - self.split_ratio) * l * 0.5)
datasets[source]['validation'][c] = datasets[source]['all'][c][train_l:val_l]
datasets[source]['test'][c] = datasets[source]['all'][c][val_l:]
del datasets[source]['all']
self.datasets = datasets
self.source_lengths = source_lengths
self.label_map = label_map
self.inv_label_map = {source: {val: key for key,
val in label_map[source].items()} for source in label_map.keys()}
# Remove classes with limited data
total_removed, total_data_removed = 0, 0
removing = []
for source in datasets.keys():
n_classes = len(datasets[source]['train'].keys())
for c in datasets[source]['train'].keys():
train_size = len(datasets[source]['train'][c])
val_size = len(datasets[source]['validation'][c])
test_size = len(datasets[source]['test'][c])
keep = (train_size >= 96 and val_size >= 64 and test_size >= 64)
if (not keep):
if self.verbose:
print("Removed {:}/{:} for too little data |train|={}, |test|={}".
format(source, self.inv_label_map[source][c], train_size, test_size))
total_removed += 1
total_data_removed += train_size + test_size
self.source_lengths[source] -= train_size + test_size
removing.append((source, c))
for source, c in removing:
del datasets[source]['train'][c]
del datasets[source]['validation'][c]
del datasets[source]['test'][c]
if self.verbose:
print("Removed a total of {:} classes and {:} examples.".format(
total_removed, total_data_removed))
for source in datasets.keys():
assert len(datasets[source]['train'].keys()) >= 2, print(
f"{source} has too few classes remaining.")
@property
def lens(self):
"""Lengths of the individual datasets
"""
return self.source_lengths
"""
def get_dataloader(self, source_name, device, k=4, tokenizer=None, shuffle=True):
Generates a dataloader from a specified dataset.
See MetaStratifiedLoader for more.
Args:
source_name(str): a dataset from one of the processed ones.
k(int, optional): the k-shot. Defaults to 4.
tokenizer(callable, optional): function that processes list of strings to PyTorch tensor. Defaults to None.
shuffle(boolean, optional): whether or not to shuffle the train data. Defaults to True.
Returns:
dataloaders: iterable of data_loaders. First is train, last is test.
data_loaders = []
for split in self.datasets[source_name].keys():
source_dict = self.datasets[source_name]
dataloader = MetaStratifiedLoader(source_dict=source_dict,
split=split,
class_to_int=self.label_map[source_name],
k=k,
tokenizer=tokenizer,
shuffle=shuffle,
device=device
)
if split == 'train':
data_loaders.insert(0, dataloader)
else:
data_loaders.append(dataloader)
return data_loaders
"""
| [
6738,
17268,
1330,
4277,
11600,
198,
198,
11748,
28034,
198,
11748,
33918,
6615,
628,
198,
4871,
22706,
62,
368,
9650,
33529,
198,
220,
220,
220,
37227,
9487,
329,
262,
705,
3118,
1431,
2295,
9650,
16092,
292,
316,
4458,
6060,
422,
3740... | 2 | 4,262 |
from matrix_bot_api.matrix_bot_api import MatrixBotAPI
from matrix_bot_api.mhandler import MHandler
import discord
import yaml
config = yaml.safe_load(open('config.yml'))
matrix_client = MatrixBotAPI(config.get('matrix').get('username'), config.get('matrix').get('password'), config.get('matrix').get('homeserver'))
discord_client = DiscordClient()
discord_client.run(config.get('discord').get('token'))
| [
6738,
17593,
62,
13645,
62,
15042,
13,
6759,
8609,
62,
13645,
62,
15042,
1330,
24936,
20630,
17614,
198,
6738,
17593,
62,
13645,
62,
15042,
13,
76,
30281,
1330,
337,
25060,
198,
11748,
36446,
198,
11748,
331,
43695,
198,
198,
11250,
796... | 3.037313 | 134 |
import os
import cv2
from PIL import Image
import numpy as np
from random import randint, choice, sample
import albumentations as A
from albumentations.pytorch import ToTensorV2
import torch
from torch import nn
from torch import optim
from torchvision import datasets,transforms
from torchvision.utils import save_image
import torch.nn.functional as F
from torch.utils.data import Dataset, DataLoader
from utils import load_files_with_given_extension, random_idx_with_exclude
if __name__=='__main__':
img_transform = A.Compose(
[
A.Resize(100, 100),
A.RGBShift(),
A.HorizontalFlip(p=0.5),
A.VerticalFlip(p=0.5),
A.ShiftScaleRotate(scale_limit=0.1, rotate_limit=0, shift_limit=0.1, p=1, border_mode=0),
A.PadIfNeeded(min_height=100, min_width=100, always_apply=True, border_mode=0),
A.IAAAdditiveGaussianNoise(p=0.1),
A.IAAPerspective(p=0.1),
A.RandomBrightnessContrast(p=0.1),
A.Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)),
ToTensorV2(),
])
dataset_path = 'dataset/'
dataset = DatasetMultipleFaces(dataset_path=dataset_path,
img_transform=img_transform)
image1, image2, class_idx = dataset[0]
print('image1.shape: ', image1.shape)
save_image(image1, 'image1.jpg')
print('image2.shape: ', image2.shape)
save_image(image2, 'image2.jpg')
print('class_idx: ', class_idx) | [
11748,
28686,
198,
11748,
269,
85,
17,
198,
6738,
350,
4146,
1330,
7412,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
4738,
1330,
43720,
600,
11,
3572,
11,
6291,
198,
11748,
435,
65,
1713,
602,
355,
317,
198,
6738,
435,
65,
1713,
... | 1.866808 | 946 |
import base64
import hashlib
import json
import logging
import requests
import os
import sys
import six
log = logging.getLogger(__name__)
SENSITIVE_HEADERS = ['X-Token']
FILES_EXTENSIONS = ("png", "jpg", "svg", "txt")
def resource_filter(func):
"""This decorator allows to you filter answer from RESOURCE.list() by
project_id and region.
Both params are optional and may be used separately.
Example:
selvpc --debug floatingip list
selvpc --debug floatingip list --project=UUID
selvpc --debug floatingip list --region=REGION
selvpc --debug floatingip list --project=UUID --region=REGION
client.subnets.list(project=UUID)
client.subnets.list(region=REGION)
client.subnets.list(project=UUID, region=REGION)
"""
return wrap
def confirm_action(action):
"""Func must be a take_action func."""
return wrap
def get_item_properties(item, fields, mixed_case_fields=(), formatters=None):
"""Return a tuple containing the item properties.
:param item: a single item resource (e.g. Server, Tenant, etc)
:param fields: tuple of strings with the desired field names
:param mixed_case_fields: tuple of field names to preserve case
:param formatters: dictionary mapping field names to callables
to format the values
"""
if formatters is None:
formatters = {}
row = []
for field in fields:
if field in formatters:
row.append(formatters[field](item))
else:
if field in mixed_case_fields:
field_name = field.replace(' ', '_')
else:
field_name = field.lower().replace(' ', '_')
if not hasattr(item, field_name) and isinstance(item, dict):
data = item[field_name]
else:
data = getattr(item, field_name, '')
if data is None:
data = ''
row.append(data)
return tuple(row)
def sort_list_of_dicts(list_, dict_key):
"""Sort list of dicts by dict key
:param list list_: List of dicts,
:param string dict_key: Dict key for sorting.
:rtype: list
"""
# NOTE: Python 3 introduced new rules for ordering comparisons:
# See detailed here (chapter ordering-comparisons)
# https://docs.python.org/release/3.0.1/whatsnew/3.0.html
items = []
for item in list_:
if item[dict_key] is None:
item[dict_key] = str()
items.append(item)
return sorted(items, key=lambda item: item[dict_key])
def build_url(*args):
"""Build URL by provided parts of url.
Also this method strip all right slashes.
:param args: Parts of url.
:rtype: str
"""
return "/".join([part.rstrip('/') for part in args])
def update_json_error_message(content):
"""Converts and capitalize JSON error to normal message.
:param str content: JSON-answer from server.
:rtype: str
"""
if 'error' in content:
try:
message = json.loads(content)['error']
return message.capitalize().replace('_', ' ')
except Exception:
return content
def try_parse_json(json_):
"""Converts the string representation of JSON to JSON.
:param str json_: JSON in str representation.
:rtype: :class:`dict` if converted successfully, otherwise False.
"""
if not json_:
return False
try:
return json.loads(json_)
except ValueError:
return False
def is_url(data):
"""Checks if getting value is valid url and path exists."""
try:
r = requests.head(data)
except Exception:
return False
return r.status_code == requests.codes.ok
def process_logo_by_url(url):
"""Download and encode image by url."""
res = requests.get(url)
encoded_logo = base64.b64encode(res.content)
return encoded_logo
def process_theme_params(func):
"""This decorator allows to enter path to logo/url to logo
and adds hash to color value."""
return inner
def process_pair_params(func):
"""This decorator allows to enter path to ~/.ssh/id_rsa.pub or provide
id_rsa.pub as plain-text.
"""
return inner
| [
11748,
2779,
2414,
198,
11748,
12234,
8019,
198,
11748,
33918,
198,
11748,
18931,
198,
11748,
7007,
198,
11748,
28686,
198,
11748,
25064,
198,
198,
11748,
2237,
198,
198,
6404,
796,
18931,
13,
1136,
11187,
1362,
7,
834,
3672,
834,
8,
19... | 2.561552 | 1,649 |
"""Utilities for Galaxy scripts
"""
import argparse
import os
import sys
from galaxy.util.properties import find_config_file, load_app_properties
DESCRIPTION = None
ACTIONS = None
ARGUMENTS = None
DEFAULT_ACTION = None
ARG_HELP_CONFIG_FILE = """
Galaxy config file (defaults to $GALAXY_ROOT/config/galaxy.yml if that file exists
or else to ./config/galaxy.ini if that exists). If this isn't set on the
command line it can be set with the environment variable GALAXY_CONFIG_FILE.
"""
# ARG_HELP_CONFIG_SECTION = """
# Section containing application configuration in the target config file specified with
# -c/--config-file. This defaults to 'galaxy' for YAML/JSON configuration files and 'main'
# with 'app:' prepended for INI. If this isn't set on the command line it can be set with
# the environment variable GALAXY_CONFIG_SECTION.
# """
def main(argv=None):
"""Entry point for conversion process."""
if argv is None:
argv = sys.argv[1:]
args = _arg_parser().parse_args(argv)
kwargs = app_properties_from_args(args)
action = args.action
action_func = ACTIONS[action]
action_func(args, kwargs)
| [
37811,
18274,
2410,
329,
9252,
14750,
198,
37811,
198,
11748,
1822,
29572,
198,
11748,
28686,
198,
11748,
25064,
198,
198,
6738,
16161,
13,
22602,
13,
48310,
1330,
1064,
62,
11250,
62,
7753,
11,
3440,
62,
1324,
62,
48310,
198,
198,
3091... | 2.997375 | 381 |
# ------------------------------------------
# IC3PO: IC3 for Proving Protocol Properties
# ------------------------------------------
# Copyright (c) 2021 Aman Goel and Karem Sakallah, University of Michigan.
# All rights reserved.
#
# Author: Aman Goel (amangoel@umich.edu), University of Michigan
# ------------------------------------------
from __future__ import print_function
import sys
import time
import common
import math
from pysmt.pretty_printer import pretty_serialize
from pysmt.shortcuts import *
times = []
start_time = 0
SORT_SUFFIX = ":e"
# def print_smt2(self, cl):
# solver = Solver(name="z3")
# solver.add_assertion(cl)
# solver.solve()
# cl_smt2 = solver.to_smt2()
# print(cl_smt2)
#
# # print(cl)
#
# def print_smt2_set(self, inv_set):
# print("Proof certificate (SMT-LIB): #%d" % len(inv_set))
# print("-------------------------------------------------")
# count = 0
# for cl in inv_set:
# count += 1
# print("invariant [ic3po_%d]\t" % count, end='')
# self.print_smt2(cl)
# print("-------------------------------------------------")
| [
2,
20368,
35937,
198,
2,
12460,
18,
16402,
25,
12460,
18,
329,
1041,
1075,
20497,
24946,
198,
2,
20368,
35937,
198,
2,
15069,
357,
66,
8,
33448,
220,
42614,
1514,
417,
290,
49396,
76,
13231,
31840,
11,
2059,
286,
7055,
13,
220,
198,... | 2.490722 | 485 |
import tensorflow as tf
import numpy as np
# Asymmetric mean squared error
def bootstrap(full_model, non_nlp_model, X_full, y, score_func, n_boot=100, nlp_cols=None):
"""Resamples X to calculate `n_boot` pairs of full and non-nlp model scores
Args:
full_model (model): must have .predict method
non_nlp_model (model): must have .predict method
X_full (pd.DataFrame): full X dataframe including NLP columns
y (array-like): target variables
score_func (function): must have argument `score_func(y_true, y_pred)`
n_boot (int): number of bootstrap iterations
nlp_cols (list): list of NLP columns. See code for default value
"""
if nlp_cols is None:
nlp_cols = ['compound', 'emb1', 'emb10', 'emb11', 'emb12', 'emb13', 'emb14',
'emb15', 'emb16', 'emb2', 'emb3', 'emb4', 'emb5', 'emb6', 'emb7',
'emb8', 'emb9', 'neg', 'neu', 'pos', 'subjectivity', 'topic_18',
'topic_6']
# get predictions
X_non_nlp = X_full.drop(nlp_cols, axis=1)
y_pred_full = full_model.predict(X_full)
y_pred_non_nlp = non_nlp_model.predict(X_non_nlp)
X_non_nlp = np.array(X_non_nlp)
# resample test set
full_scores = []
non_nlp_scores = []
for i in range(n_boot):
boot_idxs = np.random.choice(X_full.shape[0], size=X_full.shape[0], replace=True)
X_boot = X_full.iloc[boot_idxs]
y_true_boot = y.iloc[boot_idxs]
y_pred_full_boot = y_pred_full[boot_idxs]
y_pred_non_nlp_boot = y_pred_non_nlp[boot_idxs]
full_scores.append(score_func(y_true_boot, y_pred_full_boot))
non_nlp_scores.append(score_func(y_true_boot, y_pred_non_nlp_boot))
return np.array(full_scores), np.array(non_nlp_scores) | [
11748,
11192,
273,
11125,
355,
48700,
198,
11748,
299,
32152,
355,
45941,
198,
198,
2,
1081,
26621,
19482,
1612,
44345,
4049,
628,
198,
4299,
6297,
26418,
7,
12853,
62,
19849,
11,
1729,
62,
21283,
79,
62,
19849,
11,
1395,
62,
12853,
1... | 2.1293 | 843 |
max_parallel_processes = 100
| [
9806,
62,
1845,
29363,
62,
14681,
274,
796,
1802,
198
] | 2.9 | 10 |
import os
import re
import pandas as pd # type: ignore
import requests
BASE_URL = "http://api.census.gov/data/"
# Core functions
def get_asec(year: int, vars: list[str], show_url: bool = False) -> pd.DataFrame:
"""Get CPS ASEC microdata using the Census API."""
key = _get_key()
_check_year(year, dataset="asec")
formatted_vars = _format_vars(vars)
url = f"{BASE_URL}{year}/cps/asec/mar?get={formatted_vars}&key={key}"
print(f"Getting CPS ASEC microdata for {year}")
df = _get_data(url, show_url)
return df
def get_basic(
year: int, month: int, vars: list[str], show_url: bool = False
) -> pd.DataFrame:
"""Get basic monthly CPS microdata using the Census API."""
key = _get_key()
_check_year(year, dataset="basic")
month_name, month_abb = _get_month_info(month)
formatted_vars = _format_vars(vars)
url = f"{BASE_URL}{year}/cps/basic/{month_abb}?get={formatted_vars}&key={key}"
print(f"Getting basic monthly CPS microdata for {month_name} {year}")
df = _get_data(url, show_url)
return df
# Helpers
class CensusAPIRequestError(Exception):
"""Raise if Census API request fails."""
# Create custom exception since clear built-in does not exist
class EnvVarNotFoundError(Exception):
"""Raise if environment variable is not found."""
if __name__ == "__main__":
# Get inputs
print(
"Hello! This if-name-main code calculates the employment-to-population",
"(EPOP) ratio for a given month and year.",
)
month_year = input(
"Please provide a month and year in MM/YYYY format (e.g., 09/2021): "
)
month, year = [int(x) for x in month_year.split("/")]
month_name, month_abb = _get_month_info(month)
# Get data
print() # For empty line
cps = get_basic(year, month, ["prpertyp", "prtage", "pemlr", "pwcmpwgt"], True)
print("\nRaw data:", cps, sep="\n")
# Clean data
cps = cps.loc[(cps.prpertyp == 2) & (cps.prtage >= 16)]
cps["pop16plus"] = True # Given above filter
cps["employed"] = cps.pemlr.isin([1, 2])
# Analyze data
results = (
cps[["pop16plus", "employed"]]
.apply(lambda x, wt: x.dot(wt), wt=cps.pwcmpwgt) # Weighted sum
.astype(int)
)
print("\nWeighted sums:", results, sep="\n")
# Calculate EPOP ratio
print(
f"\nThe EPOP ratio for {month_name} {year} was",
f"{results['employed'] / results['pop16plus']:.1%}.",
)
| [
11748,
28686,
198,
11748,
302,
198,
198,
11748,
19798,
292,
355,
279,
67,
220,
1303,
2099,
25,
8856,
198,
11748,
7007,
628,
198,
33,
11159,
62,
21886,
796,
366,
4023,
1378,
15042,
13,
66,
7314,
13,
9567,
14,
7890,
30487,
628,
198,
2... | 2.42913 | 1,023 |
# Copyright 2019 Nokia
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from cmframework.utils import cmactivationwork
from cmframework.server import cmeventletrwlock
from cmframework.server import cmcsn
from cmframework.server import cmsnapshot
from cmframework.utils.cmflagfile import CMFlagFile
from cmframework.utils import cmalarm
| [
2,
15069,
13130,
26182,
198,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
13789,
13,
198,
2,
921,
743,
7330,
257,
4... | 3.876712 | 219 |
from typing import List
import time
| [
6738,
19720,
1330,
7343,
198,
11748,
640,
628
] | 4.625 | 8 |
# -*- coding: utf-8 -*-
from django.conf import settings
from django.core.urlresolvers import reverse
from django.template.loader import render_to_string
from django.utils.translation import ugettext_lazy as _
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from horizon import exceptions
from horizon import forms
from horizon import messages
from horizon.utils import memoized
from horizon.utils import validators
from horizon import workflows
from openstack_dashboard import api
from openstack_dashboard.api import base
from openstack_dashboard.api import cinder
from openstack_dashboard.api import keystone
#from openstack_dashboard.api import neutron
from openstack_dashboard.api import nova
from openstack_dashboard.usage import quotas
from openstack_dashboard.local import local_settings
import smtplib
import re, requests
try:
import simplejson as json
except ImportError:
import json
except:
raise
from astutedashboard.common import get_admin_ksclient, \
get_billing_types, \
create_billing_type_mapping, \
modify_billing_type_mapping, \
create_user_letter, \
get_projects, \
get_project, \
create_project, \
create_user, \
get_tenants, \
get_users, \
get_neutron_client, \
create_network, \
create_subnet, \
list_network, \
list_subnet, \
create_router, \
add_interface_to_router
from astutedashboard.dashboards.billing.cipher import encrypt
from openstack_dashboard.local.local_settings import CIPHER_KEY
ACCOUNT_MAPPING_FIELDS = (
"domain_id",
"domain_name",
"project_mapping",
"project_name",
"description",
"username",
"password",
"confirm_password",
"project_id",
"billing_type"
)
ACCOUNT_EXTRA_FIELDS = (
"crm_account_num",
"service_id",
"customer_name",
"business_reg_num",
"registered_address",
"authorized_officer_name",
"authorized_officer_nric",
"authorized_officer_phone",
"authorized_officer_email",
"account_manager"
)
ACCOUNT_QUOTA_FIELDS = (
"quota_instances",
"quota_cores",
"quota_ram",
"quota_floating_ips",
"quota_fixed_ips",
"quota_gigabytes"
)
COMMON_HORIZONTAL_TEMPLATE = "billing/type_mappings/_common_form.html"
WELCOME_EMAIL_TEMPLATE = "billing/type_mappings/welcome_email.html"
# send multipart email
password_requirement_str = 'must be at least 8 chars long and contain of mixed case and digit chars'
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
6738,
42625,
14208,
13,
10414,
1330,
6460,
198,
6738,
42625,
14208,
13,
7295,
13,
6371,
411,
349,
690,
1330,
9575,
198,
6738,
42625,
14208,
13,
28243,
13,
29356,
1330,
8... | 2.085813 | 1,445 |
"""
Abstract Base Class
"""
import sqlite3
from abc import ABC
from abc import abstractmethod
from typing import Any
from typing import List
from budgethelper.models.database import Database
| [
37811,
198,
23839,
7308,
5016,
198,
37811,
198,
11748,
44161,
578,
18,
198,
6738,
450,
66,
1330,
9738,
198,
6738,
450,
66,
1330,
12531,
24396,
198,
6738,
19720,
1330,
4377,
198,
6738,
19720,
1330,
7343,
198,
198,
6738,
4466,
2978,
525,
... | 4.020833 | 48 |
import paddle as torch
import paddle
import paddle.nn.functional as F
import math
def dense_patch_slices(image_size, patch_size, scan_interval):
"""
Enumerate all slices defining 2D/3D patches of size `patch_size` from an `image_size` input image.
Args:
image_size (tuple of int): dimensions of image to iterate over
patch_size (tuple of int): size of patches to generate slices
scan_interval (tuple of int): dense patch sampling interval
Returns:
a list of slice objects defining each patch
"""
num_spatial_dims = len(image_size)
if num_spatial_dims not in (2, 3):
raise ValueError('image_size should has 2 or 3 elements')
patch_size = patch_size
scan_interval = scan_interval
scan_num = [int(math.ceil(float(image_size[i]) / scan_interval[i])) if scan_interval[i] != 0 else 1
for i in range(num_spatial_dims)]
slices = []
if num_spatial_dims == 3:
for i in range(scan_num[0]):
start_i = i * scan_interval[0]
start_i -= max(start_i + patch_size[0] - image_size[0], 0)
slice_i = slice(start_i, start_i + patch_size[0])
for j in range(scan_num[1]):
start_j = j * scan_interval[1]
start_j -= max(start_j + patch_size[1] - image_size[1], 0)
slice_j = slice(start_j, start_j + patch_size[1])
for k in range(0, scan_num[2]):
start_k = k * scan_interval[2]
start_k -= max(start_k + patch_size[2] - image_size[2], 0)
slice_k = slice(start_k, start_k + patch_size[2])
slices.append((slice_i, slice_j, slice_k))
else:
for i in range(scan_num[0]):
start_i = i * scan_interval[0]
start_i -= max(start_i + patch_size[0] - image_size[0], 0)
slice_i = slice(start_i, start_i + patch_size[0])
for j in range(scan_num[1]):
start_j = j * scan_interval[1]
start_j -= max(start_j + patch_size[1] - image_size[1], 0)
slice_j = slice(start_j, start_j + patch_size[1])
slices.append((slice_i, slice_j))
return slices
def sliding_window_inference(inputs, roi_size, sw_batch_size, predictor):
"""Use SlidingWindow method to execute inference.
Args:
inputs (torch Tensor): input image to be processed (assuming NCHW[D])
roi_size (list, tuple): the window size to execute SlidingWindow inference.
sw_batch_size (int): the batch size to run window slices.
predictor (Callable): given input tensor `patch_data` in shape NCHW[D], `predictor(patch_data)`
should return a prediction with the same spatial shape and batch_size, i.e. NMHW[D];
where HW[D] represents the patch spatial size, M is the number of output channels, N is `sw_batch_size`.
Note:
must be channel first, support both 2D and 3D.
input data must have batch dim.
execute on 1 image/per inference, run a batch of window slices of 1 input image.
"""
num_spatial_dims = len(inputs.shape) - 2
assert len(roi_size) == num_spatial_dims, 'roi_size {} does not match input dims.'.format(roi_size)
# determine image spatial size and batch size
# Note: all input images must have the same image size and batch size
image_size = list(inputs.shape[2:])
batch_size = inputs.shape[0]
# TODO: Enable batch sizes > 1 in future
if batch_size > 1:
raise NotImplementedError
original_image_size = [image_size[i] for i in range(num_spatial_dims)]
# in case that image size is smaller than roi size
image_size = tuple(max(image_size[i], roi_size[i]) for i in range(num_spatial_dims))
pad_size = [i for k in range(len(inputs.shape) - 1, 1, -1) for i in (0, max(roi_size[k - 2] - inputs.shape[k], 0))]
inputs = F.pad(inputs, pad=pad_size, mode='constant', value=0,data_format="NDHWC")
# TODO: interval from user's specification
scan_interval = _get_scan_interval(image_size, roi_size, num_spatial_dims)
# Store all slices in list
slices = dense_patch_slices(image_size, roi_size, scan_interval)
slice_batches = []
for slice_index in range(0, len(slices), sw_batch_size):
slice_index_range = range(slice_index, min(slice_index + sw_batch_size, len(slices)))
input_slices = []
for curr_index in slice_index_range:
if num_spatial_dims == 3:
slice_i, slice_j, slice_k = slices[curr_index]
input_slices.append(inputs[0, :, slice_i, slice_j, slice_k])
else:
slice_i, slice_j = slices[curr_index]
input_slices.append(inputs[0, :, slice_i, slice_j])
slice_batches.append(torch.stack(input_slices))
# Perform predictions
output_rois = list()
for data in slice_batches:
seg_prob = predictor(data) # batched patch segmentation
output_rois.append(seg_prob[0].numpy())
# stitching output image
output_classes = output_rois[0].shape[1]
output_shape = [batch_size, output_classes] + list(image_size)
# allocate memory to store the full output and the count for overlapping parts
output_image = torch.zeros(output_shape, dtype=torch.float32).numpy()
count_map = torch.zeros(output_shape, dtype=torch.float32).numpy()
for window_id, slice_index in enumerate(range(0, len(slices), sw_batch_size)):
slice_index_range = range(slice_index, min(slice_index + sw_batch_size, len(slices)))
# store the result in the proper location of the full output
for curr_index in slice_index_range:
if num_spatial_dims == 3:
slice_i, slice_j, slice_k = slices[curr_index]
ors=output_rois[window_id][curr_index - slice_index, :]
output_image[0, :, slice_i, slice_j, slice_k] += ors
count_map[0, :, slice_i, slice_j, slice_k] += 1.
else:
slice_i, slice_j = slices[curr_index]
output_image[0, :, slice_i, slice_j] += output_rois[window_id][curr_index - slice_index, :]
count_map[0, :, slice_i, slice_j] += 1.
# account for any overlapping sections
output_image /= count_map
output_image=paddle.to_tensor(output_image)
if num_spatial_dims == 3:
return (output_image[..., :original_image_size[0], :original_image_size[1], :original_image_size[2]],)
return (output_image[..., :original_image_size[0], :original_image_size[1]] ,) # 2D
| [
11748,
39517,
355,
28034,
198,
11748,
39517,
198,
11748,
39517,
13,
20471,
13,
45124,
355,
376,
198,
198,
11748,
10688,
198,
198,
4299,
15715,
62,
17147,
62,
82,
677,
274,
7,
9060,
62,
7857,
11,
8529,
62,
7857,
11,
9367,
62,
3849,
2... | 2.274476 | 2,911 |
#!/usr/bin/python
# -*- coding: UTF-8 -*-
#
# Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This example adds an ad extension override to a given campaign. To get
campaigns, run get_campaigns.py.
Tags: GeoLocationService.get, AdExtensionOverrideService.mutate
Api: AdWordsOnly
"""
__author__ = 'api.kwinter@gmail.com (Kevin Winter)'
import os
import sys
sys.path.insert(0, os.path.join('..', '..', '..', '..', '..'))
# Import appropriate classes from the client library.
from adspygoogle import AdWordsClient
ad_id = 'INSERT_AD_GROUP_AD_ID_HERE'
ad_extension_id = 'INSERT_AD_EXTENSION_ID_HERE'
if __name__ == '__main__':
# Initialize client object.
client = AdWordsClient(path=os.path.join('..', '..', '..', '..', '..'))
main(client, ad_id, ad_extension_id)
| [
2,
48443,
14629,
14,
8800,
14,
29412,
198,
2,
532,
9,
12,
19617,
25,
41002,
12,
23,
532,
9,
12,
198,
2,
198,
2,
15069,
2321,
3012,
3457,
13,
1439,
6923,
33876,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
... | 3.137116 | 423 |
from __future__ import annotations
import escnn.group
from escnn.group import Representation, GroupElement, Group
from escnn.group._numerical import decompose_representation_finitegroup
from escnn.group._numerical import decompose_representation_general
from typing import Callable, Any, List, Union, Dict, Tuple, Type
import numpy as np
__all__ = [
"IrreducibleRepresentation",
"build_irrep_from_generators",
"generate_irrep_matrices_from_generators",
"restrict_irrep"
]
from joblib import Memory
# import os
# cache = Memory(os.path.join(os.path.dirname(__file__), '_jl_restricted_irreps'), verbose=2)
from escnn.group import __cache_path__
cache = Memory(__cache_path__, verbose=0)
@cache.cache
def restrict_irrep(irrep: IrreducibleRepresentation, id) -> Tuple[np.matrix, List[Tuple[str, int]]]:
r"""
Restrict the input `irrep` to the subgroup identified by `id`.
"""
group_keys = irrep.group._keys
id = irrep.group._encode_subgroup_id_pickleable(id)
return _restrict_irrep(irrep.id, id, irrep.group.__class__.__name__, **group_keys)
| [
6738,
11593,
37443,
834,
1330,
37647,
198,
198,
11748,
3671,
20471,
13,
8094,
198,
6738,
3671,
20471,
13,
8094,
1330,
10858,
341,
11,
4912,
20180,
11,
4912,
198,
6738,
3671,
20471,
13,
8094,
13557,
77,
6975,
605,
1330,
26969,
3455,
62,
... | 2.770202 | 396 |
#
# Copyright (c) 2015-2020 Thierry Florac <tflorac AT ulthar.net>
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
"""PyAMS_file_views.widget module
This module defines data converters and and form widgets which are required to manage files
and images.
"""
import os.path
from cgi import FieldStorage
from datetime import datetime
from pyramid.interfaces import IView
from zope.component import queryMultiAdapter
from zope.dublincore.interfaces import IZopeDublinCore
from zope.interface import implementer_only
from pyams_file.file import EXTENSIONS_THUMBNAILS
from pyams_file.interfaces.thumbnail import IThumbnails
from pyams_file.schema import IFileField, IMediaField
from pyams_form.browser.file import FileWidget as FileWidgetBase
from pyams_form.converter import BaseDataConverter
from pyams_form.interfaces import DISPLAY_MODE, IDataConverter, INPUT_MODE
from pyams_form.interfaces.widget import IFieldWidget, IFileWidget, IMediaFileWidget
from pyams_form.template import widget_template_config
from pyams_form.util import to_bytes
from pyams_form.widget import FieldWidget
from pyams_layer.interfaces import IPyAMSLayer
from pyams_utils.adapter import adapter_config
from pyams_utils.interfaces.form import NOT_CHANGED, TO_BE_DELETED
from pyams_utils.size import get_human_size
from pyams_utils.url import absolute_url
__docformat__ = 'restructuredtext'
@adapter_config(required=(IFileField, IFileWidget), provides=IDataConverter)
class FileUploadDataConverter(BaseDataConverter):
"""File upload data converter"""
@widget_template_config(mode=INPUT_MODE,
template='templates/file-input.pt', layer=IPyAMSLayer)
@widget_template_config(mode=DISPLAY_MODE,
template='templates/file-display.pt', layer=IPyAMSLayer)
@implementer_only(IFileWidget)
class FileWidget(FileWidgetBase):
"""File widget"""
@property
def timestamp(self):
"""Image timestamp getter"""
dc = IZopeDublinCore(self.current_value, None) # pylint: disable=invalid-name
if dc is None:
return datetime.utcnow().timestamp()
return dc.modified.timestamp() # pylint: disable=no-member
@property
def current_value(self):
"""Widget value getter"""
if self.form.ignore_context:
return None
value = self.field.get(self.context)
if isinstance(value, dict):
lang = getattr(self, 'lang', None)
if lang is not None:
value = value.get(lang)
return value
@property
def deletable(self):
"""Widget deletable flag getter"""
if self.required:
return False
if not self.ignore_context:
value = self.current_value
else:
value = self.value
return bool(value)
def get_human_size(self):
"""File human size getter"""
return get_human_size(self.current_value.get_size(), self.request)
def get_thumbnail(self, geometry='128x128'):
"""File thumbnail getter"""
thumbnails = IThumbnails(self.current_value, None)
if thumbnails is not None:
display = thumbnails.get_thumbnail(geometry) # pylint: disable=assignment-from-no-return
if display is not None:
dc = IZopeDublinCore(display, None) # pylint: disable=invalid-name
if dc is None:
timestamp = self.timestamp
else:
timestamp = dc.modified.timestamp() # pylint: disable=no-member
return '{}?_={}'.format(absolute_url(display, self.request),
timestamp)
_name, ext = os.path.splitext(self.current_value.filename)
return '/--static--/pyams_file/img/{}'.format(
EXTENSIONS_THUMBNAILS.get(ext, 'unknown.png'))
def get_thumbnail_target(self):
"""Widget thumbnail target getter"""
value = self.current_value
if value is not None:
view = queryMultiAdapter((value, self.request), IView, name='preview.html')
if view is not None:
return absolute_url(value, self.request, 'preview.html')
return None
@adapter_config(required=(IFileField, IPyAMSLayer), provides=IFieldWidget)
def FileFieldWidget(field, request): # pylint: disable=invalid-name
"""File field widget factory"""
return FieldWidget(field, FileWidget(request))
#
# Medias files widget
#
@widget_template_config(mode=INPUT_MODE,
template='templates/media-input.pt', layer=IPyAMSLayer)
@widget_template_config(mode=DISPLAY_MODE,
template='templates/media-display.pt', layer=IPyAMSLayer)
@implementer_only(IMediaFileWidget)
class MediaFileWidget(FileWidget):
"""Media file widget"""
@adapter_config(required=(IMediaField, IPyAMSLayer), provides=IFieldWidget)
def MediaFileFieldWidget(field, request): # pylint: disable=invalid-name
"""Media file field widget factory"""
return FieldWidget(field, MediaFileWidget(request))
| [
2,
198,
2,
15069,
357,
66,
8,
1853,
12,
42334,
536,
959,
563,
4432,
330,
1279,
83,
2704,
273,
330,
5161,
14856,
400,
283,
13,
3262,
29,
198,
2,
1439,
6923,
33876,
13,
198,
2,
198,
2,
770,
3788,
318,
2426,
284,
262,
8617,
286,
... | 2.574468 | 2,115 |
from modsim import *
| [
6738,
13743,
320,
1330,
1635,
628
] | 3.666667 | 6 |
import collections
from textmining import normalizer
from statistics import mean, variance
from math import floor
class WebEntity(collections.MutableMapping):
"""
Represents a web entity and all its potential
attributes.
Attributes are accessible as in a dictionary
"""
def __init__(self):
"""
Initializes attributes
"""
self.set_attributes = {"localId", "summary", "email", "url", "phone", "fax", "domain", "contact", "contactform",
"legal", "useterms", "rss", "mobile", "responsive", "capital", "outlinks", "delivery_options", "payment_options"}
self.set_attributes.update(["monitoring", "seo"])
self.list_attributes = {"cms", "ecommerce", "addresses", "basket", "prices", "prices_per_page"}
self.str_attributes = {"description", "metadescription", "country"}
self.social_attributes = {"twitter", "facebook", "linkedin", "viadeo", "googleplus", "instagram", "youtube",
"dailymotion", "vimeo"}
self.dict_attributes = {"ecommerce_meta"}
self.attr = dict()
self.normzer = normalizer.Normalizer()
for a in self.set_attributes:
self.attr[a] = set()
for a in self.list_attributes:
self.attr[a] = list()
for a in self.str_attributes:
self.attr[a] = None
for a in self.social_attributes:
self.attr[a] = {}
for a in self.dict_attributes:
self.attr[a] = {}
self.attributes = self.set_attributes | self.str_attributes | self.list_attributes | self.social_attributes | self.dict_attributes
def export(self):
"""
Export all attributes in a dictionary
which can be rendered in json.
"""
attr = self.attr.copy()
# Json needs different social structure for JBM
for a in self.social_attributes:
social = []
for account in attr[a].values():
social.append(
{"account": account.account, "score": account.score, "profilePictureUrl": account.profile_picture})
attr[a] = social
# Json loader can't manage set objects
for a in self.set_attributes:
if a in ["responsive", "legal", "useterms", "seo", "mobile"]:
if True in attr[a]:
attr[a] = True
else:
attr[a] = False
elif a == "contact":
cts = []
for c in attr[a]:
cts.append(c.to_dict())
attr[a] = cts
elif a == "email":
emails = []
for e in attr[a]:
emails.append({"email": e[0], "generic": not e[1]})
attr[a] = emails
elif a == "rss":
rss = []
for r in attr[a]:
if r[0] is not None and r[1] is not None:
rss.append({"url": r[0], "frequency": r[1]})
attr[a] = rss
elif a == "summary":
attr[a] = attr[a].get_best_words(20, dic=True)
elif type(attr[a]) == set:
attr[a] = list(attr[a])
# Managing addresses
la = []
venues = set()
for addr in attr["addresses"]:
if addr.address not in venues:
a = {"address": addr.address, "zipcode": addr.zipcode, "city": addr.city}
venues.add(addr.address)
la.append(a)
attr["addresses"] = la
return attr
def normalize(self, pages_count):
"""
Normalizes attributes
"""
# Normalizes phone numbers
np = set()
for phone in self.attr["phone"]:
n = self.normzer.normalize_phone_number(phone)
if n is not None:
np.add(n)
# If normalization failed, we do not record phone
else:
pass
self.attr["phone"] = np
# Normalizes fax numbers
nf = set()
for fax in self.attr["fax"]:
f = self.normzer.normalize_phone_number(fax)
if f is not None:
nf.add(f)
# If normalization failed, we do not record fax
else:
pass
self.attr["fax"] = nf
# Normalize CMS found
cms = set()
res = []
for c in self.attr["cms"]:
if c["type"] not in cms:
res.append(c)
cms.add(c["type"])
self.attr["cms"] = res
# Normalize shopping platform found
shop = set()
res = []
for c in self.attr["ecommerce"]:
if c["type"] not in shop:
res.append(c)
shop.add(c["type"])
self.attr["ecommerce"] = res
if pages_count > 0:
baskets = len([x for x in self.attr["basket"] if x is True])
self.attr["ecommerce_meta"]["perc_pages_with_prices"] = self.attr["ecommerce_meta"]["pages_with_prices"] / pages_count
self.attr["ecommerce_meta"]["pages_with_basket"] = baskets
self.attr["ecommerce_meta"]["perc_pages_with_basket"] = baskets / pages_count
self.attr["ecommerce_meta"]["avg_price"] = mean(self.attr["prices"]) if len(self.attr["prices"]) > 0 else None
self.attr["ecommerce_meta"]["variance"] = variance(self.attr["prices"]) if len(self.attr["prices"]) > 1 else None
self.attr["ecommerce_meta"]["avg_prices_per_page"] = mean(self.attr["prices_per_page"]) if len(self.attr["prices"]) > 0 else None
# Computing quartiles
if len(self.attr["prices"]) > 0:
prices = sorted(self.attr["prices"])
tot = len(prices)
median = prices[floor(tot / 2)]
quart1 = prices[floor(tot / 4)]
quart3 = prices[floor(tot / 4 * 3)]
else:
median = quart1 = quart3 = None
self.attr["ecommerce_meta"]["median_price"] = median
self.attr["ecommerce_meta"]["first_quart_price"] = quart1
self.attr["ecommerce_meta"]["third_quart_price"] = quart3
# No pages crawled, values representing volumes must be initialized at 0
else:
for bkey in ["perc_pages_with_prices", "pages_with_basket", "perc_pages_with_basket", "pages_with_prices"]:
self.attr["ecommerce_meta"][bkey] = 0
self.attr["ecommerce_meta"]["payment_options"] = list(self.attr["ecommerce_meta"]["payment_options"])
self.attr["ecommerce_meta"]["delivery_options"] = list(self.attr["ecommerce_meta"]["delivery_options"])
# Remove potentially big fields unnecessary for JBM
del self.attr["prices"]
del self.attr["basket"]
del self.attr["prices_per_page"]
def __getitem__(self, key):
"""
Overrides dict class method
"""
return self.attr[key]
def __setitem__(self, key, value):
"""
Overrides dict class method.
Our dict is read only, no set possible.
"""
if key not in self.attributes:
raise ReadOnlyDictionaryException
else:
self.attr[key] = value
| [
11748,
17268,
198,
6738,
2420,
45374,
1330,
3487,
7509,
198,
6738,
7869,
1330,
1612,
11,
24198,
198,
6738,
10688,
1330,
4314,
628,
198,
198,
4871,
5313,
32398,
7,
4033,
26448,
13,
44,
18187,
44,
5912,
2599,
198,
220,
220,
220,
37227,
... | 2.026744 | 3,627 |
import socket
if __name__ == '__main__':
get_remote_machine_info() | [
11748,
17802,
198,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
220,
220,
220,
651,
62,
47960,
62,
30243,
62,
10951,
3419
] | 2.730769 | 26 |
#!/usr/bin/python
import include
import os
import csv
import sys
import re
from collections import Counter
ERROR_MODEL_SIZE = len(include.EM_STR)
INSTRUCTION_SIZE = len(include.ASID_STR)
inst_type="rf"
MAX_LOGS_SIZE=9999999999
# except:
# e = sys.exc_info()[0]
# #write_to_page( "<p>Error: %s</p>" % e )
# print e
if __name__ == "__main__":
parameter = sys.argv[1:]
#()
if len(parameter) < 3:
usage()
else:
print parameter[3]
if parameter[3] != 'caio':
inst_type = (parameter[3] if parameter[3] == 'rf' else 'inst')
parse_csv(parameter[0], parameter[1], (True if parameter[2] == 'cp' else False))
#():
else:
process_daniels_and_caios_log(parameter[0], parameter[1], parameter[2])
| [
2,
48443,
14629,
14,
8800,
14,
29412,
198,
11748,
2291,
198,
11748,
28686,
198,
11748,
269,
21370,
198,
11748,
25064,
198,
11748,
302,
198,
6738,
17268,
1330,
15034,
198,
198,
24908,
62,
33365,
3698,
62,
33489,
796,
18896,
7,
17256,
13,... | 2.171662 | 367 |
from __future__ import annotations
import asyncio
import datetime
from collections.abc import AsyncIterator
from contextlib import asynccontextmanager
from dataclasses import dataclass, field, replace
from decimal import Decimal
from typing import Any
import aiohttp
import aiohttp.web
import pytest
from yarl import URL
from neuro_admin_client import (
Balance,
Cluster,
ClusterUser,
ClusterUserRoleType,
Org,
OrgCluster,
OrgUser,
OrgUserRoleType,
Quota,
User,
)
@dataclass
@dataclass(frozen=True)
@dataclass()
@pytest.fixture
@pytest.fixture
@asynccontextmanager
| [
6738,
11593,
37443,
834,
1330,
37647,
198,
198,
11748,
30351,
952,
198,
11748,
4818,
8079,
198,
6738,
17268,
13,
39305,
1330,
1081,
13361,
37787,
198,
6738,
4732,
8019,
1330,
355,
2047,
535,
261,
5239,
37153,
198,
6738,
4818,
330,
28958,
... | 2.81448 | 221 |
import time
from contextlib import suppress, contextmanager
from astropy import units as u
from panoptes.utils import error
from panoptes.utils.utils import get_quantity_value
from panoptes.utils.time import current_time, wait_for_events, CountdownTimer
from panoptes.pocs.observatory import Observatory
from panoptes.pocs.scheduler.observation.bias import BiasObservation
from huntsman.pocs.utils.logger import get_logger
from huntsman.pocs.guide.bisque import Guide
from huntsman.pocs.archive.utils import remove_empty_directories
from huntsman.pocs.scheduler.observation.dark import DarkObservation
from huntsman.pocs.utils.flats import make_flat_field_sequences, make_flat_field_observation
from huntsman.pocs.utils.flats import get_cameras_with_filter
from huntsman.pocs.utils.safety import get_solar_altaz
from huntsman.pocs.camera.group import CameraGroup, dispatch_parallel
from huntsman.pocs.error import NotTwilightError
| [
11748,
640,
198,
6738,
4732,
8019,
1330,
18175,
11,
4732,
37153,
198,
6738,
6468,
28338,
1330,
4991,
355,
334,
198,
198,
6738,
3425,
404,
4879,
13,
26791,
1330,
4049,
198,
6738,
3425,
404,
4879,
13,
26791,
13,
26791,
1330,
651,
62,
40... | 3.180272 | 294 |
import requests
URL = 'https://scholar.googleusercontent.com/citations?view_op=export_citations&user=JtSAIqgAAAAJ&citsig=AMD79ooAAAAAYEerXzdIALaAeL3goamu28BB1p8NLHDg&hl=en'
page = requests.get(URL)
print(type(page.content)) | [
11748,
7007,
198,
198,
21886,
796,
705,
5450,
1378,
20601,
6192,
13,
13297,
43667,
13,
785,
14,
66,
20597,
30,
1177,
62,
404,
28,
39344,
62,
66,
20597,
5,
7220,
28,
41,
83,
4090,
40,
80,
70,
17922,
41,
5,
66,
896,
328,
28,
28075... | 2.368421 | 95 |
import matplotlib.pyplot as plt
import pandas as pd
from .legend_picker import *
from .helpers import *
| [
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
11748,
19798,
292,
355,
279,
67,
198,
198,
6738,
764,
1455,
437,
62,
79,
15799,
1330,
1635,
198,
6738,
764,
16794,
364,
1330,
1635,
628
] | 2.944444 | 36 |
from google.appengine.ext import blobstore
from google.appengine.ext import ndb
from google.appengine.ext.webapp import blobstore_handlers
from directory import Directory
import webapp2
import jinja2
import os
from google.appengine.api import users
JINJA_ENVIRONMENT = jinja2.Environment(
loader=jinja2.FileSystemLoader(os.path.dirname(__file__)),
extensions=['jinja2.ext.autoescape'],
autoescape=True
)
| [
6738,
23645,
13,
1324,
18392,
13,
2302,
1330,
44812,
8095,
198,
6738,
23645,
13,
1324,
18392,
13,
2302,
1330,
299,
9945,
198,
6738,
23645,
13,
1324,
18392,
13,
2302,
13,
12384,
1324,
1330,
44812,
8095,
62,
4993,
8116,
198,
6738,
8619,
... | 3.007353 | 136 |
import uuid
import numpy as np
from matplotlib import pyplot as plt, cm
def create_figure_and_axes(size_pixels):
"""Initializes a unique figure and axes for plotting."""
fig, ax = plt.subplots(1, 1, num=uuid.uuid4())
# Sets output image to pixel resolution.
dpi = 100
size_inches = size_pixels / dpi
fig.set_size_inches([size_inches, size_inches])
fig.set_dpi(dpi)
fig.set_facecolor('white')
ax.set_facecolor('white')
ax.xaxis.label.set_color('black')
ax.tick_params(axis='x', colors='black')
ax.yaxis.label.set_color('black')
ax.tick_params(axis='y', colors='black')
fig.set_tight_layout(True)
ax.grid(False)
return fig, ax
def fig_canvas_image(fig):
"""Returns a [H, W, 3] uint8 np.array image from fig.canvas.tostring_rgb()."""
# Just enough margin in the figure to display xticks and yticks.
fig.subplots_adjust(
left=0.08, bottom=0.08, right=0.98, top=0.98, wspace=0.0, hspace=0.0)
fig.canvas.draw()
data = np.frombuffer(fig.canvas.tostring_rgb(), dtype=np.uint8)
return data.reshape(fig.canvas.get_width_height()[::-1] + (3,))
def get_colormap(num_agents):
"""Compute a color map array of shape [num_agents, 4]."""
colors = cm.get_cmap('jet', num_agents)
colors = colors(range(num_agents))
np.random.shuffle(colors)
return colors
def get_viewport(all_states, all_states_mask):
"""Gets the region containing the data.
Args:
all_states: states of agents as an array of shape [num_agents, num_steps,
2].
all_states_mask: binary mask of shape [num_agents, num_steps] for
`all_states`.
Returns:
center_y: float. y coordinate for center of data.
center_x: float. x coordinate for center of data.
width: float. Width of data.
"""
valid_states = all_states[all_states_mask]
all_y = valid_states[..., 1]
all_x = valid_states[..., 0]
center_y = (np.max(all_y) + np.min(all_y)) / 2
center_x = (np.max(all_x) + np.min(all_x)) / 2
range_y = np.ptp(all_y)
range_x = np.ptp(all_x)
width = max(range_y, range_x)
return center_y, center_x, width
def visualize_one_step(states,
mask,
roadgraph,
title,
center_y,
center_x,
width,
color_map,
size_pixels=1000):
"""Generate visualization for a single step."""
# Create figure and axes.
fig, ax = create_figure_and_axes(size_pixels=size_pixels)
# Plot roadgraph.
rg_pts = roadgraph[:, :2].T
ax.plot(rg_pts[0, :], rg_pts[1, :], 'k.', alpha=1, ms=2)
masked_x = states[:, 0][mask]
masked_y = states[:, 1][mask]
colors = color_map[mask]
# Plot agent current position.
ax.scatter(
masked_x,
masked_y,
marker='o',
linewidths=3,
color=colors,
)
# Title.
ax.set_title(title)
# Set axes. Should be at least 10m on a side and cover 160% of agents.
size = max(10, width * 1.0)
ax.axis([
-size / 2 + center_x, size / 2 + center_x, -size / 2 + center_y,
size / 2 + center_y
])
ax.set_aspect('equal')
image = fig_canvas_image(fig)
plt.close(fig)
return image
def visualize_one_step_with_future(states, mask, future_states, future_states_mask, roadgraph, title,
center_y, center_x, width, color_map, size_pixels=1000, predictions=None, confs=None):
"""Generate visualization for a single step."""
# Create figure and axes.
fig, ax = create_figure_and_axes(size_pixels=size_pixels)
# Plot roadgraph.
rg_pts = roadgraph[:, :2].T
ax.plot(rg_pts[0, :], rg_pts[1, :], 'k.', alpha=1, ms=2)
masked_x = states[:, 0][mask]
masked_y = states[:, 1][mask]
colors = color_map[mask]
# Plot agent current position.
ax.scatter(
masked_x,
masked_y,
marker='o',
linewidths=4,
color=colors,
)
for ped in range(128):
maskeds_x = []
maskeds_y = []
for step in range(future_states.shape[1]):
if not future_states_mask[ped,step]:
continue
masked_x = future_states[ped, step, 0] #[future_states_mask[:,step]]
masked_y = future_states[ped, step, 1] #[future_states_mask[:,step]]
maskeds_x.append(masked_x)
maskeds_y.append(masked_y)
colors = color_map[ped] #+ np.array([0.3,0.3,0.3,0.3])
ax.plot(
maskeds_x,
maskeds_y,
# marker='o',
linewidth=4,
color=colors,
)
ax.plot(
maskeds_x,
maskeds_y,
# marker='o',
linewidth=2,
color=np.array([181, 179, 92, 255])/255.,
)
nump, timestamps, modalities, datadim = predictions.shape
if predictions is not None:
for ped in range(128):
if future_states_mask[ped].sum() == 0:
continue
for modality in range(modalities):
maskeds_x = []
maskeds_y = []
for step in range(timestamps):
if not future_states_mask[ped, step]:
continue
if [future_states_mask[ped, step]]:
masked_x = predictions[ped, step, modality, 0]
masked_y = predictions[ped, step, modality, 1]
maskeds_x.append(masked_x)
maskeds_y.append(masked_y)
colors = color_map[ped]
# ax.scatter(
# masked_x,
# masked_y,
# marker='o',
# linewidths=0.05,
# color=colors,
# )
conf = confs[ped, modality].detach().cpu().item()
ax.plot(
maskeds_x,
maskeds_y,
# marker='o',
linewidth=3*conf,
color=colors - np.array([0, 0, 0, 1-conf]),
)
ax.text(maskeds_x[-1], maskeds_y[-1], f"{conf:.2f}",
fontsize="xx-small")
# Title.
ax.set_title(title)
# Set axes. Should be at least 10m on a side and cover 160% of agents.
size = max(10, width * 1.0)
ax.axis([
-size / 2 + center_x, size / 2 + center_x, -size / 2 + center_y,
size / 2 + center_y
])
ax.set_aspect('equal')
image = fig_canvas_image(fig)
plt.close(fig)
return image
def visualize_all_agents_smooth(
decoded_example,
size_pixels=1000,
):
"""Visualizes all agent predicted trajectories in a serie of images.
Args:
decoded_example: Dictionary containing agent info about all modeled agents.
size_pixels: The size in pixels of the output image.
Returns:
T of [H, W, 3] uint8 np.arrays of the drawn matplotlib's figure canvas.
"""
# [num_agents, num_past_steps, 2] float32.
center_x, center_y, color_map, current_states, current_states_mask, future_states, future_states_mask, \
num_future_steps, num_past_steps, past_states, past_states_mask, roadgraph_xyz, width = prepare_data_for_vis(
decoded_example)
images = []
# Generate images from past time steps.
for i, (s, m) in enumerate(
zip(
np.split(past_states, num_past_steps, 1),
np.split(past_states_mask, num_past_steps, 1))):
im = visualize_one_step(s[:, 0], m[:, 0], roadgraph_xyz,
'past: %d' % (num_past_steps - i), center_y,
center_x, width, color_map, size_pixels)
images.append(im)
# Generate one image for the current time step.
s = current_states
m = current_states_mask
im = visualize_one_step(s[:, 0], m[:, 0], roadgraph_xyz, 'current', center_y,
center_x, width, color_map, size_pixels)
images.append(im)
# Generate images from future time steps.
for i, (s, m) in enumerate(
zip(
np.split(future_states, num_future_steps, 1),
np.split(future_states_mask, num_future_steps, 1))):
im = visualize_one_step(s[:, 0], m[:, 0], roadgraph_xyz,
'future: %d' % (i + 1), center_y, center_x, width,
color_map, size_pixels)
images.append(im)
return images
| [
11748,
334,
27112,
198,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
2603,
29487,
8019,
1330,
12972,
29487,
355,
458,
83,
11,
12067,
628,
198,
4299,
2251,
62,
26875,
62,
392,
62,
897,
274,
7,
7857,
62,
79,
14810,
2599,
198,
220,
2... | 1.986927 | 4,360 |
import PySimpleGUI as sg
import os
| [
11748,
9485,
26437,
40156,
355,
264,
70,
198,
11748,
28686,
198
] | 3.181818 | 11 |
#!/usr/bin/env python3
import urllib.error
import urllib.request
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
11748,
2956,
297,
571,
13,
18224,
198,
11748,
2956,
297,
571,
13,
25927,
198
] | 2.708333 | 24 |
from nose.tools import eq_, ok_
from funfactory.urlresolvers import reverse
from airmozilla.main.models import Participant
from .base import ManageTestCase
| [
6738,
9686,
13,
31391,
1330,
37430,
62,
11,
12876,
62,
198,
198,
6738,
1257,
69,
9548,
13,
6371,
411,
349,
690,
1330,
9575,
198,
198,
6738,
1633,
5908,
16496,
13,
12417,
13,
27530,
1330,
29880,
198,
6738,
764,
8692,
1330,
1869,
496,
... | 3.533333 | 45 |
import networkx as nx
import matplotlib.pyplot as plt
CEMM_COL1 = ( 0/255, 85/255, 100/255)
CEMM_COL2 = ( 0/255, 140/255, 160/255)
CEMM_COL3 = ( 64/255, 185/255, 212/255)
CEMM_COL4 = (212/255, 236/255, 242/255)
DARK_CEMM_COL1 = (0/255, 43/255, 50/255)
BAR_COL = (0.639, 0.639, 0.639)
# -------------------------------- ACTUAL PLOTS --------------------------------
| [
11748,
3127,
87,
355,
299,
87,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
198,
34,
3620,
44,
62,
25154,
16,
796,
357,
220,
657,
14,
13381,
11,
220,
7600,
14,
13381,
11,
1802,
14,
13381,
8,
198,
34,
3620,
... | 2.288344 | 163 |
import pytest
pytestmark = [pytest.mark.django_db]
| [
11748,
12972,
9288,
198,
198,
9078,
9288,
4102,
796,
685,
9078,
9288,
13,
4102,
13,
28241,
14208,
62,
9945,
60,
628,
198
] | 2.454545 | 22 |
# automatically generated by the FlatBuffers compiler, do not modify
# namespace: rpc
import flatbuffers
# /// \brief used for extra headers, ala HTTP
# /// The use case for the core is to support
# /// zipkin/google-Dapper style tracing
# /// alows for binary search lookup
# /// use with CreateVectorOfSortedTables<> instead of the CreateVector
# dynamic_header
# dynamic_header
| [
2,
6338,
7560,
416,
262,
21939,
36474,
364,
17050,
11,
466,
407,
13096,
198,
198,
2,
25745,
25,
374,
14751,
198,
198,
11748,
6228,
36873,
364,
198,
198,
2,
34013,
3467,
65,
3796,
973,
329,
3131,
24697,
11,
435,
64,
14626,
198,
2,
... | 3.54955 | 111 |
#!/usr/bin/env python
import argparse
import os
import os.path as osp
import glob
import numpy as np
import sys
import cPickle
from time import time
this_dir = osp.dirname(__file__)
sys.path.insert(0, osp.join(this_dir, '../../external/py-faster-rcnn/lib/'))
sys.path.insert(0, osp.join(this_dir, '../../src'))
sys.path.insert(0, osp.join(this_dir, '../../external/'))
from vdetlib.utils.protocol import proto_load
from fast_rcnn.nms_wrapper import nms
from tpn.evaluate import write_ilsvrc_results_file
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('track_dir',
help='Directory that contains all track detection results.')
parser.add_argument('vid_dir')
parser.add_argument('image_list')
parser.add_argument('score_key')
parser.add_argument('box_key')
parser.add_argument('output_dir')
parser.add_argument('--results', type=str, default='',
help='Result file.')
parser.add_argument('--thres', type=float, default=0.01,
help='Detection score threshold. [0.01]')
parser.add_argument('--num_classes', type=int, default=31,
help='Number of classes. [31]')
parser.add_argument('--max_per_image', type=int, default=100,
help='Maximum detection in each image. [100]')
args = parser.parse_args()
# read image_list
with open(args.image_list, 'r') as f:
image_list = dict([line.strip().split() for line in f])
num_classes = args.num_classes
all_boxes = [[[] for _ in xrange(len(image_list))]
for _ in xrange(num_classes)]
# process vid detections
tracks = sorted(glob.glob(osp.join(args.track_dir, '*')))
for track_path in tracks:
print track_path
vid_name = osp.split(track_path)[-1].split('.')[0]
vid_proto = proto_load(osp.join(args.vid_dir, vid_name + '.vid'))
track_proto = proto_load(track_path)
for frame in vid_proto['frames']:
frame_name = osp.join(vid_name, osp.splitext(frame['path'])[0])
if frame_name not in image_list.keys(): continue
frame_idx = frame['frame']
sub_idx = int(image_list[frame_name])
global_idx = sub_idx - 1
start_time = time()
scores, boxes = _frame_dets(track_proto['tracks'], frame_idx,
args.score_key, args.box_key)
boxes = boxes.reshape((boxes.shape[0], -1))
for j in xrange(1, num_classes):
inds = np.where(scores[:, j] > args.thres)[0]
cls_scores = scores[inds, j]
cls_boxes = boxes[inds, j*4:(j+1)*4]
cls_dets = np.hstack((cls_boxes, cls_scores[:, np.newaxis])) \
.astype(np.float32, copy=False)
keep = nms(cls_dets, 0.3, force_cpu=True)
cls_dets = cls_dets[keep, :]
all_boxes[j][global_idx] = cls_dets
# Limit to max_per_image detections *over all classes*
if args.max_per_image > 0:
image_scores = np.hstack([all_boxes[j][global_idx][:, -1]
for j in xrange(1, num_classes)])
if len(image_scores) > args.max_per_image:
image_thresh = np.sort(image_scores)[-args.max_per_image]
for j in xrange(1, num_classes):
keep = np.where(all_boxes[j][global_idx][:, -1] >= image_thresh)[0]
all_boxes[j][global_idx] = all_boxes[j][global_idx][keep, :]
end_time = time()
print "{}/{}: {:.03f} s".format(sub_idx, len(image_list), end_time - start_time)
sys.stdout.flush()
det_file = osp.join(args.output_dir, 'detections.pkl')
if not osp.isdir(args.output_dir):
os.makedirs(args.output_dir)
with open(det_file, 'wb') as f:
cPickle.dump(all_boxes, f, cPickle.HIGHEST_PROTOCOL)
if args.results:
with open(args.results, 'w') as f:
write_ilsvrc_results_file(all_boxes, f, thres=args.thres)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
11748,
1822,
29572,
198,
11748,
28686,
198,
11748,
28686,
13,
6978,
355,
267,
2777,
198,
11748,
15095,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
25064,
198,
11748,
269,
31686,
... | 2.076215 | 1,955 |
from __future__ import unicode_literals
import frappe
| [
6738,
11593,
37443,
834,
1330,
28000,
1098,
62,
17201,
874,
198,
11748,
5306,
27768,
628
] | 3.666667 | 15 |
# -*- coding: utf-8 -*-
def is_prime(number):
"""Check if a number is a prime number.
Args:
number (int): Number.
Returns:
bool: Return True if number is a prime number and False if not.
"""
if number <= 1:
return False
for x in range(2, number):
if not number % x:
return False
return True
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
4299,
318,
62,
35505,
7,
17618,
2599,
198,
220,
220,
220,
37227,
9787,
611,
257,
1271,
318,
257,
6994,
1271,
13,
628,
220,
220,
220,
943,
14542,
25,
198,
220,
220,
2... | 2.33121 | 157 |
"""
Created on Thu Aug 2 19:42:10 2021
@author: Mohammed Ahsan
-------------------------------------------------------
APP REVIEW SCRAPER - ioS and google Store.
version : 1.0
Build name : RedSparrow -----
-------------------------------------------------------
"""
from app_store_scraper import AppStore
from pprint import pprint
import json
from bson import json_util
import datetime
import csv
# CSV
# Updated list to store all of the game names.
updatedlist = []
# A new list to store all of the game names from the CSV File.
results = []
# CONVERT THE FIELDS IN THE CSV INTO A NEW LIST called as results.
# Add the app names as new rows into the testnames.csv file. The app names are the app id's for the scrapper.
# testnames.csv consist of all the app names as new rows .
with open('testnames.csv', newline='') as inputfile:
for row in csv.reader(inputfile):
results.append(row[0])
# USE list incase if the reading from csv is unsuccessfull.
# The list of app names that we would want the reviews of .
#appnames = ["monopoly"]
#appnames = ["monopoly","Fidget Toys Trading 3D","Flow Free","Two Dots","Blackout Bingo - Bingo Game","Pull the Pin","Parking Jam 3D","Adorable Home"," Match 3D"," Terraforming Mars","The Game of Life 2","Jigsaw Puzzle","Coin Pusher - Lucky Dozer Game"]
# List of app. names the reviews to.
# Iterate through the results list to fetch the reviews of all of the apps - list with field names from CSV.
for i in results :
output = AppStore(country="us", app_name=i)
output.review(how_many=5)
updatedlist.append(output.reviews)
# print the output.
print(updatedlist)
# write the reviews to a text file as output.
with open('OUTPUTFILEAPPS.txt', 'w', encoding='utf-8') as f:
f.write(str(updatedlist))
# Convert the Scraped data into JSON.
with open("OUTPUTJSON.json", 'w') as file :
file.write((json.dumps(output.reviews,default=json_util.default, indent=0, sort_keys= False)))
# TESTING.
"""
# Fetch the App using country name and app name
output = AppStore(country="us", app_name="Fidget Toys Trading 3D")
# Count of how many reviews
output.review(how_many=10000)
# updated list to store the reviews.
updatedlist = []
# Add the reviews to the list
updatedlist.append(output.reviews)
# Write the Output into a TEXT file.
with open('APPREVIEWS.txt', 'w', encoding='utf-8') as f:
f.write(str(updatedlist))
# Convert the list to JSON.
print(updatedlist)
print(type(updatedlist))
#pprint(monopoly.reviews)
#pprint(monopoly.reviews_count)
"""
# CSV_2
"""
# iterate through the list to fetch the reviews of all of the apps. in the appnames list.
or i in lists_from_csv :
output = AppStore(country="us", app_name=i)
output.review(how_many=20)f
updatedlist.append(output.reviews)
"""
| [
37811,
198,
41972,
319,
26223,
2447,
220,
362,
678,
25,
3682,
25,
940,
33448,
198,
31,
9800,
25,
19773,
7900,
12807,
198,
3880,
19351,
6329,
198,
24805,
4526,
28206,
6374,
49,
2969,
1137,
532,
33245,
50,
290,
23645,
9363,
13,
220,
198... | 2.960986 | 974 |
#!/usr/bin/env python
#
import setuptools
with open("README.md", "r", encoding="utf-8") as fh:
long_description = fh.read()
version = {}
with open("muspinsim/version.py") as fp:
exec(fp.read(), version)
setuptools.setup(
name="muspinsim",
version=version["__version__"],
author="Simone Sturniolo",
author_email="simonesturniolo@gmail.com",
description="Full quantum simulation of muon experiments",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/muon-spectroscopy-computational-project/muspinsim.git",
packages=setuptools.find_packages(),
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
# How mature is this project? Common values are
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
"Development Status :: 5 - Production/Stable",
# Indicate who your project is intended for
"Intended Audience :: Science/Research",
"Topic :: Scientific/Engineering :: Chemistry",
"Topic :: Scientific/Engineering :: Physics",
"Topic :: Scientific/Engineering :: Information Analysis",
],
install_requires=["numpy", "scipy", "soprano", "lark"],
extras_require={
"docs": ["mkdocs", "pymdown-extensions"],
"dev": ["flake8", "black>=22.3.0", "pytest", "pre-commit"],
},
entry_points={
"console_scripts": [
"muspinsim = muspinsim.__main__:main",
"muspinsim.mpi = muspinsim.__main__:main_mpi",
]
},
python_requires=">=3.6",
)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
198,
11748,
900,
37623,
10141,
198,
198,
4480,
1280,
7203,
15675,
11682,
13,
9132,
1600,
366,
81,
1600,
21004,
2625,
40477,
12,
23,
4943,
355,
277,
71,
25,
198,
220,
220,
220,
890... | 2.4941 | 678 |
from . import models
from .InitalCondition import InitialCondition
from .BoundaryCondition import PeriodicBC
from .BoundaryCondition import DirichletBC
from .BoundaryCondition import RobinBC
from .BoundaryCondition import TimeDerivativeBC
from .BoundaryCondition import NeumannBC
from .PDELoss import PDELoss
from .HPMLoss import HPMLoss
from .Logger_Interface import LoggerInterface
from .WandB_Logger import WandbLogger
from .TensorBoard_Logger import TensorBoardLogger
from .PINN import PINN
import PINNFramework.models
import PINNFramework.callbacks
__all__ = [
'InitialCondition',
'PeriodicBC',
'DirichletBC',
'RobinBC',
'NeumannBC',
'TimeDerivativeBC',
'PDELoss',
'HPMLoss',
'PINN',
'models',
'LoggerInterface',
'WandbLogger',
'TensorBoardLogger',
'callbacks']
| [
6738,
764,
1330,
4981,
198,
6738,
764,
818,
1287,
48362,
1330,
20768,
48362,
198,
6738,
764,
49646,
560,
48362,
1330,
18581,
291,
2749,
198,
6738,
764,
49646,
560,
48362,
1330,
36202,
488,
1616,
2749,
198,
6738,
764,
49646,
560,
48362,
... | 2.878049 | 287 |
from motion_primitives_py import MotionPrimitive
import numpy as np
import matplotlib.pyplot as plt
import sympy as sym
from scipy.linalg import expm
import scipy.integrate as integrate
class EuclideanMotionPrimitive(MotionPrimitive):
"""
A motion primitive that is just a straight line, with the norm of the distance between start and goal as the cost.
"""
@classmethod
def from_dict(cls, dict, num_dims, max_state, subclass_specific_data={}):
"""
Load a inputs representation of a motion primitive from a dictionary
"""
return super().from_dict(dict, num_dims, max_state)
def to_dict(self):
"""
Write important attributes of motion primitive to a dictionary
"""
return super().to_dict()
if __name__ == "__main__":
# problem parameters
num_dims = 2
control_space_q = 3
# setup problem
start_state = np.zeros((num_dims * control_space_q,))
# end_state = np.random.rand(num_dims * control_space_q,)
end_state = np.ones_like(start_state)
end_state[0] = 2
max_state = 1 * np.ones((control_space_q+1,))
# polynomial
mp = EuclideanMotionPrimitive(start_state, end_state, num_dims, max_state)
# save
assert(mp.is_valid)
assert(np.array_equal(mp.end_state, end_state))
print(mp.cost)
dictionary = mp.to_dict()
# reconstruct
mp = EuclideanMotionPrimitive.from_dict(dictionary, num_dims, max_state)
# plot
mp.plot(position_only=True)
plt.show()
| [
6738,
6268,
62,
19795,
20288,
62,
9078,
1330,
20843,
23828,
1800,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
11748,
10558,
88,
355,
5659,
198,
6738,
629,
541,
88,
13,
75,
129... | 2.59215 | 586 |
import numpy as np
from matplotlib import pyplot as plt
from koala.pointsets import generate_bluenoise
from koala.voronization import generate_lattice
from koala.graph_color import edge_color
from koala.plotting import plot_lattice, plot_vertex_indices, plot_degeneracy_breaking, plot_plaquettes, line_intersection
from koala.voronization import Lattice
from koala import plotting, example_graphs
h = Lattice(
vertices = np.array([[0.5,0.5], [0.1,0.1], [0.5,0.9], [0.9,0.1]]),
edge_indices = np.array([[0,1],[0,2],[0,3]]),
edge_crossing = np.array([[0,0],[0,0],[0,0]]),
)
n = 10
points = generate_bluenoise(30,n,n)
voronoi_lattice = generate_lattice(points)
test_lattices = [voronoi_lattice,h]
def plotting_test(plotting_function, lattice, N):
"""
A helper script to test plot_vertices, plot_edges and plot_plaquettes
because they have identical interfaces.
:param plotting_function: plotting function
:type plotting_function: function
"""
# Simplest call
plotting_function(lattice)
# Explicit ax
f, ax = plt.subplots()
plotting_function(lattice)
# Adding a single color
plotting_function(lattice, color_scheme = 'green')
# Adding a color_scheme
plotting_function(lattice, color_scheme = ['green', 'black'], labels = np.random.randint(2, size = N))
# Use a slice as a subset
subset = slice(0, 10)
plotting_function(lattice, color_scheme = ['green', 'black'], labels = np.random.randint(2, size = N), subset = subset)
# Use a boolean subset
subset = np.random.randint(2, size = N).astype(bool)
plotting_function(lattice, color_scheme = ['green', 'black'], labels = np.random.randint(2, size = N), subset = subset)
# Use a list of indices subset
subset = np.random.randint(N, size = 5)
plotting_function(lattice, color_scheme = ['green', 'black'], labels = np.random.randint(2, size = N), subset = subset)
| [
11748,
299,
32152,
355,
45941,
198,
6738,
2603,
29487,
8019,
1330,
12972,
29487,
355,
458,
83,
198,
198,
6738,
41727,
6081,
13,
13033,
1039,
1330,
7716,
62,
65,
2290,
23397,
786,
198,
6738,
41727,
6081,
13,
20867,
261,
1634,
1330,
7716,... | 2.652949 | 729 |
"""
Exercรญcio 3. Escreva um programa que lรช duas notas de vรกrios alunos e armazena tais notas em um dicionรกrio, onde a chave รฉ o nome do aluno.
A entrada de dados deve terminar quando for lida uma string vazia como nome.
Escreva uma funรงรฃo que retorna a mรฉdia do aluno, dado seu nome.
"""
if __name__ == '__main__':
dicionario = le_notas()
nome_aluno = input('\nDigite o nome do aluno que deseja saber a nota: ')
if dicionario and nome_aluno in dicionario.keys():
media = retorna_nota_aluno(dicionario, nome_aluno)
print(f'{nome_aluno}: {media}')
| [
37811,
198,
220,
220,
220,
1475,
2798,
8836,
66,
952,
513,
13,
16319,
260,
6862,
23781,
1430,
64,
8358,
300,
25792,
7043,
292,
407,
292,
390,
410,
6557,
380,
418,
435,
403,
418,
304,
3211,
1031,
8107,
256,
15152,
407,
292,
795,
2378... | 2.272031 | 261 |
import json
import os.path
import syslog
| [
11748,
33918,
198,
11748,
28686,
13,
6978,
198,
11748,
25064,
6404,
628,
198
] | 3.307692 | 13 |
"""
--------------------------------------------------
File Name : grid_search.py
Creation Date : 2019-06-27 N 10:37
Last Modified : 2019-06-27 N 10:41
Created By : Joonatan Samuel
--------------------------------------------------
"""
from sklearn.model_selection import cross_validate
from pprint import pprint
# ---- Choose a bunch of models ----
import sklearn.ensemble
import sklearn.linear_model
import sklearn.neighbors
classifiers = {
'Random Forest': sklearn.ensemble.RandomForestClassifier(),
'Logistic Regression': sklearn.linear_model.LogisticRegression(),
'Nearest Neighbors': sklearn.neighbors.KNeighborsClassifier()
}
parameter_sets = {
'Random Forest': [{'n_estimators': [1, 5, 10, 15, 25, 35],
'max_depth': [1, 2, 3, 5, 7, 10]}
],
'Logistic Regression': [{'penalty': ['l1', 'l2'],
'C': [0.1, 0.3, 1, 3, 10, 30, 100]}
],
# Very slow for some reason,
# probably underlying implementation is slow
#
#'Support Vector Machine': [
# {'kernel': ['linear'],
# 'C': [1, 10, 100, 1000]}
# ],
'Nearest Neighbors': [{'n_neighbors': range(1, 25, 3)}]
}
# TODO: rewrite this for loop to use this:
# https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.GridSearchCV.html
for name, model in classifiers.items():
n_folds = 3
scores = cross_validate(model, X, y, cv=n_folds, return_train_score=True)
print("---- model {} ----".format(name))
for fold in range(n_folds):
print("Fold {} \t\t train score {:.2f}\t\t test score {:.2f}".format(
fold,
scores["train_score"][fold],
scores["test_score"][fold]
))
print()
| [
37811,
198,
220,
20368,
1783,
438,
198,
220,
9220,
6530,
1058,
10706,
62,
12947,
13,
9078,
198,
220,
21582,
7536,
1058,
13130,
12,
3312,
12,
1983,
399,
838,
25,
2718,
198,
220,
4586,
40499,
1058,
13130,
12,
3312,
12,
1983,
399,
838,
... | 2.285539 | 816 |
# ------------------------------------------------------------------------------
# Functions for parsing and rendering inline markup.
# ------------------------------------------------------------------------------
import html
import hashlib
import re
# ------------------------------------------------------------------------------
# Regular expressions for identifying inline markup.
# ------------------------------------------------------------------------------
# *x*
re_italic_sc = re.compile(r"\*(\S)\*")
# *foo bar*
re_italic_mc = re.compile(r"\*(\S.*?\S)\*", re.DOTALL)
# **x**
re_bold_sc = re.compile(r"\*{2}(\S)\*{2}")
# **foo bar**
re_bold_mc = re.compile(r"\*{2}(\S.*?\S)\*{2}", re.DOTALL)
# ***x***
re_bolditalic_sc = re.compile(r"\*{3}(\S)\*{3}")
# ***foo bar***
re_bolditalic_mc = re.compile(r"\*{3}(\S.*?\S)\*{3}", re.DOTALL)
# `foo bar`
re_backticks = re.compile(r"`(.+?)`", re.DOTALL)
# [link text](http://example.com)
re_link = re.compile(r"\[([^\]]+)\]\(([^\)]+)\)")
# [link text][ref]
re_ref_link = re.compile(r"\[([^\]]+)\]\[([^\]]*)\]")
# 
re_img = re.compile(r"!\[([^\]]*)\]\(([^\)]+)\)")
# ![alt text][ref]
re_ref_img = re.compile(r"!\[([^\]]*)\]\[([^\]]*)\]")
# [^ref] or [^]
re_footnote_super = re.compile(r"\[\^([^\]]*?)\]")
# [fn:ref] or [fn]
re_footnote_span = re.compile(r"\[fn:?([^\]]*?)\]")
# & '
re_entity = re.compile(r"&[#a-zA-Z0-9]+;")
# html tags: <span>, </span>, <!-- comment -->, etc.
re_html_tag = re.compile(r"<([a-zA-Z/][^>]*?|!--.*?--)>")
# <http://example.com>
re_bracketed_url = re.compile(r"<((?:https?|ftp)://[^>]+)>")
# http://example.com
re_bare_url = re.compile(r"""
(^|\s)
(https?|ftp)
(://[-A-Z0-9+&@#/%?=~_|\[\]\(\)!:,\.;]*[-A-Z0-9+&@#/%=~_|\[\]])
($|\W)
""", re.VERBOSE | re.MULTILINE | re.IGNORECASE)
# n-dash
re_ndash = re.compile(r"((?<=\s)|\b|^)--(?=[ ]|\b|$)", re.MULTILINE)
# m-dash
re_mdash = re.compile(r"((?<=\s)|\b|^)---(?=[ ]|\b|$)", re.MULTILINE)
# x^{2}
re_superscript = re.compile(r"\^\{(.+?)\}")
# H_{2}O
re_subscript = re.compile(r"_\{(.+?)\}")
# ``foo bar``
re_verbatim = re.compile(r"``(.+?)``", re.DOTALL)
# ------------------------------------------------------------------------------
# Renderers.
# ------------------------------------------------------------------------------
# Entry point.
# Hashes a string, stores it as a {digest: string} pair in 'hashes', and
# returns the digest.
| [
2,
16529,
26171,
198,
2,
40480,
329,
32096,
290,
14837,
26098,
41485,
13,
198,
2,
16529,
26171,
198,
198,
11748,
27711,
198,
11748,
12234,
8019,
198,
11748,
302,
628,
198,
2,
16529,
26171,
198,
2,
23603,
14700,
329,
13720,
26098,
41485,... | 2.286506 | 1,082 |
#!/usr/bin/env python
import argparse
import FresnoPython
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Process some integers.')
parser.add_argument('--website', action='store_true', help='Open the website.')
parser.add_argument('--map', action='store_true', help='Open the location on Google Maps.')
parser.add_argument('--twitter', action='store_true', help='Open the twitter account.')
args = parser.parse_args()
main(args)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
11748,
1822,
29572,
198,
11748,
39927,
37906,
628,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
220,
220,
220,
30751,
796,
1822,
29572,
13,
28100,
1713,
46677... | 3.143791 | 153 |
# dispatch.py
"""
patterns to parse:
1. func arg1 arg2 key1=val1 key2=val2
2. func arg1 arg2 key1:val1 key2:val2
3. func arg1 arg2 dict(key1=val1, key2=val2)
4. obj.func arg1 arg2 key1=val1 key2=val2
"""
obj = Klass()
gdict = {'func': func, 'obj': obj}
s1 = 'func arg1 arg2 key1=val1 key2=val2'
head = lambda x: x[:1]
tail = lambda x: x[1:]
| [
2,
27965,
13,
9078,
198,
198,
37811,
198,
198,
33279,
82,
284,
21136,
25,
198,
198,
16,
13,
25439,
1822,
16,
1822,
17,
1994,
16,
28,
2100,
16,
1994,
17,
28,
2100,
17,
198,
198,
17,
13,
25439,
1822,
16,
1822,
17,
1994,
16,
25,
... | 2.079096 | 177 |
from zeep import Client
from zeep.transports import Transport
from zeep import xsd
from zeep import helpers
import xmltodict
import json
| [
6738,
41271,
538,
1330,
20985,
201,
198,
6738,
41271,
538,
13,
7645,
3742,
1330,
19940,
201,
198,
6738,
41271,
538,
1330,
2124,
21282,
201,
198,
6738,
41271,
538,
1330,
49385,
201,
198,
11748,
2124,
76,
2528,
375,
713,
201,
198,
11748,
... | 1.825 | 120 |
from . import phpast as php
import ast as py
unary_ops = {
'~': py.Invert,
'!': py.Not,
'+': py.UAdd,
'-': py.USub,
}
bool_ops = {
'&&': py.And,
'||': py.Or,
'and': py.And,
'or': py.Or,
}
cmp_ops = {
'!=': py.NotEq,
'!==': py.NotEq,
'<>': py.NotEq,
'<': py.Lt,
'<=': py.LtE,
'==': py.Eq,
'===': py.Eq,
'>': py.Gt,
'>=': py.GtE,
}
binary_ops = {
'+': py.Add,
'-': py.Sub,
'*': py.Mult,
'/': py.Div,
'%': py.Mod,
'<<': py.LShift,
'>>': py.RShift,
'|': py.BitOr,
'&': py.BitAnd,
'^': py.BitXor,
}
casts = {
'double': 'float',
'string': 'str',
'array': 'list',
}
| [
6738,
764,
1330,
872,
30119,
355,
39347,
198,
11748,
6468,
355,
12972,
198,
198,
403,
560,
62,
2840,
796,
1391,
198,
220,
220,
220,
705,
93,
10354,
12972,
13,
818,
1851,
11,
198,
220,
220,
220,
705,
0,
10354,
12972,
13,
3673,
11,
... | 1.672372 | 409 |
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
range = getattr(__builtins__, 'xrange', range)
# end of py2 compatability boilerplate
import logging
import numpy as np
from matrixprofile import core
logger = logging.getLogger(__name__)
_EPS = 1e-14
def _batch_compute(args):
"""
Internal function to compute a batch of the time series in parallel.
Parameters
----------
args : tuple
Various attributes used for computing the batch.
(
batch_start : int
The starting index for this batch.
batch_end : int
The ending index for this batch.
ts : array_like
The time series to compute the matrix profile for.
query : array_like
The query.
window_size : int
The size of the window to compute the profile over.
data_length : int
The number of elements in the time series.
profile_length : int
The number of elements that will be in the final matrix
profile.
exclusion_zone : int
Used to exclude trivial matches.
data_mu : array_like
The moving average over the time series for the given window
size.
data_sig : array_like
The moving standard deviation over the time series for the
given window size.
first_product : array_like
The first sliding dot product for the time series over index
0 to window_size.
skip_locs : array_like
Indices that should be skipped for distance profile calculation
due to a nan or inf.
)
Returns
-------
dict : profile
The matrix profile, left and right matrix profiles and their respective
profile indices.
>>> {
>>> 'mp': The matrix profile,
>>> 'pi': The matrix profile 1NN indices,
>>> 'rmp': The right matrix profile,
>>> 'rpi': The right matrix profile 1NN indices,
>>> 'lmp': The left matrix profile,
>>> 'lpi': The left matrix profile 1NN indices,
>>> }
"""
num_dim, batch_start, batch_end, ts, query, window_size, data_length, \
profile_length, exclusion_zone, data_mu, data_sig, \
first_product, skip_locs, profile_dimension, return_dimension = args
# initialize matrices
matrix_profile = np.full((num_dim, profile_length), np.inf)
profile_index = np.full((num_dim, profile_length), 0)
left_matrix_profile = None
right_matrix_profile = None
left_profile_index = None
right_profile_index = None
left_matrix_profile = np.copy(matrix_profile)
right_matrix_profile = np.copy(matrix_profile)
left_profile_index = np.copy(profile_index)
right_profile_index = np.copy(profile_index)
# with batch 0 we do not need to recompute the dot product
# however with other batch windows, we need the previous iterations sliding
# dot product
last_product = np.copy(first_product)
if batch_start is 0:
first_window = query[:, batch_start:batch_start + window_size]
else:
first_window = query[:, batch_start - 1:batch_start + window_size - 1]
for i in range(num_dim):
last_product[i, :] = core.fft_convolve(ts[i, :], first_window[i, :])
query_sum = np.sum(first_window, axis=1)
query_2sum = np.sum(first_window**2, axis=1)
query_mu, query_sig = np.empty(num_dim), np.empty(num_dim)
for i in range(num_dim):
query_mu[i], query_sig[i] = core.moving_avg_std(first_window[i, :], window_size)
drop_value = np.empty(num_dim)
for i in range(num_dim):
drop_value[i] = first_window[i, 0]
distance_profile = np.empty((num_dim, profile_length))
# make sure to compute inclusively from batch start to batch end
# otherwise there are gaps in the profile
if batch_end < profile_length:
batch_end += 1
# iteratively compute distance profile and update with element-wise mins
for i in range(batch_start, batch_end):
# check for nan or inf and skip
if skip_locs[i]:
continue
for j in range(num_dim):
if i == 0:
query_window = query[j, i:i + window_size]
distance_profile[j, :] = core.distance_profile(last_product[j, :], window_size, data_mu[j, :],
data_sig[j, :], query_mu[j], query_sig[j])
# apply exclusion zone
distance_profile[j, :] = core.apply_exclusion_zone(exclusion_zone, 0, window_size, data_length, 0,
distance_profile[j, :])
else:
query_window = query[j, i:i + window_size]
query_sum[j] = query_sum[j] - drop_value[j] + query_window[-1]
query_2sum[j] = query_2sum[j] - drop_value[j]**2 + query_window[-1]**2
query_mu[j] = query_sum[j] / window_size
query_sig2 = query_2sum[j] / window_size - query_mu[j]**2
if query_sig2 < _EPS:
query_sig2 = _EPS
query_sig[j] = np.sqrt(query_sig2)
last_product[j, 1:] = last_product[j, 0:data_length - window_size] \
- ts[j, 0:data_length - window_size] * drop_value[j] \
+ ts[j, window_size:] * query_window[-1]
last_product[j, 0] = first_product[j, i]
distance_profile[j, :] = core.distance_profile(last_product[j, :], window_size, data_mu[j, :],
data_sig[j, :], query_mu[j], query_sig[j])
# apply the exclusion zone
distance_profile[j, :] = core.apply_exclusion_zone(exclusion_zone, 0, window_size, data_length, i,
distance_profile[j, :])
distance_profile[j, distance_profile[j, :] < _EPS] = 0
drop_value[j] = query_window[0]
if np.any(query_sig < _EPS):
continue
distance_profile[:, skip_locs] = np.inf
distance_profile[data_sig < np.sqrt(_EPS)] = np.inf
distance_profile_dim = np.argsort(distance_profile, axis=0)
distance_profile_sort = np.sort(distance_profile, axis=0)
distance_profile_cumsum = np.zeros(profile_length)
for j in range(num_dim):
distance_profile_cumsum += distance_profile_sort[j, :]
distance_profile_mean = distance_profile_cumsum / (j + 1)
# update the matrix profile
indices = (distance_profile_mean < matrix_profile[j, :])
matrix_profile[j, indices] = distance_profile_mean[indices]
profile_index[j, indices] = i
if return_dimension:
profile_dimension[j][:, indices] = distance_profile_dim[:j + 1, indices]
# update the left and right matrix profiles
# find differences, shift left and update
indices = distance_profile_mean[i:] < left_matrix_profile[j, i:]
falses = np.zeros(i).astype('bool')
indices = np.append(falses, indices)
left_matrix_profile[j, indices] = distance_profile_mean[indices]
left_profile_index[j, np.argwhere(indices)] = i
# find differences, shift right and update
indices = distance_profile_mean[0:i] < right_matrix_profile[j, 0:i]
falses = np.zeros(profile_length - i).astype('bool')
indices = np.append(indices, falses)
right_matrix_profile[j, indices] = distance_profile_mean[indices]
right_profile_index[j, np.argwhere(indices)] = i
return {
'mp': matrix_profile,
'pi': profile_index,
'pd': profile_dimension,
'rmp': right_matrix_profile,
'rpi': right_profile_index,
'lmp': left_matrix_profile,
'lpi': left_profile_index,
}
def mstomp(ts, window_size, return_dimension=False, n_jobs=1):
"""
Computes multidimensional matrix profile with mSTAMP (stomp based). Ray or Python's multiprocessing library may be used. When you have initialized Ray on your machine, it takes priority over using Python's multiprocessing.
Parameters
----------
ts : array_like, shape (n_dim, seq_len)
The multidimensional time series to compute the multidimensional matrix profile for.
window_size: int
The size of the window to compute the matrix profile over.
return_dimension : bool
if True, also return the matrix profile dimension. It takses O(d^2 n)
to store and O(d^2 n^2) to compute. (default is False)
n_jobs : int, Default = 1
Number of cpu cores to use.
Returns
-------
dict : profile
A MatrixProfile data structure.
>>> {
>>> 'mp': The matrix profile,
>>> 'pi': The matrix profile 1NN indices,
>>> 'rmp': The right matrix profile,
>>> 'rpi': The right matrix profile 1NN indices,
>>> 'lmp': The left matrix profile,
>>> 'lpi': The left matrix profile 1NN indices,
>>> 'metric': The distance metric computed for the mp,
>>> 'w': The window size used to compute the matrix profile,
>>> 'ez': The exclusion zone used,
>>> 'sample_pct': Percentage of samples used in computing the MP,
>>> 'data': {
>>> 'ts': Time series data,
>>> 'query': Query data if supplied
>>> }
>>> 'class': "MatrixProfile"
>>> 'algorithm': "stomp_based_mstamp"
>>> }
Raises
------
ValueError
If window_size < 4.
If window_size > time series length / 2.
If ts is not a list or np.array.
"""
query = ts
# data conversion to np.array
ts = core.to_np_array(ts)
query = core.to_np_array(query)
if window_size < 4:
error = "window size must be at least 4."
raise ValueError(error)
if ts.ndim == 1:
ts = np.expand_dims(ts, axis=0)
query = np.expand_dims(query, axis=0)
if window_size > query.shape[1] / 2:
error = "Time series is too short relative to desired window size"
raise ValueError(error)
# multiprocessing or single threaded approach
if n_jobs == 1:
pass
else:
n_jobs = core.valid_n_jobs(n_jobs)
# precompute some common values - profile length, query length etc.
profile_length = core.get_profile_length(ts, query, window_size)
data_length = ts.shape[1]
query_length = query.shape[1]
num_queries = query_length - window_size + 1
exclusion_zone = int(np.ceil(window_size / 2.0))
num_dim = ts.shape[0]
# find skip locations, clean up nan and inf in the ts and query
skip_locs = core.find_multid_skip_locations(ts, profile_length, window_size)
ts = core.clean_nan_inf(ts)
query = core.clean_nan_inf(query)
# initialize matrices
matrix_profile = np.full((num_dim, profile_length), np.inf)
profile_index = np.full((num_dim, profile_length), 0)
# profile_index = np.full((num_dim, profile_length), -1)
# compute left and right matrix profile when similarity join does not happen
left_matrix_profile = np.copy(matrix_profile)
right_matrix_profile = np.copy(matrix_profile)
left_profile_index = np.copy(profile_index)
right_profile_index = np.copy(profile_index)
profile_dimension = []
if return_dimension:
n_jobs = 1
for i in range(num_dim):
profile_dimension.append(np.empty((i + 1, profile_length), dtype=int))
# precompute some statistics on ts
data_mu, data_sig, first_product = np.empty((num_dim, profile_length)), np.empty(
(num_dim, profile_length)), np.empty((num_dim, profile_length))
for i in range(num_dim):
data_mu[i, :], data_sig[i, :] = core.moving_avg_std(ts[i, :], window_size)
first_window = query[i, 0:window_size]
first_product[i, :] = core.fft_convolve(ts[i, :], first_window)
batch_windows = []
results = []
# batch compute with multiprocessing
args = []
for start, end in core.generate_batch_jobs(num_queries, n_jobs):
args.append((num_dim, start, end, ts, query, window_size, data_length, profile_length, exclusion_zone, data_mu,
data_sig, first_product, skip_locs, profile_dimension, return_dimension))
batch_windows.append((start, end))
# we are running single threaded stomp - no need to initialize any
# parallel environments.
if n_jobs == 1 or len(args) == 1:
results.append(_batch_compute(args[0]))
else:
# parallelize
with core.mp_pool()(n_jobs) as pool:
results = pool.map(_batch_compute, args)
# now we combine the batch results
if len(results) == 1:
result = results[0]
matrix_profile = result['mp']
profile_index = result['pi']
profile_dimension = result['pd']
left_matrix_profile = result['lmp']
left_profile_index = result['lpi']
right_matrix_profile = result['rmp']
right_profile_index = result['rpi']
else:
for index, result in enumerate(results):
start = batch_windows[index][0]
end = batch_windows[index][1]
# update the matrix profile
indices = result['mp'] < matrix_profile
matrix_profile[indices] = result['mp'][indices]
profile_index[indices] = result['pi'][indices]
# update the left and right matrix profiles
indices = result['lmp'] < left_matrix_profile
left_matrix_profile[indices] = result['lmp'][indices]
left_profile_index[indices] = result['lpi'][indices]
indices = result['rmp'] < right_matrix_profile
right_matrix_profile[indices] = result['rmp'][indices]
right_profile_index[indices] = result['rpi'][indices]
return {
'mp': matrix_profile,
'pi': profile_index,
'pd': profile_dimension,
'rmp': right_matrix_profile,
'rpi': right_profile_index,
'lmp': left_matrix_profile,
'lpi': left_profile_index,
'metric': 'euclidean',
'w': window_size,
'ez': exclusion_zone,
'sample_pct': 1,
'data': {
'ts': ts,
'query': query
},
'class': "MatrixProfile",
'algorithm': "stomp_based_mstamp"
} | [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
6738,
11593,
37443,
834,
1330,
4112,
62,
11748,
198,
6738,
11593,
37443,
834,
1330,
7297,
198,
6738,
11593,
37443,
834,
1330,
3601,
62,
8818,
198,
6738,
11593,
37443,
834,... | 2.247886 | 6,624 |
import sale
import purchase
import account_invoice
import account_voucher
import stock
import wizard
import partner
import res_config | [
11748,
5466,
198,
11748,
5001,
198,
11748,
1848,
62,
16340,
2942,
198,
11748,
1848,
62,
85,
280,
2044,
198,
11748,
4283,
198,
11748,
18731,
198,
11748,
5212,
198,
11748,
581,
62,
11250
] | 4.15625 | 32 |
import array
numbers = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
numbers_array = array('i', numbers) # ERROR
| [
11748,
7177,
201,
198,
201,
198,
77,
17024,
796,
685,
15,
11,
352,
11,
362,
11,
513,
11,
604,
11,
642,
11,
718,
11,
767,
11,
807,
11,
860,
60,
201,
198,
77,
17024,
62,
18747,
796,
7177,
10786,
72,
3256,
3146,
8,
1303,
33854,
2... | 2.191489 | 47 |
import argparse
import pickle
import re
from collections import Counter
from os import walk, mkdir, makedirs
from os.path import relpath, join, exists
from typing import Set
from tqdm import tqdm
from docqa import config
from docqa.config import CORPUS_DIR
from docqa.data_processing.text_utils import NltkAndPunctTokenizer
from docqa.triviaqa.read_data import normalize_wiki_filename
from docqa.utils import group, split, flatten_iterable
"""
Build and cache a tokenized version of the evidence corpus
"""
def build_tokenized_files(filenames, input_root, output_root, tokenizer, override=True) -> Set[str]:
"""
For each file in `filenames` loads the text, tokenizes it with `tokenizer, and
saves the output to the same relative location in `output_root`.
@:return a set of all the individual words seen
"""
voc = set()
for filename in filenames:
out_file = normalize_wiki_filename(filename[:filename.rfind(".")]) + ".txt"
out_file = join(output_root, out_file)
if not override and exists(out_file):
continue
with open(join(input_root, filename), "r") as in_file:
text = in_file.read().strip()
paras = [x for x in text.split("\n") if len(x) > 0]
paragraphs = [tokenizer.tokenize_paragraph(x) for x in paras]
for para in paragraphs:
for i, sent in enumerate(para):
voc.update(sent)
with open(join(output_root, out_file), "w") as in_file:
in_file.write("\n\n".join("\n".join(" ".join(sent) for sent in para) for para in paragraphs))
return voc
class TriviaQaEvidenceCorpusTxt(object):
"""
Corpus of the tokenized text from the given TriviaQa evidence documents.
Allows the text to be retrieved by document id
"""
_split_all = re.compile("[\n ]")
_split_para = re.compile("\n\n+") # FIXME we should not have saved document w/extra spaces...
if __name__ == "__main__":
main() | [
11748,
1822,
29572,
198,
11748,
2298,
293,
198,
11748,
302,
198,
6738,
17268,
1330,
15034,
198,
6738,
28686,
1330,
2513,
11,
33480,
15908,
11,
285,
4335,
17062,
198,
6738,
28686,
13,
6978,
1330,
823,
6978,
11,
4654,
11,
7160,
198,
6738,... | 2.703552 | 732 |
"""
See Challenge ROADEF 2001 (FAPP: Problรจme d'affectation de frรฉquences avec polarization)
Examples of Execution:
python3 Fapp.py -data=Fapp_ex2.json
python3 Fapp.py -data=Fapp_ex2.json -variant=short
"""
from pycsp3 import *
domains, routes, hard_constraints, soft_constraints = data
domains = [domains[route.domain] for route in routes] # we skip the indirection
polarizations = [route.polarization for route in routes]
n, nSofts = len(routes), len(data.softs)
# f[i] is the frequency of the ith radio-link
f = VarArray(size=n, dom=lambda i: domains[i])
# p[i] is the polarization of the ith radio-link
p = VarArray(size=n, dom=lambda i: {0, 1} if polarizations[i] == 0 else {1} if polarizations[i] == 1 else {0})
# k is the relaxation level to be optimized
k = Var(dom=range(12))
# v1[q] is 1 iff the qth pair of radio-electric compatibility constraints is violated when relaxing another level
v1 = VarArray(size=nSofts, dom={0, 1})
# v2[q] is the number of times the qth pair of radio-electric compatibility constraints is violated when relaxing more than one level
v2 = VarArray(size=nSofts, dom=range(11))
satisfy(
# imperative constraints
dst == gap if eq else dst != gap for (dst, eq, gap) in [(abs(f[i] - f[j] if fq else p[i] - p[j]), eq, gap) for (i, j, fq, eq, gap) in hard_constraints]
)
if not variant():
satisfy(
# soft radio-electric compatibility constraints
(f[i], f[j], p[i], p[j], k, v1[l], v2[l]) in table_soft(i, j, tuple(eqr), tuple(ner), False) for l, (i, j, eqr, ner) in enumerate(soft_constraints)
)
elif variant("short"):
soft_links = [[False] * n for _ in range(n)]
for c in data.softs:
soft_links[c.route1][c.route2] = soft_links[c.route2][c.route1] = True
# d[i][j] is the distance between the ith and the jth frequencies (for i < j when a soft link exists)
d = VarArray(size=[n, n], dom=lambda i, j: {abs(f1 - f2) for f1 in domains[i] for f2 in domains[j]} if i < j and soft_links[i][j] else None)
satisfy(
# computing intermediary distances
[d[i][j] == abs(f[i] - f[j]) for i, j in combinations(range(n), 2) if d[i][j]],
# soft radio-electric compatibility constraints
[(d[min(i, j)][max(i, j)], p[i], p[j], k, v1[l], v2[l]) in table_soft(i, j, tuple(er), tuple(nr)) for l, (i, j, er, nr) in enumerate(soft_constraints)]
)
minimize(
k * (10 * nSofts ** 2) + Sum(v1) * (10 * nSofts) + Sum(v2)
)
""" Comments
1) we transform lists in tuples of relaxation arrays for speeding up calculations
2) when gap is 0, abs(x - y) == gap (resp., abs(x - y) != gap) is automatically simplified into x == y (resp., x != y)
"""
| [
37811,
198,
6214,
13879,
15107,
19266,
37,
5878,
357,
37,
24805,
25,
1041,
2436,
14064,
1326,
288,
6,
2001,
478,
341,
390,
1216,
2634,
421,
3007,
257,
35138,
42704,
8,
198,
198,
27730,
286,
37497,
25,
198,
220,
21015,
18,
376,
1324,
... | 2.566346 | 1,040 |
'Generated protocol buffer code.'
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(name='tensorflow/lite/tools/evaluation/proto/preprocessing_steps.proto', package='tflite.evaluation', syntax='proto2', serialized_options=b'\n\x11tflite.evaluationP\x01\xf8\x01\x01', create_key=_descriptor._internal_create_key, serialized_pb=b'\n@tensorflow/lite/tools/evaluation/proto/preprocessing_steps.proto\x12\x11tflite.evaluation"\xa8\x02\n\x1cImagePreprocessingStepParams\x12<\n\x0fcropping_params\x18\x01 \x01(\x0b2!.tflite.evaluation.CroppingParamsH\x00\x12<\n\x0fresizing_params\x18\x02 \x01(\x0b2!.tflite.evaluation.ResizingParamsH\x00\x12:\n\x0epadding_params\x18\x03 \x01(\x0b2 .tflite.evaluation.PaddingParamsH\x00\x12F\n\x14normalization_params\x18\x04 \x01(\x0b2&.tflite.evaluation.NormalizationParamsH\x00B\x08\n\x06params"*\n\tImageSize\x12\r\n\x05width\x18\x01 \x02(\r\x12\x0e\n\x06height\x18\x02 \x02(\r"\x8c\x01\n\x0eCroppingParams\x12"\n\x11cropping_fraction\x18\x01 \x01(\x02:\x050.875H\x00\x123\n\x0btarget_size\x18\x02 \x01(\x0b2\x1c.tflite.evaluation.ImageSizeH\x00\x12\x17\n\x0fsquare_cropping\x18\x03 \x01(\x08B\x08\n\x06params"^\n\x0eResizingParams\x121\n\x0btarget_size\x18\x01 \x02(\x0b2\x1c.tflite.evaluation.ImageSize\x12\x19\n\x11aspect_preserving\x18\x02 \x02(\x08"\x7f\n\rPaddingParams\x123\n\x0btarget_size\x18\x01 \x01(\x0b2\x1c.tflite.evaluation.ImageSizeH\x00\x12\x18\n\x0esquare_padding\x18\x02 \x01(\x08H\x00\x12\x15\n\rpadding_value\x18\x03 \x02(\x05B\x08\n\x06params"\xe1\x01\n\x13NormalizationParams\x12\x1a\n\x10channelwise_mean\x18\x01 \x01(\x02H\x00\x12L\n\x05means\x18\x02 \x01(\x0b2;.tflite.evaluation.NormalizationParams.PerChannelMeanValuesH\x00\x12\x10\n\x05scale\x18\x03 \x02(\x02:\x011\x1aF\n\x14PerChannelMeanValues\x12\x0e\n\x06r_mean\x18\x01 \x02(\x02\x12\x0e\n\x06g_mean\x18\x02 \x02(\x02\x12\x0e\n\x06b_mean\x18\x03 \x02(\x02B\x06\n\x04meanB\x18\n\x11tflite.evaluationP\x01\xf8\x01\x01')
_IMAGEPREPROCESSINGSTEPPARAMS = _descriptor.Descriptor(name='ImagePreprocessingStepParams', full_name='tflite.evaluation.ImagePreprocessingStepParams', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[_descriptor.FieldDescriptor(name='cropping_params', full_name='tflite.evaluation.ImagePreprocessingStepParams.cropping_params', index=0, number=1, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor(name='resizing_params', full_name='tflite.evaluation.ImagePreprocessingStepParams.resizing_params', index=1, number=2, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor(name='padding_params', full_name='tflite.evaluation.ImagePreprocessingStepParams.padding_params', index=2, number=3, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor(name='normalization_params', full_name='tflite.evaluation.ImagePreprocessingStepParams.normalization_params', index=3, number=4, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key)], extensions=[], nested_types=[], enum_types=[], serialized_options=None, 
is_extendable=False, syntax='proto2', extension_ranges=[], oneofs=[_descriptor.OneofDescriptor(name='params', full_name='tflite.evaluation.ImagePreprocessingStepParams.params', index=0, containing_type=None, create_key=_descriptor._internal_create_key, fields=[])], serialized_start=88, serialized_end=384)
_IMAGESIZE = _descriptor.Descriptor(name='ImageSize', full_name='tflite.evaluation.ImageSize', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[_descriptor.FieldDescriptor(name='width', full_name='tflite.evaluation.ImageSize.width', index=0, number=1, type=13, cpp_type=3, label=2, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor(name='height', full_name='tflite.evaluation.ImageSize.height', index=1, number=2, type=13, cpp_type=3, label=2, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key)], extensions=[], nested_types=[], enum_types=[], serialized_options=None, is_extendable=False, syntax='proto2', extension_ranges=[], oneofs=[], serialized_start=386, serialized_end=428)
_CROPPINGPARAMS = _descriptor.Descriptor(name='CroppingParams', full_name='tflite.evaluation.CroppingParams', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[_descriptor.FieldDescriptor(name='cropping_fraction', full_name='tflite.evaluation.CroppingParams.cropping_fraction', index=0, number=1, type=2, cpp_type=6, label=1, has_default_value=True, default_value=float(0.875), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor(name='target_size', full_name='tflite.evaluation.CroppingParams.target_size', index=1, number=2, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor(name='square_cropping', full_name='tflite.evaluation.CroppingParams.square_cropping', index=2, number=3, type=8, cpp_type=7, label=1, has_default_value=False, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key)], extensions=[], nested_types=[], enum_types=[], serialized_options=None, is_extendable=False, syntax='proto2', extension_ranges=[], oneofs=[_descriptor.OneofDescriptor(name='params', full_name='tflite.evaluation.CroppingParams.params', index=0, containing_type=None, create_key=_descriptor._internal_create_key, fields=[])], serialized_start=431, serialized_end=571)
_RESIZINGPARAMS = _descriptor.Descriptor(name='ResizingParams', full_name='tflite.evaluation.ResizingParams', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[_descriptor.FieldDescriptor(name='target_size', full_name='tflite.evaluation.ResizingParams.target_size', index=0, number=1, type=11, cpp_type=10, label=2, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor(name='aspect_preserving', full_name='tflite.evaluation.ResizingParams.aspect_preserving', index=1, number=2, type=8, cpp_type=7, label=2, has_default_value=False, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key)], extensions=[], nested_types=[], enum_types=[], serialized_options=None, is_extendable=False, syntax='proto2', extension_ranges=[], oneofs=[], serialized_start=573, serialized_end=667)
_PADDINGPARAMS = _descriptor.Descriptor(name='PaddingParams', full_name='tflite.evaluation.PaddingParams', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[_descriptor.FieldDescriptor(name='target_size', full_name='tflite.evaluation.PaddingParams.target_size', index=0, number=1, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor(name='square_padding', full_name='tflite.evaluation.PaddingParams.square_padding', index=1, number=2, type=8, cpp_type=7, label=1, has_default_value=False, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor(name='padding_value', full_name='tflite.evaluation.PaddingParams.padding_value', index=2, number=3, type=5, cpp_type=1, label=2, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key)], extensions=[], nested_types=[], enum_types=[], serialized_options=None, is_extendable=False, syntax='proto2', extension_ranges=[], oneofs=[_descriptor.OneofDescriptor(name='params', full_name='tflite.evaluation.PaddingParams.params', index=0, containing_type=None, create_key=_descriptor._internal_create_key, fields=[])], serialized_start=669, serialized_end=796)
_NORMALIZATIONPARAMS_PERCHANNELMEANVALUES = _descriptor.Descriptor(name='PerChannelMeanValues', full_name='tflite.evaluation.NormalizationParams.PerChannelMeanValues', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[_descriptor.FieldDescriptor(name='r_mean', full_name='tflite.evaluation.NormalizationParams.PerChannelMeanValues.r_mean', index=0, number=1, type=2, cpp_type=6, label=2, has_default_value=False, default_value=float(0), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor(name='g_mean', full_name='tflite.evaluation.NormalizationParams.PerChannelMeanValues.g_mean', index=1, number=2, type=2, cpp_type=6, label=2, has_default_value=False, default_value=float(0), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor(name='b_mean', full_name='tflite.evaluation.NormalizationParams.PerChannelMeanValues.b_mean', index=2, number=3, type=2, cpp_type=6, label=2, has_default_value=False, default_value=float(0), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key)], extensions=[], nested_types=[], enum_types=[], serialized_options=None, is_extendable=False, syntax='proto2', extension_ranges=[], oneofs=[], serialized_start=946, serialized_end=1016)
_NORMALIZATIONPARAMS = _descriptor.Descriptor(name='NormalizationParams', full_name='tflite.evaluation.NormalizationParams', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[_descriptor.FieldDescriptor(name='channelwise_mean', full_name='tflite.evaluation.NormalizationParams.channelwise_mean', index=0, number=1, type=2, cpp_type=6, label=1, has_default_value=False, default_value=float(0), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor(name='means', full_name='tflite.evaluation.NormalizationParams.means', index=1, number=2, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor(name='scale', full_name='tflite.evaluation.NormalizationParams.scale', index=2, number=3, type=2, cpp_type=6, label=2, has_default_value=True, default_value=float(1), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key)], extensions=[], nested_types=[_NORMALIZATIONPARAMS_PERCHANNELMEANVALUES], enum_types=[], serialized_options=None, is_extendable=False, syntax='proto2', extension_ranges=[], oneofs=[_descriptor.OneofDescriptor(name='mean', full_name='tflite.evaluation.NormalizationParams.mean', index=0, containing_type=None, create_key=_descriptor._internal_create_key, fields=[])], serialized_start=799, serialized_end=1024)
_IMAGEPREPROCESSINGSTEPPARAMS.fields_by_name['cropping_params'].message_type = _CROPPINGPARAMS
_IMAGEPREPROCESSINGSTEPPARAMS.fields_by_name['resizing_params'].message_type = _RESIZINGPARAMS
_IMAGEPREPROCESSINGSTEPPARAMS.fields_by_name['padding_params'].message_type = _PADDINGPARAMS
_IMAGEPREPROCESSINGSTEPPARAMS.fields_by_name['normalization_params'].message_type = _NORMALIZATIONPARAMS
_IMAGEPREPROCESSINGSTEPPARAMS.oneofs_by_name['params'].fields.append(_IMAGEPREPROCESSINGSTEPPARAMS.fields_by_name['cropping_params'])
_IMAGEPREPROCESSINGSTEPPARAMS.fields_by_name['cropping_params'].containing_oneof = _IMAGEPREPROCESSINGSTEPPARAMS.oneofs_by_name['params']
_IMAGEPREPROCESSINGSTEPPARAMS.oneofs_by_name['params'].fields.append(_IMAGEPREPROCESSINGSTEPPARAMS.fields_by_name['resizing_params'])
_IMAGEPREPROCESSINGSTEPPARAMS.fields_by_name['resizing_params'].containing_oneof = _IMAGEPREPROCESSINGSTEPPARAMS.oneofs_by_name['params']
_IMAGEPREPROCESSINGSTEPPARAMS.oneofs_by_name['params'].fields.append(_IMAGEPREPROCESSINGSTEPPARAMS.fields_by_name['padding_params'])
_IMAGEPREPROCESSINGSTEPPARAMS.fields_by_name['padding_params'].containing_oneof = _IMAGEPREPROCESSINGSTEPPARAMS.oneofs_by_name['params']
_IMAGEPREPROCESSINGSTEPPARAMS.oneofs_by_name['params'].fields.append(_IMAGEPREPROCESSINGSTEPPARAMS.fields_by_name['normalization_params'])
_IMAGEPREPROCESSINGSTEPPARAMS.fields_by_name['normalization_params'].containing_oneof = _IMAGEPREPROCESSINGSTEPPARAMS.oneofs_by_name['params']
_CROPPINGPARAMS.fields_by_name['target_size'].message_type = _IMAGESIZE
_CROPPINGPARAMS.oneofs_by_name['params'].fields.append(_CROPPINGPARAMS.fields_by_name['cropping_fraction'])
_CROPPINGPARAMS.fields_by_name['cropping_fraction'].containing_oneof = _CROPPINGPARAMS.oneofs_by_name['params']
_CROPPINGPARAMS.oneofs_by_name['params'].fields.append(_CROPPINGPARAMS.fields_by_name['target_size'])
_CROPPINGPARAMS.fields_by_name['target_size'].containing_oneof = _CROPPINGPARAMS.oneofs_by_name['params']
_RESIZINGPARAMS.fields_by_name['target_size'].message_type = _IMAGESIZE
_PADDINGPARAMS.fields_by_name['target_size'].message_type = _IMAGESIZE
_PADDINGPARAMS.oneofs_by_name['params'].fields.append(_PADDINGPARAMS.fields_by_name['target_size'])
_PADDINGPARAMS.fields_by_name['target_size'].containing_oneof = _PADDINGPARAMS.oneofs_by_name['params']
_PADDINGPARAMS.oneofs_by_name['params'].fields.append(_PADDINGPARAMS.fields_by_name['square_padding'])
_PADDINGPARAMS.fields_by_name['square_padding'].containing_oneof = _PADDINGPARAMS.oneofs_by_name['params']
_NORMALIZATIONPARAMS_PERCHANNELMEANVALUES.containing_type = _NORMALIZATIONPARAMS
_NORMALIZATIONPARAMS.fields_by_name['means'].message_type = _NORMALIZATIONPARAMS_PERCHANNELMEANVALUES
_NORMALIZATIONPARAMS.oneofs_by_name['mean'].fields.append(_NORMALIZATIONPARAMS.fields_by_name['channelwise_mean'])
_NORMALIZATIONPARAMS.fields_by_name['channelwise_mean'].containing_oneof = _NORMALIZATIONPARAMS.oneofs_by_name['mean']
_NORMALIZATIONPARAMS.oneofs_by_name['mean'].fields.append(_NORMALIZATIONPARAMS.fields_by_name['means'])
_NORMALIZATIONPARAMS.fields_by_name['means'].containing_oneof = _NORMALIZATIONPARAMS.oneofs_by_name['mean']
DESCRIPTOR.message_types_by_name['ImagePreprocessingStepParams'] = _IMAGEPREPROCESSINGSTEPPARAMS
DESCRIPTOR.message_types_by_name['ImageSize'] = _IMAGESIZE
DESCRIPTOR.message_types_by_name['CroppingParams'] = _CROPPINGPARAMS
DESCRIPTOR.message_types_by_name['ResizingParams'] = _RESIZINGPARAMS
DESCRIPTOR.message_types_by_name['PaddingParams'] = _PADDINGPARAMS
DESCRIPTOR.message_types_by_name['NormalizationParams'] = _NORMALIZATIONPARAMS
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
ImagePreprocessingStepParams = _reflection.GeneratedProtocolMessageType('ImagePreprocessingStepParams', (_message.Message,), {'DESCRIPTOR': _IMAGEPREPROCESSINGSTEPPARAMS, '__module__': 'tensorflow.lite.tools.evaluation.proto.preprocessing_steps_pb2'})
_sym_db.RegisterMessage(ImagePreprocessingStepParams)
ImageSize = _reflection.GeneratedProtocolMessageType('ImageSize', (_message.Message,), {'DESCRIPTOR': _IMAGESIZE, '__module__': 'tensorflow.lite.tools.evaluation.proto.preprocessing_steps_pb2'})
_sym_db.RegisterMessage(ImageSize)
CroppingParams = _reflection.GeneratedProtocolMessageType('CroppingParams', (_message.Message,), {'DESCRIPTOR': _CROPPINGPARAMS, '__module__': 'tensorflow.lite.tools.evaluation.proto.preprocessing_steps_pb2'})
_sym_db.RegisterMessage(CroppingParams)
ResizingParams = _reflection.GeneratedProtocolMessageType('ResizingParams', (_message.Message,), {'DESCRIPTOR': _RESIZINGPARAMS, '__module__': 'tensorflow.lite.tools.evaluation.proto.preprocessing_steps_pb2'})
_sym_db.RegisterMessage(ResizingParams)
PaddingParams = _reflection.GeneratedProtocolMessageType('PaddingParams', (_message.Message,), {'DESCRIPTOR': _PADDINGPARAMS, '__module__': 'tensorflow.lite.tools.evaluation.proto.preprocessing_steps_pb2'})
_sym_db.RegisterMessage(PaddingParams)
NormalizationParams = _reflection.GeneratedProtocolMessageType('NormalizationParams', (_message.Message,), {'PerChannelMeanValues': _reflection.GeneratedProtocolMessageType('PerChannelMeanValues', (_message.Message,), {'DESCRIPTOR': _NORMALIZATIONPARAMS_PERCHANNELMEANVALUES, '__module__': 'tensorflow.lite.tools.evaluation.proto.preprocessing_steps_pb2'}), 'DESCRIPTOR': _NORMALIZATIONPARAMS, '__module__': 'tensorflow.lite.tools.evaluation.proto.preprocessing_steps_pb2'})
_sym_db.RegisterMessage(NormalizationParams)
_sym_db.RegisterMessage(NormalizationParams.PerChannelMeanValues)
DESCRIPTOR._options = None
| [
198,
6,
8645,
515,
8435,
11876,
2438,
2637,
198,
6738,
23645,
13,
11235,
672,
3046,
1330,
43087,
355,
4808,
20147,
1968,
273,
198,
6738,
23645,
13,
11235,
672,
3046,
1330,
3275,
355,
4808,
20500,
198,
6738,
23645,
13,
11235,
672,
3046,
... | 2.763974 | 6,995 |
import pytest
@pytest.mark.asyncio
| [
11748,
12972,
9288,
628,
198,
31,
9078,
9288,
13,
4102,
13,
292,
13361,
952,
198
] | 2.466667 | 15 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
from django.conf import settings
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
6738,
11593,
37443,
834,
1330,
28000,
1098,
62,
17201,
874,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
198,
6738,
42625,
14208,
13,
10414,
1330,
... | 3.111111 | 45 |
"""
Part 1 of the tweet preprocessing phase
Lang: py3
"""
import json
import re
import csv
import sys
OUTPUT_DIR = os.path.join(os.getcwd(), 'part1_output')
EMOJI_PATTERN = re.compile("["
u"\U0001F600-\U0001F64F" # emoticons
u"\U0001F300-\U0001F5FF" # symbols & pictographs
u"\U0001F680-\U0001F6FF" # transport & map symbols
u"\U0001F1E0-\U0001F1FF" # flags (iOS)
"]+", flags=re.UNICODE)
URL_PATTERN = re.compile('http\S+')
#Method to replace usermentions with actual username
if(__name__ == "__main__"):
if(not(len(sys.argv) == 2)):
print("Usage: tweet_preprocess_part1.py <TWEET_DUMP_FILEPATH>")
sys.exit()
#Input filepath
input_filepath = sys.argv[1]
#If the input file is X/Y/input_file.csv, then output filename is input_file_spacyNP.csv
output_filepath = OUTPUT_DIR + input_filepath.split("/")[-1].split(".")[0] + "_part1_results.csv"
try:
g = open(output_filepath, "w")
except IOError:
print("Error while creating new file!!!")
sys.exit()
writer = csv.writer(g, delimiter=',', quotechar='"', quoting=csv.QUOTE_ALL)
writer.writerow(["tweet_id", "actual_text", "preprocess_part1_results"])
#with open("news_outlets_234h.jsonl") as f:
with open(input_filepath) as f:
count = 0
for line in f:
#Load the tweet info from the tweet dump
json_line = json.loads(line)
#Get the tweet full_text
text = json_line['full_text']
#Replace all the newlines with spaces
text = text.replace("\n", ' ')
#Remove all the emojis from the tweet
text = EMOJI_PATTERN.sub('', text)
#Remove all the URLs from the tweet
text = URL_PATTERN.sub('', text)
#Split the text into words (filter removes the empty strings after split)
text = list(filter(None, text.split(" ")))
#Get all the usermentions in the tweet which are then replaced by the actual username
user_mentions = json_line['entities']['user_mentions']
#If the last word in the tweet starts with #, then lastPP is True
if(text[len(text) - 1].startswith("#") or text[len(text) - 1].startswith("@")):
lastPP = True
else:
lastPP = False
#Check: If tweet is just "#something"
#Iterate from the last word till the first word of the tweet
for i in range(len(text) - 1, 0, -1):
if(text[i].startswith("@") or text[i].startswith("#") and lastPP):
if(text[i - 1].startswith(("#", "@"))):
text[i] = ""
else:
lastPP = False
#Remove all the empty strings (incase any) obtained from the previous loop
text = filter(None, text)
#Join the words of the text
text = ' '.join(text)
#Write to file
writer.writerow([json_line["id_str"], json_line['full_text'], text])
count += 1
if(count % 5000 == 0):
print("Part1: Processed", count, "tweets...")
g.close()
print("Part1 of preprocessing done....you can now run the part2 code to further preprocess your tweet text.")
| [
37811,
198,
7841,
352,
286,
262,
6126,
662,
36948,
7108,
198,
43,
648,
25,
12972,
18,
198,
37811,
198,
198,
11748,
33918,
198,
11748,
302,
198,
11748,
269,
21370,
198,
11748,
25064,
198,
198,
2606,
7250,
3843,
62,
34720,
796,
28686,
1... | 2.470389 | 1,182 |
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import sys
import pickle
# Verify the number of command line arguments
assert len(sys.argv) == 5
num_agents_path = sys.argv[1]
T_path = sys.argv[2]
m_path = sys.argv[3]
output_path_root = sys.argv[4]
data = {}
with open(num_agents_path, 'rb') as handle:
data['num_agents'] = pickle.load(handle)
with open(T_path, 'rb') as handle:
data['T'] = pickle.load(handle)
with open(m_path, 'rb') as handle:
data['m'] = pickle.load(handle)
plt.rcParams["font.family"] = "Times New Roman"
policies = ['Control',
'Perpetual Random',
'Initial Random',
'Myopic',
'One Step Lookahead',
'Modifed Reconnect']
marker_dict = {'Control': 'o',
'Perpetual Random': 'v',
'Initial Random': '^',
'Myopic': 's',
'One Step Lookahead': 'd',
'Modifed Reconnect': 'X'}
normalized_plots = [['Cumulative','num_agents','Objective'],
['Cumulative','T','Objective'],
['Terminal','num_agents','Objective']
]
x_labels = {'num_agents': 'Number of Nodes |V|',
'T': 'Time Horizon T',
'm': 'Number of Edges Formed by Entering\nNodes during Network Construction'}
y_labels = {'num_agents': '\ndivided by |V|',
'T': '\ndivided by T'}
params = ['num_agents', 'T', 'm']
obj_modes = ['Cumulative', 'Terminal']
exog_modes = ['Uniform', 'Weighted']
plot_modes = ['Objective', 'Time']
for obj_mode in obj_modes:
for exog_mode in exog_modes:
for plot_mode in plot_modes:
output_path = output_path_root + obj_mode + '_' + exog_mode + \
'_' + plot_mode + '_plots'
generate_plots_ijcai(params=params, data=data, obj_mode=obj_mode,
exog_mode=exog_mode, policies=policies,
figsize=(23, 8), mode=plot_mode,
filename=output_path)
| [
11748,
299,
32152,
355,
45941,
198,
11748,
19798,
292,
355,
279,
67,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
198,
11748,
25064,
198,
11748,
2298,
293,
198,
198,
2,
49899,
262,
1271,
286,
3141,
1627,
7159,
19... | 2.001947 | 1,027 |
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pathlib
from kfp import components
from kfp.v2 import dsl
import kfp.v2.compiler as compiler
component_op_1 = components.load_component_from_text("""
name: upstream
inputs:
- {name: input_1, type: String}
- {name: input_2, type: Float}
- {name: input_3, type: }
- {name: input_4}
- {name: input_5, type: Metrics}
- {name: input_6, type: Datasets}
- {name: input_7, type: Some arbitrary type}
- {name: input_8, type: {GcsPath: {data_type: TSV}}}
outputs:
- {name: output_1, type: Integer}
- {name: output_2, type: Model}
- {name: output_3}
implementation:
container:
image: gcr.io/image
args:
- {inputValue: input_1}
- {inputValue: input_2}
- {inputUri: input_3}
- {inputUri: input_4}
- {inputUri: input_5}
- {inputUri: input_6}
- {inputUri: input_7}
- {inputUri: input_8}
- {outputPath: output_1}
- {outputUri: output_2}
- {outputPath: output_3}
""")
component_op_2 = components.load_component_from_text("""
name: downstream
inputs:
- {name: input_a, type: Integer}
- {name: input_b, type: Model}
- {name: input_c}
implementation:
container:
image: gcr.io/image
args:
- {inputValue: input_a}
- {inputUri: input_b}
- {inputPath: input_c}
""")
@dsl.pipeline(name='pipeline-with-various-types')
if __name__ == '__main__':
compiler.Compiler().compile(
pipeline_func=my_pipeline,
pipeline_root='dummy_root',
output_path=__file__ + '.json')
| [
2,
15069,
12131,
3012,
11419,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
13789,
13,
198,
2,
921,
743,
733... | 2.634941 | 767 |
import random
import sys
import argparse
import numpy as np
import torch
from torch import nn
from torch import optim
from torch.autograd import Variable
from torch.nn import functional as F
from .basic import ModelBase
| [
11748,
4738,
198,
11748,
25064,
198,
11748,
1822,
29572,
198,
11748,
299,
32152,
355,
45941,
198,
198,
11748,
28034,
198,
6738,
28034,
1330,
299,
77,
198,
6738,
28034,
1330,
6436,
198,
6738,
28034,
13,
2306,
519,
6335,
1330,
35748,
198,
... | 3.964286 | 56 |
from rest_framework.pagination import PageNumberPagination
from rest_framework.response import Response
class PageNumberPaginationCustom(PageNumberPagination):
"""Pagination.
Gives current page number and last page number.
"""
| [
6738,
1334,
62,
30604,
13,
79,
363,
1883,
1330,
7873,
15057,
47,
363,
1883,
198,
6738,
1334,
62,
30604,
13,
26209,
1330,
18261,
628,
198,
4871,
7873,
15057,
47,
363,
1883,
15022,
7,
9876,
15057,
47,
363,
1883,
2599,
198,
220,
220,
2... | 3.558824 | 68 |
from redbot import version_info, VersionInfo
from redbot.core.bot import Red
from redbot.core.errors import CogLoadError
from .voicetools import VoiceTools
| [
6738,
2266,
13645,
1330,
2196,
62,
10951,
11,
10628,
12360,
198,
6738,
2266,
13645,
13,
7295,
13,
13645,
1330,
2297,
198,
6738,
2266,
13645,
13,
7295,
13,
48277,
1330,
327,
519,
8912,
12331,
198,
198,
6738,
764,
13038,
291,
316,
10141,
... | 3.511111 | 45 |
# Definition for a binary tree node.
| [
2,
30396,
329,
257,
13934,
5509,
10139,
13,
628,
198
] | 3.9 | 10 |
# python-mqlight - high-level API by which you can interact with MQ Light
#
# Copyright 2015-2017 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
from setuptools import setup, find_packages, Extension
from setuptools.command.test import test as TestCommand
from codecs import open as codecs_open
from os import path, environ
from platform import system, architecture
if not sys.version_info[:2] >= (2, 6):
print('ERROR: Python 2.6 or newer is required')
sys.exit(1)
if system() == 'Windows' and architecture()[0] == '32bit':
print('ERROR: Mqlight requires 64bit Python on Windows.')
sys.exit(1)
HERE = path.abspath(path.dirname(__file__))
with codecs_open(path.join(HERE, 'description.rst'), encoding='utf-8') as f:
LONG_DESCRIPTION = f.read()
if system() == 'Darwin':
environ['ARCHFLAGS'] = '-arch x86_64 -mmacosx-version-min=10.8'
def get_sources():
"""Return a list of source files to compile into the extension"""
if system() == 'Windows':
return [path.join('mqlight', 'cproton.cxx')]
else:
return [path.join('mqlight', 'cproton.c')]
def get_runtime_library_dirs():
"""Return a custom rpath to write into the extension"""
if system() == 'Linux':
return ['$ORIGIN']
else:
return []
def get_extra_compile_args():
"""Return a list of extra arguments to supply at extension compile time"""
if system() == 'Linux':
return ['-Wno-address', '-Wno-unused-function']
else:
return []
def get_extra_link_args():
"""Return a list of extra arguments to supply at extension link time"""
if system() == 'Darwin':
return ['-Wl,-rpath,@loader_path/']
else:
return []
# pylint: disable=R0904
class PyTest(TestCommand):
"""TestCommand to run suite using py.test"""
test_args = []
test_suite = True
pytest_args = []
setup(
name='mqlight',
version='9.9.9999999999',
description='IBM MQ Light Client Python Module',
long_description=LONG_DESCRIPTION,
url='https://developer.ibm.com/messaging/mq-light/',
author='IBM',
author_email='mqlight@uk.ibm.com',
license='proprietary',
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Topic :: Software Development',
'Topic :: Communications',
'Topic :: System :: Networking',
'Topic :: Software Development :: Libraries :: Python Modules',
'License :: Other/Proprietary License',
'Operating System :: OS Independent',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
],
keywords='ibm mqlight',
packages=find_packages(
exclude=['tests']),
package_data={
'mqlight': [
'*.dll',
'libqpid-proton*',
'samples/*.py',
'licenses/*',
'README']},
ext_package='mqlight',
ext_modules=[
Extension(
name='_cproton',
sources=get_sources(),
include_dirs=[
path.join(
HERE,
'include')],
library_dirs=['mqlight'],
libraries=['qpid-proton'],
runtime_library_dirs=get_runtime_library_dirs(),
extra_compile_args=get_extra_compile_args(),
extra_link_args=get_extra_link_args()),
],
install_requires=[
'argparse',
'backports.ssl_match_hostname>=3.4.0.2'
],
test_suite='tests',
tests_require=[
'pytest_cov',
'pytest_pep8',
'pytest_timeout',
'pytest',
'pbr==1.6.0'],
cmdclass={
'test': PyTest}
)
| [
198,
2,
21015,
12,
76,
80,
2971,
532,
1029,
12,
5715,
7824,
416,
543,
345,
460,
9427,
351,
337,
48,
4401,
198,
2,
198,
2,
15069,
1853,
12,
5539,
19764,
11421,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13... | 2.450199 | 1,757 |
import xml.etree.ElementTree as ET
"""
elemToString
This takes in content, a node, and returns the inner text
"""
"""
cleanElemToString
This takes in content, a node, and returns the inner text with only one space between
words and no line breaks
"""
"""
stageDirInLine
This gets the stage directions in the middle of a line and writes them to our file.
This takes in content, a stage directions XML node, and a targetFile, the file object with write privileges.
"""
"""
printSingleLine
This writes a string to file after removing extra spaces and all line breaks
This takes in line, a string, and targetFile, a file object with write privileges.
"""
"""
speaker
This writes the speaker's name to file and returns it to use as the key for the dictionary.
This takes in content, a speaker node, and a targetFile, a file object with write privileges.
"""
"""
getLines
This will write all the lines that one character speaks and the in-line stage directions to a file.
It takes in content, a node with tag 'ab', and a targetFile, a file object with write privilege.
"""
"""
printOneScene
This will write a single scene as we want it formatted and update the character line dictionary.
It takes in a scene (div2) node, a file to write to, and a dicitionary that holds the lines characters.
"""
"""
visitAct
This is a visitor parser to create a custom navigation bar for any play we use.
It requires an xmlTree that has acts noted by div1 and scenes noted by div2, like the Folger
XML versions of the plays. It also requires a file to write to. Hopefully, this is the file
that we're writing to all along.
This will go through and find all the acts and scenes based on those assumptions. It will
write out the proper HTML to make a navbar based on those assumptions.
"""
dictionary = {}
header = open("header.html", "r")
lines = header.readlines()
target = open("index.html.erb", "w")
tree = ET.parse("data.xml").getroot()
formatting = open("../../assets/javascripts/application.js", "w")
formatHeader = open("../../assets/javascripts/applicationheader.txt", "r")
# Write the header to index file first, using the visitor parser at the appropriate place
for line in lines:
target.write(line)
if '<a class="navbar-brand" href="#">' in line:
title = tree.find(".//{http://www.tei-c.org/ns/1.0}title")
target.write(elemToString(title))
elif '<div class="row">' in line:
oldVisitAct(tree, target)
elif '<ul class="scroll-menu scroll-menu-2x">' in line:
visitAct(tree, target)
jsLines = formatHeader.readlines()
for line in jsLines:
formatting.write(line)
# Start by finding all the acts, noted with div1's
acts = tree.findall(".//{http://www.tei-c.org/ns/1.0}div1")
for act in acts:
target.write('\n<h1 id = %s>\nAct '% act.get('n'))
target.write('%s\n</h1>' % act.get('n'))
# Find all the scenes in the act. Each has the tag div2
scenes = act.findall(".//{http://www.tei-c.org/ns/1.0}div2")
for scene in scenes:
# idNumber is the id attribute so the navigation works.
# It reflects the ActNumber.SceneNumber numbering of Shakespeare plays
idNumber = act.get('n') + "." + scene.get('n')
target.write("\n<h2 id ="+idNumber+">\nScene %s\n</h2>" % scene.get('n'))
writeOneScene(scene, target, dictionary)
target.write("</div>\n</body>\n</html>")
target.close()
formatting.write("\n})")
chars = open("characters.html.erb", "w")
chars.write("<DOCTYPE! HTML>\n<html>")
chars.write('<center>\n<table style="width:50%">\n')
chars.write("<tr><th><b>Character Name</b></th><th><b>Modified Number of Lines</b></th>")
chars.write("<th><b>Original Number of Lines</b></th></tr>")
# In a table we output the name of the character from the dictionary
# and the number of lines they spoke
for key in dictionary:
chars.write('<tr><td>%s</td>' % key)
chars.write('<td>%d</td>' % dictionary[key])
chars.write('<td>%d</td></tr>' % dictionary[key])
chars.write("</table></center>")
chars.close()
| [
11748,
35555,
13,
316,
631,
13,
20180,
27660,
355,
12152,
198,
198,
37811,
198,
68,
10671,
2514,
10100,
198,
1212,
2753,
287,
2695,
11,
257,
10139,
11,
290,
5860,
262,
8434,
2420,
198,
37811,
198,
198,
37811,
198,
27773,
36,
10671,
25... | 2.964784 | 1,363 |
"""
1 - Faรงa um programa que determine e mostre os cinco primeiros mรบltiplos de 3, considerando nรบmeros maiores que 0.
"""
soma = 0
for num in range(1,6):
numero = num * 3
soma += 1
print(numero)
| [
37811,
198,
16,
532,
18350,
50041,
23781,
1430,
64,
8358,
5004,
304,
749,
260,
28686,
269,
259,
1073,
6994,
72,
4951,
285,
21356,
2528,
24705,
418,
390,
513,
11,
2074,
25440,
299,
21356,
647,
418,
285,
1872,
2850,
8358,
657,
13,
198,
... | 2.418605 | 86 |
import json
import functools
from pprint import pprint
from os.path import join, exists
from sqlalchemy import Column, Integer, create_engine, Unicode, TypeDecorator
from sqlalchemy.orm import sessionmaker
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy_media import Image, StoreManager, FileSystemStore, ImageProcessor
# Step 1
TEMP_PATH = '/tmp/sqlalchemy-media'
Base = declarative_base()
engine = create_engine('sqlite:///:memory:', echo=False)
session_factory = sessionmaker(bind=engine)
# Step 2
StoreManager.register('fs', functools.partial(FileSystemStore, TEMP_PATH, 'http://static.example.org/'), default=True)
# Sqlite is not supporting JSON type, so emulating it:
Base.metadata.create_all(engine, checkfirst=True)
if __name__ == '__main__':
session = session_factory()
with StoreManager(session):
person1 = Person()
# person1.image = Image.create_from('https://www.python.org/static/img/python-logo@2x.png')
person1.image = Avatar()
person1.image.attach('https://www.python.org/static/img/python-logo@2x.png')
session.add(person1)
session.commit()
print(person1.id)
pprint(person1.image)
path = join(TEMP_PATH, person1.image.path)
print(path)
print(person1.image.locate())
assert exists(path)
| [
11748,
33918,
198,
11748,
1257,
310,
10141,
198,
6738,
279,
4798,
1330,
279,
4798,
198,
6738,
28686,
13,
6978,
1330,
4654,
11,
7160,
198,
198,
6738,
44161,
282,
26599,
1330,
29201,
11,
34142,
11,
2251,
62,
18392,
11,
34371,
11,
5994,
... | 2.71371 | 496 |
from django.contrib import admin
from .models import *
urlpatterns = [
]
| [
198,
198,
6738,
42625,
14208,
13,
3642,
822,
1330,
13169,
198,
6738,
764,
27530,
1330,
1635,
198,
198,
6371,
33279,
82,
796,
685,
628,
220,
220,
628,
198,
198,
60,
220,
198
] | 2.625 | 32 |
import json
from requests import Session
from lib.base import SaltAction
| [
11748,
33918,
198,
6738,
7007,
1330,
23575,
198,
198,
6738,
9195,
13,
8692,
1330,
13754,
12502,
628
] | 4.411765 | 17 |
# @Title: ้พ่กจๆฑๅ (Sum Lists LCCI)
# @Author: 18015528893
# @Date: 2021-02-12 21:23:09
# @Runtime: 60 ms
# @Memory: 14.8 MB
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, x):
# self.val = x
# self.next = None
| [
198,
2,
2488,
19160,
25,
16268,
241,
122,
26193,
101,
162,
109,
224,
161,
240,
234,
357,
13065,
44968,
406,
4093,
40,
8,
198,
2,
2488,
13838,
25,
1248,
486,
2816,
2078,
49682,
198,
2,
2488,
10430,
25,
33448,
12,
2999,
12,
1065,
23... | 2.031496 | 127 |
#!/usr/bin/env python
#
# Copyright 2015 British Broadcasting Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from setuptools import setup
import os
import sys
import re
def find_packages(path, base="" ):
""" Find all packages in path """
packages = {}
if "__init__.py" in os.listdir(path):
packages[base] = path
for item in os.listdir(path):
itempath = os.path.join(path,item)
if os.path.isdir(itempath):
newbase = "%s.%s" % (base, item)
packages.update(find_packages(itempath, newbase))
return packages
packages = find_packages("dvbcss","dvbcss")
package_names = packages.keys()
otherArgs = {}
# if registering or uploading to PyPI: convert markdown readme to ReStructuredText
# using pandoc
lcase_args = [arg.lower() for arg in sys.argv]
if "register" in lcase_args or "upload" in lcase_args:
retval = os.system("pandoc --from=markdown --to=rst --output=tmp.README.rst README.md")
if retval==0:
otherArgs["long_description"] = open("tmp.README.rst").read()
else:
raise RuntimeError("Unable to convert documentation from Markdown to ReStructuredText. Is 'pandoc' command line tool installed?")
try:
VERSION, _ = re.match("^([.0-9a-zA-Z]+)-(.+)$", open("VERSION").read().replace("\n","").replace("\r","")).groups()
setup(
name = "pydvbcss",
version = VERSION,
author = "Matt Hammond (British Broadcasting Corporation)",
author_email = "matt.hammond@bbc.co.uk",
description = ("pydvbcss is a library implementing DVB \"CSS\" protocols for Companion Screen Synchronisation."),
license = "Apache 2.0",
keywords = "dvb companion synchronisation synchronization second-screen protocol",
url = "http://github.com/BBC/pydvbcss",
packages = package_names,
package_dir = packages,
install_requires = filter(len, [req.strip() for req in open("requirements.txt","r").read().splitlines()]),
test_suite = "test.test_all.testSuite",
classifiers=[
"Intended Audience :: Developers",
"Intended Audience :: Telecommunications Industry",
"License :: OSI Approved :: Apache Software License",
"Natural Language :: English",
"Operating System :: MacOS :: MacOS X",
"Operating System :: Microsoft :: Windows",
"Operating System :: POSIX :: Linux",
"Programming Language :: Python :: 2.7",
"Topic :: Internet",
"Topic :: Software Development :: Libraries :: Python Modules",
"Topic :: System :: Networking :: Time Synchronization",
],
**otherArgs
)
finally:
if "long_description" in otherArgs:
os.remove("tmp.README.rst")
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
198,
2,
15069,
1853,
3517,
32250,
10501,
198,
2,
220,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
42... | 2.653418 | 1,258 |
import numpy as np
from ad.forward_mode import Ad_array
| [
11748,
299,
32152,
355,
45941,
198,
198,
6738,
512,
13,
11813,
62,
14171,
1330,
1215,
62,
18747,
628,
628,
198
] | 3.05 | 20 |
import argparse
import xarray as xr
"""
Join and convert the NETCDF3_CLASSIC files into a large NETCDF4 file (with full HDF5 API)
"""
parser = argparse.ArgumentParser(description='Combine data')
parser.add_argument('-i', '--input', required=True, help='Path to folder containing input files; also output folder')
args = parser.parse_args()
combine(args.input, ['tg', 'tg_stderr', 'pp', 'pp_stderr', 'rr', 'rr_stderr'], 'data1')
combine(args.input, ['tn', 'tn_stderr', 'tx', 'tx_stderr'], 'data2')
| [
11748,
1822,
29572,
198,
198,
11748,
2124,
18747,
355,
2124,
81,
198,
198,
37811,
198,
18234,
290,
10385,
262,
30502,
34,
8068,
18,
62,
31631,
2149,
3696,
656,
257,
1588,
30502,
34,
8068,
19,
2393,
357,
4480,
1336,
5572,
37,
20,
7824,... | 2.779006 | 181 |
from automationlookup.models import UserLookup
from django.conf import settings
| [
6738,
22771,
5460,
929,
13,
27530,
1330,
11787,
8567,
929,
198,
6738,
42625,
14208,
13,
10414,
1330,
6460,
628
] | 4.263158 | 19 |
# --------------------------------------------------------
# Tensorflow iCAN
# Licensed under The MIT License [see LICENSE for details]
# --------------------------------------------------------
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
os.environ['DATASET'] = 'HICO'
os.environ["KMP_BLOCKTIME"] = str(0)
os.environ["KMP_SETTINGS"] = str(1)
os.environ["KMP_AFFINITY"] = "granularity=fine,verbose,compact,1,0"
os.environ["OMP_NUM_THREADS"] = str(8)
import tensorflow as tf
import argparse
from ult.config import cfg
from models.test_HICO import obtain_test_dataset_wo_obj, test_net_data_api_wo_obj
if __name__ == '__main__':
args = parse_args()
print(args)
# test detections result
weight = cfg.ROOT_DIR + '/Weights/' + args.model + '/HOI_iter_' + str(args.iteration) + '.ckpt'
import os
if not os.path.exists(weight + '.index'):
weight = cfg.LOCAL_DATA + '/Weights/' + args.model + '/HOI_iter_' + str(args.iteration) + '.ckpt'
print('weight:', weight)
print('Human thres = ' + str(args.human_thres) + ', Object thres = ' + str(args.object_thres) + ', iter = ' + str(
args.iteration) + ', path = ' + weight)
output_file = cfg.LOCAL_DATA + '/Results/' + str(args.iteration) + '_' + args.model + '_tin.pkl'
if os.path.exists(output_file):
os.remove(output_file)
# init session
HICO_dir = cfg.ROOT_DIR + '/Results/HICO/' + str(args.iteration) + '_' + args.model + '/'
tfconfig = tf.ConfigProto(device_count={"CPU": 12},
inter_op_parallelism_threads=8,
intra_op_parallelism_threads=8,
allow_soft_placement=True)
# init session
# tfconfig = tf.ConfigProto(allow_soft_placement=True)
# tfconfig.gpu_options.allow_growth = True
tfconfig.gpu_options.allow_growth = True
sess = tf.Session(config=tfconfig)
# net = ResNet50(model_name=args.model)
# net.create_architecture(False)
#
#
# saver = tf.train.Saver()
# saver.restore(sess, weight)
#
# print('Pre-trained weights loaded.')
#
# test_net(sess, net, Test_RCNN, output_file, args.object_thres, args.human_thres)
# sess.close()
# Generate_HICO_detection(output_file, HICO_dir)
if args.model.__contains__('res101'):
os.environ['DATASET'] = 'HICO_res101'
from networks.HOI import HOI
net = HOI(model_name=args.model)
else:
from networks.HOI import HOI
net = HOI(model_name=args.model)
stride = 200
image, blobs, image_id = obtain_test_dataset_wo_obj(args.object_thres, args.human_thres, test_type=args.test_type,
has_human_threhold=not args.not_h_threhold,
stride=stride)
image = image[0:1]
print(blobs, image)
tmp_labels = tf.one_hot(tf.reshape(tf.cast(blobs['O_cls'], tf.int32), shape=[-1, ]), 80, dtype=tf.float32)
tmp_ho_class_from_obj = tf.cast(tf.matmul(tmp_labels, net.obj_to_HO_matrix) > 0, tf.float32)
# action_ho = blobs['O_cls']
net.set_ph(image, image_id, num_pos=blobs['H_num'], Human_augmented=blobs['H_boxes'],
Object_augmented=blobs['O_boxes'],
action_HO=None, sp=blobs['sp'],)
# net.set_add_ph()
# net.init_verbs_objs_cls()
net.create_architecture(False)
saver = tf.train.Saver()
print(weight)
saver.restore(sess, weight)
print('Pre-trained weights loaded.')
test_net_data_api_wo_obj(sess, net, output_file, blobs['H_boxes'][:, 1:], blobs['O_boxes'][:, 1:],
blobs['O_cls'], blobs['H_score'], blobs['O_score'], None, image_id, args.debug)
sess.close()
| [
2,
20368,
22369,
198,
2,
309,
22854,
11125,
1312,
44565,
198,
2,
49962,
739,
383,
17168,
13789,
685,
3826,
38559,
24290,
329,
3307,
60,
198,
2,
20368,
22369,
198,
198,
6738,
11593,
37443,
834,
1330,
4112,
62,
11748,
198,
6738,
11593,
... | 2.196439 | 1,741 |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.1 on 2016-09-28 14:57
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
2980,
515,
416,
37770,
352,
13,
940,
13,
16,
319,
1584,
12,
2931,
12,
2078,
1478,
25,
3553,
198,
6738,
11593,
37443,
834,
1330,
28000,
1098,
62,
17201,
874,
198,
... | 2.73913 | 69 |
#850ร480
ANCHO=850 #x4 = 3400
ALTO=480 #x3 = 1440
VERDE=[0,255,0]
ROJO=[255,0,0]
AZUL=[0,0,255]
AMARILLO=[255,255,0]
AZUL_2=[0,255,255]
NEGRO=[0,0,0]
BLANCO=[255,255,255]
GRIS=[180,180,180]
WIN = False | [
2,
25764,
12906,
22148,
198,
1565,
44899,
28,
25764,
220,
220,
1303,
87,
19,
796,
4974,
405,
198,
1847,
10468,
28,
22148,
220,
220,
220,
1303,
87,
18,
796,
49557,
198,
198,
5959,
7206,
41888,
15,
11,
13381,
11,
15,
60,
198,
13252,
... | 1.704918 | 122 |
from .context import Directed, Undirected, edmonds_karp
from unittest import TestCase
# Figure 6.13 from The Algorithm Design Manual
CASES = [
{
'class': Undirected,
'edges': [
[0, 1, 5],
[0, 4, 12],
[1, 2, 7],
[1, 3, 9],
[2, 3, 3],
[2, 6, 5],
[3, 5, 3],
[3, 4, 4],
[4, 5, 7],
[5, 6, 2]
],
'from': 0,
'to': 6,
'expected': 7
},
{
'class': Undirected,
'edges': [
[0, 1, 5],
[0, 4, 12],
[1, 2, 7],
[1, 3, 9],
[2, 3, 3],
[2, 6, 5],
[3, 5, 3],
[3, 4, 4],
[4, 5, 7],
[5, 6, 2]
],
'from': 6,
'to': 0,
'expected': 7
},
{
'class': Directed,
'edges': [
[0, 1, 5],
[0, 4, 12],
[1, 2, 7],
[1, 3, 9],
[2, 3, 3],
[2, 6, 5],
[3, 5, 3],
[3, 4, 4],
[4, 5, 7],
[5, 6, 2]
],
'from': 0,
'to': 6,
'expected': 7
},
{
'class': Directed,
'edges': [
[0, 1, 5],
[0, 4, 12],
[1, 2, 7],
[1, 3, 9],
[2, 3, 3],
[2, 6, 5],
[3, 5, 3],
[3, 4, 4],
[4, 5, 7],
[5, 6, 2]
],
'from': 6,
'to': 0,
'expected': 0
}
]
| [
6738,
764,
22866,
1330,
4128,
276,
11,
13794,
1060,
276,
11,
1225,
42620,
62,
74,
5117,
198,
6738,
555,
715,
395,
1330,
6208,
20448,
198,
198,
2,
11291,
718,
13,
1485,
422,
383,
978,
42289,
8495,
17969,
198,
34,
1921,
1546,
796,
685... | 1.382867 | 1,144 |
regions = {}
while True:
in_line = input()
if 'Aggregate' == in_line:
break
[region, shell] = filter(None, in_line.split(" "))
if region not in regions:
regions[region] = []
if int(shell) not in regions[region]:
regions[region].append(int(shell))
print(("\n".join(
[f'{region} -> {", ".join(map(str, shells))} ({calculate_giant_shell(shells)})' for region, shells in
regions.items()])))
| [
2301,
507,
796,
23884,
628,
198,
198,
4514,
6407,
25,
198,
220,
220,
220,
287,
62,
1370,
796,
5128,
3419,
198,
220,
220,
220,
611,
705,
46384,
49373,
6,
6624,
287,
62,
1370,
25,
198,
220,
220,
220,
220,
220,
220,
220,
2270,
628,
... | 2.390374 | 187 |
from models import db, User
import bcrypt
import readline
# global variables that contains several types of commands
create_commands = [ "create", "create_user", "createuser", "adduser", "add" ]
update_commands = [ "update", "update_user", "updateuser", "passwd" ]
delete_commands = [ "delete", "delete_user", "deluser", "del" ]
list_commands = [ "list", "list_users", "listusers", "ls" ]
help_commands = [ "help", "?" ]
exit_commands = [ "quit", "exit", "bye" ]
commands = create_commands + update_commands + delete_commands + list_commands + help_commands + exit_commands
# the prompt or the ps1 variable (reference to the $PS1 variable of a shell in *NIX)
ps4 = "pef_db $ "
readline.set_completer(completer)
readline.parse_and_bind("tab: complete")
def process_command(command:str):
"""
Process the commands that the user enters
"""
try:
# we parse the command
command = command.split()
if command[0] in create_commands:
create_user(command[1], command[2])
elif command[0] in update_commands:
update_user(command[1])
elif command[0] in delete_commands:
delete_user(command[1])
elif command[0] in list_commands:
try:
if command[1] in ["-v","--verbose"]:
list_users(True)
else:
print("No valid argument passed going to default")
list_users()
except IndexError:
list_users()
elif command[0] in help_commands:
usage()
elif command[0] in exit_commands:
quit()
else:
print("No valid command entered type ? or help to find more informations")
except IndexError:
print("")
def create_user(name:str, password):
"""
Create a user with a given username and password in the database
:param str name: the username
:param str password: the password
"""
if User.query.get(name):
print(f"Sorry the user '{name}' already exists in database, please consider using another name")
else:
u = User(name,bcrypt.hashpw(password.encode('utf-8'),bcrypt.gensalt()))
print(f"\nNew User:\nName = {u.name}\nPassword = {password}\nIs that correct ? [Y/n]")
if input(">> ") in ['', 'Y', 'y', 'yes']:
db.session.add(u)
db.session.commit()
print(f"User {u.name} added")
print("")
def update_user(name:str):
"""
Change the password of a user, it updates the user password in the database
:param str name: the name of the user we want to change the password
"""
if User.query.get(name):
u = User.query.get(name)
new_pass = input(f"Enter a new password for the user '{name}': ")
new_pass_confirm = input("Confirm the new password: ")
if new_pass == new_pass_confirm:
u.password = bcrypt.hashpw(new_pass.encode('utf-8'), bcrypt.gensalt())
db.session.commit()
print(f"Password for user '{name}' have been changed successfully")
else:
print("Passwords don't match\nCancelling password update")
else:
print(f"Cannot find the user '{name}'")
print("")
def delete_user(name:str):
"""
Delete a user from database
:param str name: the name of teh user we want to delete
"""
if not User.query.get(name):
print(f"Sorry the user '{name}' cannot be found")
else:
u = User.query.get(name)
print(f"\nDeleting user:\nName = {u.name}\nAre you sure ? [Y/n]")
if input(">> ") in ['', 'Y', 'y', 'yes']:
db.session.delete(u)
db.session.commit()
print(f"User {u.name} deleted")
print("")
def list_users(complete=False):
"""
Give a list of all the users stored in the database
:param boolean complete: whether the output of the command should be verbose or not
"""
users = User.query.all()
if len(users) == 0:
print("No users in database yet")
else:
if not complete:
for user in users:
print(user.name)
else:
for user in users:
print(f"{user.name} : {user.password}")
print("")
def usage():
"""
Shows how to use the cli
"""
print("Here is a list of available commands:")
print(" create / createuser / create_user / adduser / add [username] [password] : Add a new user in the database")
print(" update / updateuser / update_user / passwd [username] : Change the password of the user $username")
print(" delete / deleteuser / delete_user / deluser / del [username] : Delete the user $username from the database")
print(" list / list_users / ls [-v, --verbose] : lists all the users in the database")
print(" help / ? : show this help screen")
print(" quit / bye / exit : Exits the program\n")
def quit():
"""
Quit the cli properly
"""
print("Bye!\n")
exit()
# main loop, keyboardInterrupt behaves like the quit() command
while True:
try:
command = input(ps4)
process_command(command)
except KeyboardInterrupt:
quit()
| [
6738,
4981,
1330,
20613,
11,
11787,
198,
11748,
275,
29609,
198,
11748,
1100,
1370,
628,
198,
2,
3298,
9633,
326,
4909,
1811,
3858,
286,
9729,
198,
17953,
62,
9503,
1746,
796,
685,
366,
17953,
1600,
366,
17953,
62,
7220,
1600,
366,
17... | 2.26639 | 2,410 |
import subprocess
from typing import Optional, Dict
class ExternalProcessExecError(RuntimeError):
"""Error when an external process fails to run successfully"""
def run(cmd: str, suppress_error=False, env: Optional[Dict[str, str]] = None) -> (str, str):
"""Runs an external command and returns back its stdout and stderr"""
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True, env=env)
(res, err) = proc.communicate()
res = res.decode(encoding="utf-8", errors="ignore")
err = err.decode(encoding="utf-8", errors="ignore")
if proc.returncode != 0 and not suppress_error:
msg = f"Failed to execute external process: {cmd}\n{res}\nError: {err}"
raise ExternalProcessExecError(msg, cmd)
return res, err
| [
11748,
850,
14681,
198,
6738,
19720,
1330,
32233,
11,
360,
713,
628,
198,
4871,
34579,
18709,
23002,
12331,
7,
41006,
12331,
2599,
198,
220,
220,
220,
37227,
12331,
618,
281,
7097,
1429,
10143,
284,
1057,
7675,
37811,
628,
198,
4299,
10... | 2.915129 | 271 |
from basic import Basic
| [
198,
6738,
4096,
1330,
14392,
628,
220,
220,
220,
220,
198
] | 2.818182 | 11 |
import pandas as pd
pd.set_option('display.max_columns', None)
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import urllib.request
import os.path
from Selecting import *
# Download data
url = "https://raw.githubusercontent.com/CnrLwlss/Warren_2019/master/shiny/dat.txt"
outfile = "dat_py.txt"
mitoprot = "VDAC1"
if not os.path.isfile(outfile):
urllib.request.urlretrieve(url,outfile)
data = pd.read_csv(outfile,sep="\t")
# Drop unwanted columns
chans = data.channel.unique()
chans = [c for c in chans if ("LOG_" not in c) and ("MED_" not in c)]
data = data[data["channel"].isin(chans)]
# Group data by subject type
subjids = data.patient_id.unique()
subjids.sort()
patids = [id for id in subjids if "P" in id]
ctrlids = [id for id in subjids if "C" in id]
# Long to wide
wide = data.pivot_table(index=["cell_id","id","patient_id","subject_group"],values="value",columns="channel").reset_index()
cwide = wide[wide["patient_id"].isin(ctrlids)]
# Plotting options
alpha = 0.2
def_col = (1,0,0,alpha)
norm_col = (0,1,0,alpha)
pos_col = (0,0,1,alpha)
# Manually classify fibres by each protein
prots = ['NDUFB8', 'GRIM19', 'SDHA', 'UqCRC2', 'COX4+4L2', 'MTCO1', 'OSCP']
for prot in prots:
cols = [(0,0,0,alpha) for pt in wide[mitoprot]]
fig,ax = plt.subplots(num = "Select fibres below controls")
manager = plt.get_current_fig_manager()
manager.window.showMaximized()
pts = plt.scatter(np.log(wide[mitoprot]),np.log(wide[prot]),color=cols,edgecolors="none")
cnts = sns.kdeplot(x=np.log(cwide[mitoprot]),y=np.log(cwide[prot]),levels=[0.1,0.25,0.5,0.75,0.95],color="yellow")
ax.set_xlabel("log("+mitoprot+")")
ax.set_ylabel("log("+prot+")")
sel_def = SelectFromCollection(ax,pts,colour_sel=def_col)
plt.show()
cols = [def_col if i in sel_def.ind else col for i,col in enumerate(cols)]
fig,ax = plt.subplots(num = "Select fibres above controls")
manager = plt.get_current_fig_manager()
manager.window.showMaximized()
pts = plt.scatter(wide[mitoprot],wide[prot],color=cols,edgecolors="none")
cnts = sns.kdeplot(x=cwide[mitoprot],y=cwide[prot],levels=[0.1,0.25,0.5,0.75,0.95],color="yellow")
ax.set_xlabel("log("+mitoprot+")")
ax.set_ylabel("log("+prot+")")
sel_pos = SelectFromCollection(ax,pts,colour_sel=pos_col)
plt.show()
wide[prot+"_down"] = [i in sel_def.ind for i,val in enumerate(wide[prot])]
wide[prot+"_up"] = [i in sel_pos.ind for i,val in enumerate(wide[prot])]
wide.to_csv("ClassifiedWide.csv")
# Summarise classifications
clcols = ["patient_id","subject_group"]+[col for col in wide.columns if ("_up" in col) or ("_down" in col)]
cl = wide[clcols]
pid = cl.groupby("patient_id").mean()
sub = cl.groupby("subject_group").mean()
pid.to_csv("SummaryByPatient.csv", float_format='%.2f')
sub.to_csv("SummaryByType.csv", float_format='%.2f')
| [
11748,
19798,
292,
355,
279,
67,
198,
30094,
13,
2617,
62,
18076,
10786,
13812,
13,
9806,
62,
28665,
82,
3256,
6045,
8,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
11748,
384,... | 2.383596 | 1,207 |
"""GTZAN-Genre Dataset Loader
.. admonition:: Dataset Info
:class: dropdown
This dataset was used for the well known genre classification paper:
.. code-block:: latex
"Musical genre classification of audio signals " by G. Tzanetakis and
P. Cook in IEEE Transactions on Audio and Speech Processing 2002.
The dataset consists of 1000 audio tracks each 30 seconds long. It
contains 10 genres, each represented by 100 tracks. The tracks are all
22050 Hz mono 16-bit audio files in .wav format.
"""
import os
from typing import BinaryIO, Optional, TextIO, Tuple
import librosa
import numpy as np
from mirdata import download_utils
from mirdata import jams_utils
from mirdata import core
from mirdata import io
BIBTEX = """@article{tzanetakis2002gtzan,
title={GTZAN genre collection},
author={Tzanetakis, George and Cook, P},
journal={Music Analysis, Retrieval and Synthesis for Audio Signals},
year={2002}
}"""
INDEXES = {
"default": "1.0",
"test": "1.0",
"1.0": core.Index(filename="gtzan_genre_index_1.0.json"),
}
REMOTES = {
"all": download_utils.RemoteFileMetadata(
filename="genres.tar.gz",
url="http://opihi.cs.uvic.ca/sound/genres.tar.gz",
checksum="5b3d6dddb579ab49814ab86dba69e7c7",
destination_dir="gtzan_genre",
)
}
LICENSE_INFO = "Unfortunately we couldn't find the license information for the GTZAN_genre dataset."
class Track(core.Track):
"""gtzan_genre Track class
Args:
track_id (str): track id of the track
Attributes:
audio_path (str): path to the audio file
genre (str): annotated genre
track_id (str): track id
"""
@property
def audio(self) -> Optional[Tuple[np.ndarray, float]]:
"""The track's audio
Returns:
* np.ndarray - audio signal
* float - sample rate
"""
return load_audio(self.audio_path)
def to_jams(self):
"""Get the track's data in jams format
Returns:
jams.JAMS: the track's data in jams format
"""
return jams_utils.jams_converter(
tags_gtzan_data=[(self.genre, "gtzan-genre")],
metadata={
"title": "Unknown track",
"artist": "Unknown artist",
"release": "Unknown album",
"duration": 30.0,
"curator": "George Tzanetakis",
},
)
@io.coerce_to_bytes_io
def load_audio(fhandle: BinaryIO) -> Tuple[np.ndarray, float]:
"""Load a GTZAN audio file.
Args:
fhandle (str or file-like): File-like object or path to audio file
Returns:
* np.ndarray - the mono audio signal
* float - The sample rate of the audio file
"""
audio, sr = librosa.load(fhandle, sr=22050, mono=True)
return audio, sr
@core.docstring_inherit(core.Dataset)
class Dataset(core.Dataset):
"""
The gtzan_genre dataset
"""
@core.copy_docs(load_audio)
| [
37811,
19555,
57,
1565,
12,
13746,
260,
16092,
292,
316,
8778,
263,
198,
198,
492,
37202,
653,
3712,
16092,
292,
316,
14151,
198,
220,
220,
220,
1058,
4871,
25,
4268,
2902,
628,
220,
220,
220,
770,
27039,
373,
973,
329,
262,
880,
19... | 2.433871 | 1,240 |
from flask import Blueprint, render_template, redirect, url_for, flash
from flask_restful import reqparse
from markdown import markdown
from model.Board import Board
from model.BoardList import BoardList
from model.BoardListCatalog import BoardCatalog
from model.Post import render_for_catalog
from model.Slip import get_slip
from model.Tag import Tag
from shared import db
# Blueprint collecting all board-related routes; its templates are looked up
# in the local 'template' folder.
boards_blueprint = Blueprint('boards', __name__, template_folder='template')
# Expose `style_for_tag` to Jinja templates rendered under this app.
# NOTE(review): `style_for_tag` is neither defined here nor present in the
# visible imports above (possibly it belongs with model.Tag) — confirm its
# origin; as written this line would raise NameError at import time.
boards_blueprint.add_app_template_global(style_for_tag)
@boards_blueprint.route("/")
@boards_blueprint.route("/<int:board_id>")
@boards_blueprint.route("/rules", defaults={'board_id': None})
@boards_blueprint.route("/rules/<int:board_id>")
@boards_blueprint.route("/admin/<int:board_id>")
@boards_blueprint.route("/admin/<int:board_id>", methods=["POST"])
| [
6738,
42903,
1330,
39932,
11,
8543,
62,
28243,
11,
18941,
11,
19016,
62,
1640,
11,
7644,
198,
6738,
42903,
62,
2118,
913,
1330,
43089,
29572,
198,
6738,
1317,
2902,
1330,
1317,
2902,
198,
198,
6738,
2746,
13,
29828,
1330,
5926,
198,
6... | 3.182171 | 258 |
"Amharic config with language-specific information."
from pynini import *
from pynini.lib import byte
from config import utils
# Acceptor over the Amharic grapheme inventory (apostrophe, hyphen, and the
# Ethiopic syllabary), used to validate/segment input.
# NOTE(review): the Ethiopic characters in this copy are mojibake — every
# syllable renders as the same replacement glyph and several literals contain
# embedded line breaks, which would be a syntax error as-is. The original
# file's distinct Ge'ez codepoints must be restored from upstream; the text
# below is preserved verbatim from this copy.
GRAPHEMES = union("'", "-",
"แ", "แ", "แ", "แ", "แ", "แ
", "แ",
"แ", "แ", "แ", "แ", "แ", "แ", "แ", "แ",
"แ", "แ", "แ", "แ", "แ", "แ", "แ", "แ",
"แ", "แ", "แ", "แ", "แ", "แ", "แ", "แ",
"แ ", "แก", "แข", "แฃ", "แค", "แฅ", "แฆ", "แง",
"แจ", "แฉ", "แช", "แซ", "แฌ", "แญ", "แฎ", "แฏ",
"แฐ", "แฑ", "แฒ", "แณ", "แด", "แต", "แถ", "แท",
"แธ", "แน", "แบ", "แป", "แผ", "แฝ", "แพ", "แฟ",
"แ", "แ", "แ", "แ", "แ", "แ
",
"แ", "แ", "แ", "แ", "แ", "แ",
"แ ", "แก", "แข", "แฃ", "แค", "แฅ", "แฆ", "แง",
"แจ", "แฉ", "แช", "แซ", "แฌ", "แญ", "แฎ", "แฏ",
"แฐ", "แฑ", "แฒ", "แณ", "แด", "แต", "แถ", "แท",
"แธ", "แน", "แบ", "แป", "แผ", "แฝ", "แพ", "แฟ",
"แ", "แ", "แ", "แ", "แ", "แ
",
"แ", "แ", "แ", "แ", "แ", "แ",
"แ", "แ", "แ", "แ", "แ", "แ", "แ", "แ",
"แ", "แ", "แ", "แ", "แ", "แ", "แ", "แ",
"แ ", "แก", "แข", "แฃ", "แค", "แฅ", "แฆ", "แง",
"แจ", "แฉ", "แช", "แซ", "แฌ", "แญ",
"แฎ", "แฐ", "แฒ", "แณ", "แด", "แต",
"แธ", "แน", "แบ", "แป", "แผ", "แฝ",
"แพ", "แ", "แ", "แ", "แ", "แ
",
"แ", "แ", "แ", "แ", "แ", "แ", "แ",
"แ", "แ", "แ", "แ", "แ", "แ", "แ",
"แ", "แ", "แ", "แ", "แ", "แ", "แ", "แ",
"แ ", "แก", "แข", "แฃ", "แค", "แฅ", "แฆ", "แง",
"แจ", "แฉ", "แช", "แซ", "แฌ", "แญ", "แฎ",
"แฐ", "แฑ", "แฒ", "แณ", "แด", "แต", "แถ", "แท",
"แ", "แ", "แ", "แ", "แ", "แ
", "แ", "แ",
"แ", "แ", "แ", "แ", "แ", "แ",
"แ", "แ", "แ", "แ", "แ", "แ",
"แ ", "แก", "แข", "แฃ", "แค", "แฅ", "แฆ", "แง",
"แจ", "แฉ", "แช", "แซ", "แฌ", "แญ", "แฎ", "แฏ",
"แฐ", "แฑ", "แฒ", "แณ", "แด", "แต", "แถ", "แท",
"แธ", "แน", "แบ", "แป", "แผ", "แฝ", "แพ", "แฟ",
"แ", "แ", "แ", "แ", "แ", "แ
", "แ",
"แ", "แ", "แ", "แ", "แ", "แ", "แ", "แ",
"แ", "แ", "แ", "แ", "แ", "แ", "แ", "แ")
# Sentence-initial punctuation: the shared cross-language defaults only.
INITIAL_PUNCTUATION = utils.DEFAULT_INITIAL_PUNCTUATION
# Sentence-final punctuation: shared defaults plus the Ge'ez-script-specific
# final punctuation marks.
FINAL_PUNCTUATION = union(utils.DEFAULT_FINAL_PUNCTUATION,
utils.GEEZ_FINAL_PUNCTUATION)
# Numerals: ASCII digits plus the Ge'ez numeral characters.
NUMERALS = union(byte.DIGIT,
utils.GEEZ_NUMERALS)
# Amharic "over-differentiates" H graphemes, emphatic S graphemes, and glottal
# stop graphemes, which were all inherited from Ge'ez. Surveys suggest that
# Amharic speakers prefer one form over the others. These rules convert the
# dispreferred series graphemes to the one preferred series, when available.
# The surveys about grapheme preference come from the paper here:
# https://www.researchgate.net/profile/Fekede_Menuta/publication/312093656_OVER-DIFFERENTIATION_3_Over-differentiation_in_Amharic_Orthography_and_Attitude_towards_Reform/links/586f5d8408ae329d6215fb85/OVER-DIFFERENTIATION-3-Over-differentiation-in-Amharic-Orthography-and-Attitude-towards-Reform.pdf
# Map from dispreferred H-series (and related) graphemes to the preferred
# series. Pairs commented out have no counterpart in the preferred series.
# NOTE(review): the Ethiopic literals below are mojibake in this copy (some
# with line breaks embedded inside string literals); restore the original
# codepoints from upstream before use. Text preserved verbatim.
REDUCE_H = string_map((("แ", "แ"),
("แ", "แ"),
("แ", "แ"),
("แ", "แ"),
("แ", "แ"),
("แ", "แ
"),
("แ", "แ"),
#("แ", "")
("แ", "แ"),
("แ", "แ"),
("แ", "แ"),
("แ", "แ"),
("แ", "แ"),
("แ
", "แ
"),
("แ", "แ"),
#("แ", ""),
#("แ", ""),
#("แ", ""),
#("แ", ""),
#("แ", ""),
("แธ", "แ"),
("แน", "แ"),
("แบ", "แ"),
("แป", "แ"),
("แผ", "แ"),
("แฝ", "แ
"),
("แพ", "แ")
#("แ", ""),
#("แ", ""),
#("แ", ""),
#("แ", ""),
#("แ
", "")
))
# Map from dispreferred emphatic-S-series graphemes to the preferred series.
# NOTE(review): Ethiopic literals are mojibake in this copy — restore from
# upstream. Text preserved verbatim.
REDUCE_S = string_map((("แธ", "แ"),
("แน", "แ"),
("แบ", "แ"),
("แป", "แ"),
("แผ", "แ"),
("แฝ", "แ
"),
("แพ", "แ")
#("แฟ", "")
))
# Map from dispreferred glottal-stop-series graphemes to the preferred series.
# NOTE(review): Ethiopic literals are mojibake in this copy — restore from
# upstream. Text preserved verbatim.
REDUCE_A = string_map((("แ", "แ "),
("แ", "แ "),
("แ", "แข"),
("แ", "แฃ"),
("แ", "แค"),
("แ", "แฅ"),
("แ", "แฆ")
))
# Context-free rewrite applying all three reduction maps anywhere in the
# input: empty left/right contexts over the closure of all bytes.
REDUCE_OVERDIFFERENTIATION = cdrewrite(
    union(REDUCE_H, REDUCE_S, REDUCE_A),
    "",
    "",
    byte.BYTES.closure())
# Exported under the generic name the shared config machinery reads —
# presumably consumed by config.utils; confirm against the framework.
LANGUAGE_SPECIFIC_PREPROCESSING = REDUCE_OVERDIFFERENTIATION
# These files are not in the repo. You will need to change these paths to match
# where you place the data files.
# Universal Dependencies Amharic-ATT treebank, test split (CoNLL-U).
UD = "language_data/am/UD_Amharic-ATT/am_att-ud-test.conllu"
# Empty: no corresponding resource configured for Amharic — presumably a
# UniMorph path by analogy with sibling configs; verify.
UM = ""
# Word-bigram counts file (per its filename); format assumed from usage elsewhere.
AC = "language_data/am/ac/am-wordbigrams.txt"
# OSCAR web-crawl corpus text, raw and deduplicated variants.
OSCAR = "language_data/am/oscar/am.txt"
OSCAR_DEDUP = "language_data/am/oscar/am_dedup.txt"
# Leipzig Corpora Collection: Amharic Wikipedia 2016, 30K sentence sample.
LCC = "language_data/am/lcc/amh_wikipedia_2016_30K/amh_wikipedia_2016_30K-sentences.txt"
| [
1,
5840,
9869,
291,
4566,
351,
3303,
12,
11423,
1321,
526,
198,
198,
6738,
279,
2047,
5362,
1330,
1635,
198,
6738,
279,
2047,
5362,
13,
8019,
1330,
18022,
198,
6738,
4566,
1330,
3384,
4487,
198,
198,
10761,
31300,
3620,
1546,
796,
644... | 1.267018 | 4,363 |