text stringlengths 0 1.05M | meta dict |
|---|---|
"""A set of helper functions for TensorFlow."""
from typing import Callable, Iterable, List, Optional, Tuple
import numpy as np
import tensorflow as tf
from neuralmonkey.logging import debug, debug_enabled
# pylint: disable=invalid-name
ShapeSpec = List[int]
# pylint: enable=invalid-name
def _get_current_experiment():
    """Return the currently active experiment instance.

    The import happens inside the function body on purpose: a module-level
    import would create a circular dependency between this module and
    ``neuralmonkey.experiment``.
    """
    from neuralmonkey.experiment import Experiment
    current = Experiment.get_current()
    return current
def update_initializers(initializers: Iterable[Tuple[str, Callable]]) -> None:
    """Register variable initializers with the current experiment.

    Args:
        initializers: Iterable of ``(variable_name, initializer)`` pairs,
            forwarded verbatim to the current experiment.
    """
    _get_current_experiment().update_initializers(initializers)
def get_initializer(var_name: str,
                    default: Callable = None) -> Optional[Callable]:
    """Return the initializer associated with the given variable name.

    The name of the current variable scope is prepended to the variable name.

    This should only be called during model building.

    Args:
        var_name: Name of the variable, relative to the current
            TensorFlow variable scope.
        default: Initializer to fall back to when no initializer has been
            registered for this variable.

    Returns:
        The registered initializer, or ``default`` when none is registered.
    """
    full_name = tf.get_variable_scope().name + "/" + var_name
    return _get_current_experiment().get_initializer(full_name, default)
def get_variable(name: str,
                 shape: ShapeSpec = None,
                 dtype: tf.DType = None,
                 initializer: Callable = None,
                 **kwargs) -> tf.Variable:
    """Get an existing variable with these parameters or create a new one.

    This is a wrapper around `tf.get_variable`. The `initializer` parameter is
    treated as a default which can be overriden by a call to
    `update_initializers`.

    This should only be called during model building.

    Args:
        name: Name of the variable, relative to the current variable scope.
        shape: Shape of the variable, as accepted by ``tf.get_variable``.
        dtype: Data type of the variable.
        initializer: Default initializer; an initializer registered for
            this variable via ``update_initializers`` takes precedence.
        **kwargs: Forwarded verbatim to ``tf.get_variable``.

    Returns:
        The created (or reused) TensorFlow variable.
    """
    return tf.get_variable(
        name=name, shape=shape, dtype=dtype,
        initializer=get_initializer(name, initializer),
        **kwargs)
def tf_print(tensor: tf.Tensor,
             message: str = None,
             debug_label: str = None) -> tf.Tensor:
    """Print the value of a tensor to the debug log.

    Better than tf.Print, logs to console only when the "tensorval" debug
    subject is turned on.

    Idea found at: https://stackoverflow.com/a/39649614

    Args:
        tensor: The tensor whose value to print
        message: Optional message prefixed to the printed value.
        debug_label: Debug subject checked with ``debug_enabled``; when it
            is off, no print op is even added to the graph.

    Returns:
        As tf.Print, this function returns a tensor identical to the input
        tensor, with the printing side-effect added.
    """
    def print_tensor(x: np.ndarray) -> np.ndarray:
        # Runs inside tf.py_func at session time: logs the evaluated value
        # and passes it through unchanged.
        if message is not None:
            debug(
                "{}, shape: {}:\n{}".format(message, x.shape, x), debug_label)
        else:
            debug("Shape: {}\n{}".format(x.shape, x), debug_label)
        return x

    # To save time, check if debug will print something
    if not debug_enabled(debug_label):
        # Debug subject disabled: return the input untouched and skip the
        # py_func round-trip entirely.
        return tensor

    log_op = tf.py_func(print_tensor, [tensor], [tensor.dtype])[0]
    # The control dependency forces the log op to run whenever the
    # returned identity tensor is evaluated.
    with tf.control_dependencies([log_op]):
        res = tf.identity(tensor)

    return res
def layer_norm(x: tf.Tensor, epsilon: float = 1e-6) -> tf.Tensor:
    """Layer normalize the tensor x, averaging over the last dimension.

    Implementation based on tensor2tensor.

    Args:
        x: Tensor to normalize; statistics are taken over its last axis.
        epsilon: Small constant added to the variance for numerical
            stability before the reciprocal square root.

    Returns:
        The normalized tensor, scaled by a learned ``gamma`` and shifted
        by a learned ``beta``, both created in the "LayerNorm" scope.
    """
    with tf.variable_scope("LayerNorm"):
        gamma = get_variable(
            name="gamma",
            shape=[x.get_shape()[-1]],
            dtype=tf.float32,
            initializer=tf.ones_initializer())
        beta = get_variable(
            name="beta",
            shape=[x.get_shape()[-1]],
            dtype=tf.float32,
            initializer=tf.zeros_initializer())

        # keep_dims (TF1 spelling) retains the reduced axis so mean and
        # variance broadcast against x.
        mean = tf.reduce_mean(x, axis=[-1], keep_dims=True)
        variance = tf.reduce_mean(
            tf.square(x - mean),
            axis=[-1],
            keep_dims=True)
        norm_x = (x - mean) * tf.rsqrt(variance + epsilon)
        return norm_x * gamma + beta
| {
"repo_name": "juliakreutzer/bandit-neuralmonkey",
"path": "neuralmonkey/tf_utils.py",
"copies": "1",
"size": "3632",
"license": "bsd-3-clause",
"hash": -5764959079854759000,
"line_mean": 30.8596491228,
"line_max": 78,
"alpha_frac": 0.6255506608,
"autogenerated": false,
"ratio": 4.035555555555556,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5161106216355555,
"avg_score": null,
"num_lines": null
} |
# A set of helper functions for the nba_shot_chart codebase
import pandas as pd
import numpy as np
import sys
from py_data_getter import data_getter
from py_db import db
db = db('nba_shots')
#Getting the shot data and returning a DataFrame with every shot for a specific player/season combo
def acquire_shootingData(dataType, _id, season='', isCareer=True):
    """Return a DataFrame with every shot for a player/team season or career.

    Args:
        dataType: 'player' or 'team'; interpolated into table and id column
            names in the query.
        _id: The player or team id.
        season: Season string such as '2016-17'; only used when isCareer
            is False.
        isCareer: When True, all seasons are included; when False the query
            is restricted to the given season's actual game-date range.

    Returns:
        pandas.DataFrame with one row per shot and one column per selected
        field (LOC_X/LOC_Y coordinates, SHOT_MADE_FLAG, zone metrics, ...).
    """
    # NOTE(review): queries are built by string interpolation; inputs are
    # assumed to come from trusted internal callers, not end users.
    if isCareer is False:
        # Bound the season by its actual first and last game dates.
        start_season_filt, end_season_filt = db.query("SELECT min(game_date), max(game_date) FROM shots WHERE season_id = %s" % (season.replace('-','')))[0]
        query_append = """AND season_id = %s
        AND game_date > '%s'
        AND game_date < '%s'""" % (season.replace('-',''), start_season_filt, end_season_filt)
    else:
        query_append = ''
    shot_query = """SELECT
    season_id, game_id,
    team_id, game_date,
    event_type, shot_type,
    shot_zone_basic, shot_zone_area, LOC_X, LOC_Y,
    IF(event_type='Made Shot', 1, 0) AS SHOT_MADE_FLAG,
    zone_pct_plus,
    efg_plus
    FROM shots
    JOIN shots_%s_Relative_Year USING (season_id, season_type, %s_id, shot_zone_basic, shot_zone_area)
    WHERE %s_id = %s
    AND season_type = 'Reg'
    %s"""
    shot_q = shot_query % (dataType, dataType, dataType, _id, query_append)
    shots = db.query(shot_q)
    # Build the frame directly from the result tuples. The column list
    # mirrors the SELECT order above; this replaces the previous
    # row-by-row dict-append loop and makes column order deterministic.
    columns = ['season_id', 'game_id', 'team_id', 'game_date',
               'event_type', 'shot_type', 'shot_zone_basic',
               'shot_zone_area', 'LOC_X', 'LOC_Y', 'SHOT_MADE_FLAG',
               'zone_pct_plus', 'efg_plus']
    shot_df = pd.DataFrame(list(shots), columns=columns)
    return shot_df
# Get any of a variety of metrics from the Relative/Distribution/Breakdown/ShotSkill tables
def get_metrics(dataType, _id, season_id, isCareer, zone, metric):
    """Fetch one metric from the Relative/Distribution/Breakdown/ShotSkill
    and percentile tables for a player or team.

    Args:
        dataType: 'player' or 'team'; interpolated into table/column names.
        _id: Player or team id.
        season_id: Season like '2016-17'; ignored when isCareer is True.
        isCareer: Selects the *_Career tables (with Breakdown summed over
            seasons) instead of the *_Year tables.
        zone: shot_zone_basic value to filter on (shot_zone_area is 'all').
        metric: SQL expression selected and rounded to one decimal, e.g.
            'r.paa' or 'r.attempts/r.games'.

    Returns:
        The rounded metric, or 0 when the query returns no rows.
    """
    if isCareer is False:
        metric_q = """SELECT
        ROUND(%s,1)
        FROM shots_%s_Relative_Year r
        JOIN shots_%s_Distribution_Year d USING (%s_id, season_id, season_type, shot_zone_basic, shot_zone_area)
        JOIN shots_%s_Breakdown b USING (%s_id, season_id, season_type, shot_zone_basic, shot_zone_area)
        JOIN shot_skill_plus_%s_Year s USING (%s_id, season_id, season_type)
        JOIN percentiles_%s_Year p USING (%s_id, season_id, season_type)
        WHERE season_id = %s
        AND %s_id = %s
        AND shot_zone_area = 'all'
        AND shot_zone_basic = '%s'
        AND season_type = 'reg'
        """
        # 14 positional args: metric, 9x dataType, season, dataType, id, zone.
        metric_qry = metric_q % (metric, dataType, dataType, dataType, dataType, dataType, dataType, dataType, dataType, dataType, season_id.replace('-',''), dataType, _id, zone)
    else:
        # Career: Breakdown rows are summed across seasons in a subquery
        # before joining, since there is no *_Breakdown_Career table.
        metric_q = """SELECT
        ROUND(%s,1)
        FROM shots_%s_Relative_Career r
        JOIN shots_%s_Distribution_Career d USING (%s_id, season_id, season_type, shot_zone_basic, shot_zone_area)
        JOIN(
            SELECT
            %s_id, season_type, shot_zone_basic, shot_zone_area,
            SUM(games) AS games,
            SUM(attempts) AS attempts,
            SUM(makes) AS makes,
            SUM(points) AS points
            FROM shots_%s_Breakdown
            WHERE %s_id = %s
            AND season_type = 'reg'
            GROUP BY shot_zone_area, shot_zone_basic, season_type
        ) b USING (%s_id, season_type, shot_zone_basic, shot_zone_area)
        JOIN shot_skill_plus_%s_Career s USING (%s_id, season_id, season_type)
        JOIN percentiles_%s_Career p USING (%s_id, season_id, season_type)
        WHERE %s_id = %s
        AND shot_zone_area = 'all'
        AND shot_zone_basic = '%s'
        AND season_type = 'reg'
        """
        metric_qry = metric_q % (metric, dataType, dataType, dataType, dataType, dataType, dataType, _id, dataType, dataType, dataType, dataType, dataType, dataType, _id, zone)
    # raw_input(metric_qry)
    try:
        res = db.query(metric_qry)[0][0]
    except IndexError:
        # No matching rows (e.g. no shots from that zone): report 0.
        res = 0
    return res
# Find the most extreme zone for a given player or team
def get_extreme_zones(dataType, _id, season_id, isCareer, positive_negative, metric):
    """Find the most extreme shot zone for a player or team by some metric.

    Args:
        dataType: 'player' or 'team'; interpolated into table/column names.
        _id: Player or team id.
        season_id: Season like '2016-17'; ignored when isCareer is True.
        isCareer: Use the *_Career tables instead of *_Year.
        positive_negative: 'positive' for the highest-metric zone,
            'negative' for the lowest.
        metric: SQL expression to rank zones by, e.g. 'ROUND(paa, 0)'.

    Returns:
        Tuple of (short zone name, metric value) for the top-ranked zone.

    Raises:
        ValueError: If positive_negative is not 'positive' or 'negative'.
    """
    if isCareer is False:
        qry_add = "\nAND season_id = %s" % (season_id.replace('-',''))
        yearCareer = "Year"
    else:
        qry_add = ""
        yearCareer = "Career"
    if positive_negative == "positive":
        order_qry = "DESC"
    elif positive_negative == "negative":
        order_qry = "ASC"
    else:
        # BUG FIX: previously fell through with order_qry undefined,
        # raising a confusing NameError below; fail fast instead.
        raise ValueError(
            "positive_negative must be 'positive' or 'negative', got %r"
            % (positive_negative,))
    # Zones need either >15% of the shot distribution or >50 attempts to
    # qualify; 'all' and 'Backcourt' are excluded.
    metric_q = """SELECT shot_zone_basic, %s
    FROM(
        SELECT
        shot_zone_basic, zone_pct, zone_pct_plus, zone_efg_plus, efg_plus, paa
        FROM shots_%s_Relative_%s r
        JOIN shots_%s_Distribution_%s d USING (%s_id, season_id, season_type, shot_zone_basic, shot_zone_area)
        WHERE %s_id = %s%s
        AND shot_zone_area = 'all'
        AND shot_zone_basic NOT IN ('all', 'Backcourt')
        AND season_type = 'reg'
        AND (zone_pct > 0.15 OR r.attempts > 50)
        ORDER BY %s %s
    ) a;"""
    metric_qry = metric_q % (metric, dataType, yearCareer, dataType, yearCareer, dataType, dataType, _id, qry_add, metric, order_qry)
    zone_name, zone_value = db.query(metric_qry)[0]
    # Map the database zone labels to the short names used on the charts.
    zones_dict = {
        'all': 'All',
        'Above the Break 3': 'Break3',
        'Corner 3': 'Corner3',
        'In The Paint (Non-RA)': 'Paint(NonRA)',
        'Mid-Range': 'MidRange',
        'Restricted Area': 'Restricted',
        'Backcourt': 'Backcourt'}
    zone_name = zones_dict.get(zone_name)
    return zone_name, zone_value
# Get a text description based on a value and category
def get_text_description(category, value):
    """Look up the descriptive word for a percentile value in a category.

    The inner SELECT finds the highest percentile floor that the given
    value still clears; the outer SELECT maps that floor to its word.
    """
    sql = """SELECT word
    FROM percentile_text_descriptors
    WHERE category = '%s'
    AND percentile_floor = (
        SELECT max(percentile_floor)
        FROM percentile_text_descriptors
        WHERE category = '%s'
        AND %s >= percentile_floor
    );""" % (category, category, value)
    return db.query(sql)[0][0]
# Get percentile values from the percentiles table
def get_percentiles(dataType, _id, season_id, isCareer, zone, metric):
    """Fetch a percentile-style metric for a player or team.

    Near-duplicate of get_metrics but without the percentiles_* table join.

    Args:
        dataType: 'player' or 'team'; interpolated into table/column names.
        _id: Player or team id.
        season_id: Season like '2016-17'; ignored when isCareer is True.
        isCareer: Selects the *_Career tables (with Breakdown summed over
            seasons) instead of the *_Year tables.
        zone: shot_zone_basic value to filter on (shot_zone_area is 'all').
        metric: SQL expression selected and rounded to one decimal.

    Returns:
        The rounded metric, or 0 when the query returns no rows.
    """
    if isCareer is False:
        metric_q = """SELECT
        ROUND(%s,1)
        FROM shots_%s_Relative_Year r
        JOIN shots_%s_Distribution_Year d USING (%s_id, season_id, season_type, shot_zone_basic, shot_zone_area)
        JOIN shots_%s_Breakdown b USING (%s_id, season_id, season_type, shot_zone_basic, shot_zone_area)
        JOIN shot_skill_plus_%s_Year s USING (%s_id, season_id, season_type)
        WHERE season_id = %s
        AND %s_id = %s
        AND shot_zone_area = 'all'
        AND shot_zone_basic = '%s'
        AND season_type = 'reg'
        """
        # 12 positional args: metric, 7x dataType, season, dataType, id, zone.
        metric_qry = metric_q % (metric, dataType, dataType, dataType, dataType, dataType, dataType, dataType, season_id.replace('-',''), dataType, _id, zone)
    else:
        # Career: Breakdown rows are summed across seasons in a subquery
        # before joining, since there is no *_Breakdown_Career table.
        metric_q = """SELECT
        ROUND(%s,1)
        FROM shots_%s_Relative_Career r
        JOIN shots_%s_Distribution_Career d USING (%s_id, season_id, season_type, shot_zone_basic, shot_zone_area)
        JOIN(
            SELECT
            %s_id, season_type, shot_zone_basic, shot_zone_area,
            SUM(games) AS games,
            SUM(attempts) AS attempts,
            SUM(makes) AS makes,
            SUM(points) AS points
            FROM shots_%s_Breakdown
            WHERE %s_id = %s
            AND season_type = 'reg'
            GROUP BY shot_zone_area, shot_zone_basic, season_type
        ) b USING (%s_id, season_type, shot_zone_basic, shot_zone_area)
        JOIN shot_skill_plus_%s_Career s USING (%s_id, season_id, season_type)
        WHERE %s_id = %s
        AND shot_zone_area = 'all'
        AND shot_zone_basic = '%s'
        AND season_type = 'reg'
        """
        metric_qry = metric_q % (metric, dataType, dataType, dataType, dataType, dataType, dataType, _id, dataType, dataType, dataType, dataType, _id, zone)
    # raw_input(metric_qry)
    try:
        res = db.query(metric_qry)[0][0]
    except IndexError:
        # No matching rows (e.g. no shots from that zone): report 0.
        res = 0
    return res
#Getting the league efg for the season. If we're looking at a career, we naively use the league efg of all shots since 1996
def get_lg_efg(season_id, isCareer):
    """Return the league-average eFG%.

    For a single season this reads the league distribution table for that
    season. For a career chart we naively use the league eFG of all shots
    since 1996 (the whole play-by-play era).
    """
    if isCareer is False:
        qry = """SELECT efg
        FROM shots_League_Distribution_Year
        WHERE season_id = %s
        AND shot_zone_basic = 'all'
        AND season_type = 'reg'
        """ % (season_id.replace('-',''))
    else:
        qry = """SELECT efg
        FROM shots_League_Distribution_Career
        WHERE shot_zone_basic = 'all'
        AND season_type = 'reg'
        """
    return db.query(qry)[0][0]
"repo_name": "Connor-R/nba_shot_charts",
"path": "charting/helper_data.py",
"copies": "1",
"size": "9575",
"license": "mit",
"hash": -1167953659886842400,
"line_mean": 35,
"line_max": 232,
"alpha_frac": 0.5953002611,
"autogenerated": false,
"ratio": 3.073836276083467,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9147038259565621,
"avg_score": 0.0044196555235691245,
"num_lines": 266
} |
# A set of helper functions for the nba_shot_chart codebase
import urllib
import os
import csv
import sys
import math
import pandas as pd
import numpy as np
import matplotlib as mpb
import matplotlib.pyplot as plt
from matplotlib import offsetbox as osb
from matplotlib.patches import RegularPolygon
from datetime import date
from py_data_getter import data_getter
from py_db import db
import helper_data
db = db('nba_shots')
# setting the color map we want to use
mymap = mpb.cm.OrRd
np.seterr(divide='ignore', invalid='ignore')
#Drawing the outline of the court
#Most of this code was recycled from Savvas Tjortjoglou [http://savvastjortjoglou.com]
def draw_court(ax=None, color='white', lw=2, outer_lines=False):
    """Draw an NBA half court (in stats.nba.com coordinates) on an axes.

    Recycled in large part from Savvas Tjortjoglou
    [http://savvastjortjoglou.com].

    Args:
        ax: Axes to draw on; defaults to the current axes.
        color: Line color for every court element.
        lw: Line width for every court element.
        outer_lines: Also draw the court boundary rectangle when True.

    Returns:
        The axes the court was drawn on, with ticks and labels removed.
    """
    from matplotlib.patches import Circle, Rectangle, Arc

    if ax is None:
        ax = plt.gca()

    # Build each element and collect it as we go.
    elements = []
    # Basket and backboard.
    elements.append(Circle((0, 0), radius=7.5, linewidth=lw, color=color,
                           fill=False))
    elements.append(Rectangle((-30, -7.5), 60, -1, linewidth=lw, color=color))
    # The paint: outer and inner boxes.
    elements.append(Rectangle((-80, -47.5), 160, 190, linewidth=lw,
                              color=color, fill=False))
    elements.append(Rectangle((-60, -47.5), 120, 190, linewidth=lw,
                              color=color, fill=False))
    # Free-throw circle: solid top half, dashed bottom half.
    elements.append(Arc((0, 142.5), 120, 120, theta1=0, theta2=180,
                        linewidth=lw, color=color, fill=False))
    elements.append(Arc((0, 142.5), 120, 120, theta1=180, theta2=0,
                        linewidth=lw, color=color, linestyle='dashed'))
    # Restricted area arc.
    elements.append(Arc((0, 0), 80, 80, theta1=0, theta2=180, linewidth=lw,
                        color=color))
    # Three-point line: two corner segments plus the arc.
    elements.append(Rectangle((-220, -50.0), 0, 140, linewidth=lw,
                              color=color))
    elements.append(Rectangle((219.75, -50.0), 0, 140, linewidth=lw,
                              color=color))
    elements.append(Arc((0, 0), 475, 475, theta1=22, theta2=158,
                        linewidth=lw, color=color))
    # Half-court circles.
    elements.append(Arc((0, 422.5), 120, 120, theta1=180, theta2=0,
                        linewidth=lw, color=color))
    elements.append(Arc((0, 422.5), 40, 40, theta1=180, theta2=0,
                        linewidth=lw, color=color))

    if outer_lines:
        elements.append(Rectangle((-250, -47.5), 500, 470, linewidth=lw,
                                  color=color, fill=False))

    for element in elements:
        ax.add_patch(element)

    # Strip all tick marks and labels for a clean chart background.
    ax.set_xticklabels([])
    ax.set_yticklabels([])
    ax.set_xticks([])
    ax.set_yticks([])
    return ax
# we set gridNum to be 30 (basically a grid of 30x30 hexagons)
def shooting_plot(dataType, path, shot_df, _id, season_id, _title, _name, isCareer=False, min_year = 0, max_year = 0, plot_size=(24,24), gridNum=30):
    """Draw and save a hexbin shot chart for a player or team.

    Args:
        dataType: 'player' or 'team'; controls metric lookups and which
            picture is placed in the bottom-right corner.
        path: Directory the finished .png is written to.
        shot_df: Shot-level DataFrame from helper_data.acquire_shootingData.
        _id: Player or team id.
        season_id: Season label used in titles and metric lookups.
        _title: Display name used for the chart title.
        _name: Name used in the output file name.
        isCareer: Chart a whole career instead of a single season.
        min_year, max_year: Unused in this function; kept for callers.
        plot_size: Unused in this function (figure size is hardcoded below).
        gridNum: Hexbin grid density (gridNum x gridNum); 30 by default.

    Side effects:
        Saves the chart with plt.savefig and clears the current figure.
    """
    # get the shooting percentage and number of shots for all bins, all shots, and a subset of some shots
    (ShootingPctLocs, shotNumber), shot_count_all = find_shootingPcts(shot_df, gridNum)
    # Overall eFG percentile drives the color used for titles/legend.
    all_efg_percentile = float(helper_data.get_metrics(dataType, _id, season_id, isCareer, 'all', 'EFG_Percentile'))
    color_efg = max(min((all_efg_percentile/100),1.0),0.0)
    # NOTE(review): paa is computed here but never used in this function.
    paa = float(helper_data.get_metrics(dataType, _id, season_id, isCareer, 'all', 'paa'))
    # set the figure for drawing on
    fig = plt.figure(figsize=(24,24))
    # cmap will be used as our color map going forward
    cmap = mymap
    # where to place the plot within the figure, first two attributes are the x_min and y_min, and the next 2 are the % of the figure that is covered in the x_direction and y_direction (so in this case, our plot will go from (0.05, 0.15) at the bottom left, and stretches to (0.85,0.925) at the top right)
    ax = plt.axes([0.05, 0.15, 0.81, 0.775])
    # setting the background color using a hex code (http://www.rapidtables.com/web/color/RGB_Color.htm)
    # ax.set_facecolor('#0C232E')
    ax.set_facecolor('#152535')
    # draw the outline of the court
    draw_court(outer_lines=False)
    # specify the dimensions of the court we draw
    plt.xlim(-250,250)
    plt.ylim(370, -30)
    # drawing the bottom right image
    zoom = 1 # we don't need to zoom the image at all
    if dataType == 'player':
        img = acquire_playerPic(_id, zoom)
    else:
        img = acquire_teamPic(season_id, _title, _id, zoom)
    ax.add_artist(img)
    # specify the % a zone that we want to correspond to a maximum sized hexagon [I have this set to any zone with >= 1% of all shots will have a maximum radius, but it's free to be changed based on personal preferences]
    max_radius_perc = 1.0
    max_rad_multiplier = 100.0/max_radius_perc
    # changing to what power we want to scale the area of the hexagons as we increase/decrease the radius. This value can also be changed for personal preferences.
    area_multiplier = (3./4.)
    lg_efg = float(helper_data.get_lg_efg(season_id, isCareer))
    # draw hexagons
    # i is the bin#, and shots is the shooting% for that bin
    for i, shots in enumerate(ShootingPctLocs):
        x,y = shotNumber.get_offsets()[i]
        # we check the distance from the hoop the bin is. If it in 3pt territory, we add a multiplier of 1.5 to the shooting% to properly encapsulate eFG%
        dist = math.sqrt(x**2 + y**2)
        mult = 1.0
        if abs(x) >= 220:
            mult = 1.5
        elif dist/10 >= 23.75:
            mult = 1.5
        else:
            mult = 1.0
        # Setting the eFG% for a bin, making sure it's never over 1 (our maximum color value)
        color_pct = ((shots*mult)/lg_efg)-0.5
        bin_pct = max(min(color_pct, 1.0), 0.0)
        hexes = RegularPolygon(
            shotNumber.get_offsets()[i], #x/y coords
            numVertices=6,
            radius=(295/gridNum)*((max_rad_multiplier*((shotNumber.get_array()[i]))/shot_count_all)**(area_multiplier)),
            color=cmap(bin_pct),
            alpha=0.95,
            fill=True)
        # setting a maximum radius for our bins at 295 (personal preference)
        if hexes.radius > 295/gridNum:
            hexes.radius = 295/gridNum
        ax.add_patch(hexes)
    # creating the frequency legend
    # we want to have 4 ticks in this legend so we iterate through 4 items
    for i in range(0,4):
        base_rad = max_radius_perc/4
        # the x,y coords for our patch (the first coordinate is (-205,415), and then we move up and left for each addition coordinate)
        patch_x = -205-(10*i)
        patch_y = 365-(14*i)
        # specifying the size of our hexagon in the frequency legend
        patch_rad = (299.9/gridNum)*((base_rad+(base_rad*i))**(area_multiplier))
        patch_perc = base_rad+(i*base_rad)
        # the x,y coords for our text
        text_x = patch_x + patch_rad + 2
        text_y = patch_y
        patch_axes = (patch_x, patch_y)
        # the text will be slightly different for our maximum sized hexagon,
        if i < 3:
            text_text = ' %s%% of Attempted Shots' % ('%.2f' % patch_perc)
        else:
            text_text = '$\geq$%s%% of Attempted Shots' %(str(patch_perc))
        # draw the hexagon. the color=map(eff_fg_all_float/100) makes the hexagons in the legend the same color as the player's overall eFG%
        patch = RegularPolygon(patch_axes, numVertices=6, radius=patch_rad, color=cmap(color_efg), alpha=0.95, fill=True)
        ax.add_patch(patch)
        # add the text for the hexagon
        ax.text(text_x, text_y, text_text, fontsize=16, horizontalalignment='left', verticalalignment='center', family='DejaVu Sans', color='white', fontweight='bold')
    # Add a title to our frequency legend (the x/y coords are hardcoded).
    # Again, the color=map(eff_fg_all_float/100) makes the hexagons in the legend the same color as the player's overall eFG%
    ax.text(-235, 310, 'Zone Frequencies', fontsize=16, horizontalalignment='left', verticalalignment='bottom', family='DejaVu Sans', color=cmap(color_efg), fontweight='bold')
    # Add a title to our chart (just the player's name)
    chart_title = "%s | %s" % (_title.upper(), season_id)
    ax.text(31.25,-40, chart_title, fontsize=32, horizontalalignment='center', verticalalignment='bottom', family='DejaVu Sans', color=cmap(color_efg), fontweight='bold')
    # Add user text
    ax.text(-250,-31,'CHARTS BY CONNOR REED (@NBAChartBot)',
        fontsize=16, horizontalalignment='left', verticalalignment = 'bottom', family='DejaVu Sans', color='white', fontweight='bold')
    # Add data source text
    ax.text(31.25,-31,'DATA FROM STATS.NBA.COM',
        fontsize=16, horizontalalignment='center', verticalalignment = 'bottom', family='DejaVu Sans', color='white', fontweight='bold')
    # Add date text
    _date = date.today()
    ax.text(250,-31,'AS OF %s' % (str(_date)),
        fontsize=16, horizontalalignment='right', verticalalignment = 'bottom', family='DejaVu Sans', color='white', fontweight='bold')
    key_text = get_key_text(dataType, _id, season_id, isCareer)
    # adding breakdown of eFG% by shot zone at the bottom of the chart
    ax.text(307,380, key_text, fontsize=20, horizontalalignment='right', verticalalignment = 'top', family='DejaVu Sans', color='white', linespacing=1.5)
    if dataType == 'player':
        teams_text, team_len = get_teams_text(_id, season_id, isCareer)
    else:
        teams_text = _title
        team_len = 0
    # adding which season the chart is for, as well as what teams the player is on
    # (line spacing is tightened slightly when the team list is long)
    if team_len > 12:
        ax.text(-250,380, season_id + ' Regular Season:\n' + teams_text,
            fontsize=20, horizontalalignment='left', verticalalignment = 'top', family='DejaVu Sans', color='white', linespacing=1.4)
    else:
        ax.text(-250,380, season_id + ' Regular Season:\n' + teams_text,
            fontsize=20, horizontalalignment='left', verticalalignment = 'top', family='DejaVu Sans', color='white', linespacing=1.6)
    # adding a color bar for reference
    ax2 = fig.add_axes([0.875, 0.15, 0.04, 0.775])
    cb = mpb.colorbar.ColorbarBase(ax2,cmap=cmap, orientation='vertical')
    cbytick_obj = plt.getp(cb.ax.axes, 'yticklabels')
    plt.setp(cbytick_obj, color='white', fontweight='bold',fontsize=16)
    cb.set_label('EFG+ (100 is League Average)', family='DejaVu Sans', color='white', fontweight='bold', labelpad=-4, fontsize=24)
    cb.set_ticks([0.0, 0.25, 0.5, 0.75, 1.0])
    cb.set_ticklabels(['$\mathbf{\leq}$50','75', '100','125', '$\mathbf{\geq}$150'])
    # Save and clear: the odd replace() strips the career pseudo-season
    # label out of the file name.
    figtit = path+'%s(%s)_%s.png' % (_name, _id, season_id.replace('PBP ERA (1996/97 onward)','').replace(' ',''))
    plt.savefig(figtit, facecolor='#2E3748', edgecolor='black')
    plt.clf()
#Producing the text for the bottom of the shot chart
def get_key_text(dataType, _id, season_id, isCareer, isTwitter=False):
    """Build the multi-line stats summary text printed under a shot chart.

    Pulls volume, shot skill, efficiency, and value metrics plus the
    best/worst zones, and formats them either with matplotlib mathtext
    (default) or as plain text for a tweet (isTwitter=True).

    Args:
        dataType: 'player' or 'team', forwarded to the metric lookups.
        _id: Player or team id.
        season_id: Season label; ignored for career charts.
        isCareer: Whether to aggregate over the whole career.
        isTwitter: Emit the plain-text tweet variant instead of mathtext.

    Returns:
        The assembled summary string.
    """
    # NOTE(review): the zone argument alternates between 'all' and 'All'
    # across these calls -- presumably the lookup is case-insensitive;
    # confirm against the database collation.
    text = ''
    # Volume line: makes/attempts/games and attempts per game + percentile.
    total_atts = ("%.0f" % helper_data.get_metrics(dataType, _id, season_id, isCareer, 'all', 'r.attempts'))
    total_makes = ("%.0f" % helper_data.get_metrics(dataType, _id, season_id, isCareer, 'all', 'b.makes'))
    total_games = ("%.0f" % helper_data.get_metrics(dataType, _id, season_id, isCareer, 'all', 'r.games'))
    total_attPerGame = ("%.1f" % helper_data.get_metrics(dataType, _id, season_id, isCareer, 'all', 'r.attempts/r.games'))
    vol_percentile = ("%.0f" % helper_data.get_metrics(dataType, _id, season_id, isCareer, 'all', 'AttemptsPerGame_percentile'))
    vol_word = helper_data.get_text_description('AttemptsPerGame', vol_percentile)
    vol_text = '$\mathbf{' + vol_word.upper() + '}$ Volume | ' + str(total_makes) + ' for ' + str(total_atts) + ' in ' + str(total_games) + ' Games | ' + str(total_attPerGame) + ' FGA/Game, $\mathbf{P_{' + str(vol_percentile) + '}}$'
    vol_twitter_text = 'Volume: ' + vol_word.upper() + ' | P_' + str(vol_percentile) + ' (percentile)'
    # Shot skill line.
    shotSkillPlus = ("%.1f" % helper_data.get_metrics(dataType, _id, season_id, isCareer, 'All', 's.ShotSkillPlus'))
    shotSkill_percentile = ("%.0f" % helper_data.get_metrics(dataType, _id, season_id, isCareer, 'all', 'shotSkill_Percentile'))
    shotSkill_word = helper_data.get_text_description('shotSkill', shotSkill_percentile)
    shotSkill_text = '$\mathbf{' + shotSkill_word.upper() + '}$ Shot Skill | ' + str(shotSkillPlus) + ' ShotSkill+, $\mathbf{P_{' + str(shotSkill_percentile) + '}}$'
    shotSkill_twitter_text = 'Shot Skill: ' + shotSkill_word.upper() + ' | P_' + str(shotSkill_percentile)
    # Efficiency line: raw eFG%, league-relative EFG+, and percentile.
    efg = ("%.1f" % helper_data.get_metrics(dataType, _id, season_id, isCareer, 'All', 'd.efg*100'))
    efgPlus = ("%.1f" % helper_data.get_metrics(dataType, _id, season_id, isCareer, 'All', 'r.efg_plus'))
    efg_percentile = ("%.0f" % helper_data.get_metrics(dataType, _id, season_id, isCareer, 'all', 'EFG_Percentile'))
    efg_word = helper_data.get_text_description('EFG', efg_percentile)
    efg_text = '$\mathbf{' + efg_word.upper() + '}$ Efficiency | ' + str(efg) + ' EFG% | ' + str(efgPlus) + ' EFG+, $\mathbf{P_{' + str(efg_percentile) + '}}$'
    efg_twitter_text = 'Efficiency: ' + efg_word.upper() + ' | P_' + str(efg_percentile)
    # Points-above-average (value) line.
    PAA = ("%.1f" % helper_data.get_metrics(dataType, _id, season_id, isCareer, 'All', 'r.paa'))
    PAAperGame = ("%.1f" % helper_data.get_metrics(dataType, _id, season_id, isCareer, 'All', 'r.paa_per_game'))
    PAA_percentile = ("%.0f" % helper_data.get_metrics(dataType, _id, season_id, isCareer, 'all', 'PAAperGame_percentile'))
    PAA_word = helper_data.get_text_description('PAAperGame', PAA_percentile)
    PAA_text = '$\mathbf{' + PAA_word.upper() + '}$ Efficiency Value Added | ' + str(PAA) + ' Total PAA | ' + str(PAAperGame) + ' PAA/Game, $\mathbf{P_{' + str(PAA_percentile) + '}}$'
    PAA_twitter_text = 'Efficiency Value: ' + PAA_word.upper() + ' | P_' + str(PAA_percentile)
    # Most-used zone relative to league distribution.
    fav_zone, fav_zoneVal = helper_data.get_extreme_zones(dataType, _id, season_id, isCareer, 'positive', 'ROUND(zone_pct_plus-100,0)')
    if fav_zoneVal >= 0:
        fav_zoneTextAdd = "+"
    else:
        fav_zoneTextAdd = ""
    fav_zoneTEXT = '$\mathbf{Favorite Zone}$ (Relative to League Averages) -- $\mathbf{' + str(fav_zone) + '}$ (' + str(fav_zoneTextAdd) + str(fav_zoneVal) + '% distribution)'
    fav_twitter_zoneTEXT = 'Favorite Zone: ' + str(fav_zone)
    # Best zone by relative efficiency.
    skill_zone, skill_zoneVal = helper_data.get_extreme_zones(dataType, _id, season_id, isCareer, 'positive', 'ROUND(zone_efg_plus-100,0)')
    if skill_zoneVal >= 0:
        skill_zoneTextAdd = 'above'
    else:
        skill_zoneTextAdd = 'below'
    skill_zoneTEXT = '$\mathbf{Best Skill}$ -- $\mathbf{' + str(skill_zone) + '}$ (' + str(abs(skill_zoneVal)) + '% ' + str(skill_zoneTextAdd) + ' average)'
    skill_twitter_zoneTEXT = 'Best Skill Zone: ' + str(skill_zone)
    # Best zone by total points above average.
    value_zone, value_zoneVal = helper_data.get_extreme_zones(dataType, _id, season_id, isCareer, 'positive', 'ROUND(paa, 0)')
    if value_zoneVal >= 0:
        value_zoneTextAdd = "+"
    else:
        value_zoneTextAdd = ""
    value_zoneTEXT = '$\mathbf{Best Value}$ -- $\mathbf{' + str(value_zone) + '}$ (' + str(value_zoneTextAdd) + str(value_zoneVal) + ' PAA)'
    value_twitter_zoneTEXT = 'Best Value Zone: ' + str(value_zone)
    # Worst zone by relative efficiency.
    LEASTskill_zone, LEASTskill_zoneVal = helper_data.get_extreme_zones(dataType, _id, season_id, isCareer, 'negative', 'ROUND(zone_efg_plus-100,0)')
    if LEASTskill_zoneVal >= 0:
        LEASTskill_zoneTextAdd = 'above'
    else:
        LEASTskill_zoneTextAdd = 'below'
    LEASTskill_zoneTEXT = '$\mathbf{Worst Skill}$ -- $\mathbf{' + str(LEASTskill_zone) + '}$ (' + str(abs(LEASTskill_zoneVal)) + '% ' + str(LEASTskill_zoneTextAdd) + ' average)'
    # Worst zone by total points above average.
    LEASTvalue_zone, LEASTvalue_zoneVal = helper_data.get_extreme_zones(dataType, _id, season_id, isCareer, 'negative', 'ROUND(paa, 0)')
    if LEASTvalue_zoneVal >= 0:
        LEASTvalue_zoneTextAdd = "+"
    else:
        LEASTvalue_zoneTextAdd = ""
    LEASTvalue_zoneTEXT = '$\mathbf{Least Value}$ -- $\mathbf{' + str(LEASTvalue_zone) + '}$ (' + str(LEASTvalue_zoneTextAdd) + str(LEASTvalue_zoneVal) + ' PAA)'
    # Assemble the final block; the tweet variant omits the worst zones.
    if isTwitter is False:
        text += vol_text
        text += '\n'+shotSkill_text
        text += '\n'+efg_text
        text += '\n'+PAA_text
        text += '\n'+fav_zoneTEXT
        text += '\n'+skill_zoneTEXT
        text += ' | '+value_zoneTEXT
        text += '\n'+LEASTskill_zoneTEXT
        text += ' | '+LEASTvalue_zoneTEXT
    else:
        text += ':\n\n'+vol_twitter_text
        text += '\n'+shotSkill_twitter_text
        text += '\n'+efg_twitter_text
        text += '\n'+PAA_twitter_text
        text += '\n\n'+fav_twitter_zoneTEXT
        text += '\n'+skill_twitter_zoneTEXT
        text += '\n'+value_twitter_zoneTEXT
    return text
#Getting the shooting percentages for each grid.
#The general idea of this function, as well as a substantial block of the actual code was recycled from Dan Vatterott [http://www.danvatterott.com/]
def find_shootingPcts(shot_df, gridNum):
    """Hexbin the shots and compute the shooting percentage for each bin.

    The general idea, and a substantial block of the actual code, was
    recycled from Dan Vatterott [http://www.danvatterott.com/].

    Args:
        shot_df: DataFrame with LOC_X, LOC_Y and SHOT_MADE_FLAG columns.
        gridNum: Hexbin grid size passed to plt.hexbin.

    Returns:
        ((ShootingPctLocs, hb_shot), shot_count_all): per-bin FG% array,
        the attempts hexbin collection (used for bin offsets/counts), and
        the total number of shots in shot_df.
    """
    # Shots with LOC_Y above 425.1 (heaves from past half court) are
    # excluded from the binning.
    x = shot_df.LOC_X[shot_df['LOC_Y']<425.1]
    y = shot_df.LOC_Y[shot_df['LOC_Y']<425.1]
    # Grabbing the x and y coords, for all made shots
    x_made = shot_df.LOC_X[(shot_df['SHOT_MADE_FLAG']==1) & (shot_df['LOC_Y']<425.1)]
    y_made = shot_df.LOC_Y[(shot_df['SHOT_MADE_FLAG']==1) & (shot_df['LOC_Y']<425.1)]
    #compute number of shots made and taken from each hexbin location
    hb_shot = plt.hexbin(x, y, gridsize=gridNum, extent=(-250,250,425,-50));
    plt.close()
    hb_made = plt.hexbin(x_made, y_made, gridsize=gridNum, extent=(-250,250,425,-50));
    plt.close()
    #compute shooting percentage
    ShootingPctLocs = hb_made.get_array() / hb_shot.get_array()
    ShootingPctLocs[np.isnan(ShootingPctLocs)] = 0 #makes 0/0s=0
    shot_count_all = len(shot_df.index)
    # Returning all values
    return (ShootingPctLocs, hb_shot), shot_count_all
#Getting the player picture that we will later place in the chart
#Most of this code was recycled from Savvas Tjortjoglou [http://savvastjortjoglou.com]
def acquire_playerPic(player_id, zoom, offset=(250,370)):
    """Return an AnnotationBbox containing the player's headshot.

    Most of this code was recycled from Savvas Tjortjoglou
    [http://savvastjortjoglou.com].

    Tries, in order: a cached local <player_id>.png, the current NBA CDN
    headshot URL, the legacy stats.nba.com URL, and finally a generic
    chart icon shipped alongside the code.

    Args:
        player_id: NBA player id used in the file name and URLs.
        zoom: Zoom factor for the OffsetImage.
        offset: Data coordinates the image box is anchored to.

    Returns:
        A matplotlib AnnotationBbox ready to add to an axes.
    """
    try:
        img_path = os.getcwd()+'/'+str(player_id)+'.png'
        player_pic = plt.imread(img_path)
    except (ValueError,IOError):
        try:
            # Python 2 urllib: urlretrieve returns (filename, headers).
            pic = urllib.urlretrieve("https://ak-static.cms.nba.com/wp-content/uploads/headshots/nba/latest/260x190/"+str(player_id)+".png",str(player_id)+".png")
            player_pic = plt.imread(pic[0])
        except (ValueError, IOError):
            try:
                pic = urllib.urlretrieve("http://stats.nba.com/media/players/230x185/"+str(player_id)+".png",str(player_id)+".png")
                player_pic = plt.imread(pic[0])
            except (ValueError, IOError):
                # Last resort: generic icon bundled with the project.
                img_path = os.getcwd()+'/chart_icon.png'
                player_pic = plt.imread(img_path)
    img = osb.OffsetImage(player_pic, zoom)
    img = osb.AnnotationBbox(img, offset,xycoords='data',pad=0.0, box_alignment=(1,0), frameon=False)
    return img
#Getting the team picture that we will later place in the chart
def acquire_teamPic(season_id, team_title, team_id, zoom, offset=(250,370)):
    """Return an AnnotationBbox containing the team's logo.

    Looks up the logo URL in ../csvs/team_abbreviations.csv by team name,
    then tries, in order: a cached local <team_id>.png, downloading the
    logo from that URL, and finally the generic NBA logo.

    Args:
        season_id: Unused here; kept for interface compatibility.
        team_title: Full team name, used as the CSV lookup key.
        team_id: Team id used for the cached file name.
        zoom: Zoom factor for the OffsetImage.
        offset: Data coordinates the image box is anchored to.

    Returns:
        A matplotlib AnnotationBbox ready to add to an axes.
    """
    abb_file = os.getcwd()+"/../csvs/team_abbreviations.csv"
    abb_list = {}
    with open(abb_file, 'rU') as f:
        mycsv = csv.reader(f)
        for row in mycsv:
            team, abb, imgurl = row
            abb_list[team] = [abb, imgurl]
    img_url = abb_list.get(team_title)[1]
    try:
        img_path = os.getcwd()+'/'+str(team_id)+'.png'
        team_pic = plt.imread(img_path)
    except IOError:
        try:
            # Python 2 urllib: urlretrieve returns (filename, headers).
            pic = urllib.urlretrieve(img_url,str(team_id)+'.png')
            team_pic = plt.imread(pic[0])
        except (ValueError, IOError):
            img_path = os.getcwd()+'/nba_logo.png'
            # BUG FIX: this fallback previously assigned `player_pic`,
            # leaving `team_pic` undefined and raising NameError below.
            team_pic = plt.imread(img_path)
    img = osb.OffsetImage(team_pic, zoom)
    img = osb.AnnotationBbox(img, offset,xycoords='data',pad=0.0, box_alignment=(1,0), frameon=False)
    return img
#Producing the text for the bottom of the shot chart
def get_teams_text(player_id, season_id, isCareer):
    """Return (text, team_count) describing the teams a player played for.

    The text lists full team names, comma separated, with a line break
    inserted after every third team so the label fits on the chart.

    Args:
        player_id: Player id to look up.
        season_id: Season like '2016-17'; ignored when isCareer is True.
        isCareer: When True, include teams from every season.

    Returns:
        Tuple of (formatted team names, number of distinct teams).
    """
    if isCareer is True:
        season_q = ''
    else:
        season_q = '\nAND season_id = %s' % (season_id.replace('-',''))
    team_q = """SELECT
    DISTINCT CONCAT(city, ' ', tname)
    FROM shots s
    JOIN teams t USING (team_id)
    WHERE player_id = %s%s
    AND LEFT(season_id, 4) >= t.start_year
    AND LEFT(season_id, 4) < t.end_year;
    """
    team_qry = team_q % (player_id, season_q)
    teams = db.query(team_qry)
    team_list = [team[0] for team in teams]
    # BUG FIX: an empty result previously crashed on team_list[-1];
    # return an empty label (and a count of 0) instead.
    if not team_list:
        return "", 0
    team_text = ""
    if len(team_list) == 1:
        team_text = str(team_list[0])
    else:
        # All but the last team get a trailing comma; wrap every 3 teams.
        i = 0
        for team in team_list[0:-1]:
            if i%3 == 0 and i > 0:
                team_text += '\n'
            text_add = '%s, ' % str(team)
            team_text += text_add
            i += 1
        if i%3 == 0:
            team_text += '\n'
        team_text += str(team_list[-1])
    return team_text, len(team_list)
| {
"repo_name": "Connor-R/nba_shot_charts",
"path": "charting/helper_charting.py",
"copies": "1",
"size": "21226",
"license": "mit",
"hash": -4521192645061640000,
"line_mean": 44.4518201285,
"line_max": 305,
"alpha_frac": 0.6208423631,
"autogenerated": false,
"ratio": 3.042717889908257,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.41635602530082566,
"avg_score": null,
"num_lines": null
} |
# A set of helper functions for the NSBL codebase
from py_db import db
db = db('NSBL')
def random_sql_helpers():
    """Placeholder for commonly used SQL snippets.

    NOTE(review): currently a stub -- it builds an empty dict and
    implicitly returns None.
    """
    sql_dict = {
    }
def get_team_abb(team_name, year):
    """Return the team_abb for a (team_name, year), or None when missing.

    A missing row is printed (Python 2 print statement) rather than
    raised, so callers can decide how to handle the None.
    """
    qry = db.query("SELECT team_abb FROM teams WHERE year = %s AND team_name = '%s';" % (year, team_name))
    if qry != ():
        team_abb = qry[0][0]
    else:
        print "\n\n!!!!ERROR!!!! - no team_abb for %s, %s" % (year, team_name)
        team_abb = None
    return team_abb
def get_division(team_name, year):
    """Classify every team in a year relative to the given team.

    Returns a 4-tuple:
        (division, divisional_teams, conference_teams, non_conference_teams)
    where division is the given team's division (None if the team is not
    found), divisional/conference lists exclude the team itself, and the
    conference is identified by the first two characters of the division.
    """
    qry = """SELECT team_name
    , division
    FROM teams
    WHERE 1
    AND year = %s
    ;""" % (year)
    # Map every team in the year to its division.
    division_dict = {}
    for tm_name, tm_division in db.query(qry):
        division_dict[tm_name] = tm_division
    division = division_dict.get(team_name)
    divisional_teams = []
    conference_teams = []
    non_conference_teams = []
    for other, other_div in division_dict.items():
        same_conference = other_div[:2] == division[:2]
        if other != team_name and other_div == division:
            divisional_teams.append(other)
        if other != team_name and same_conference:
            conference_teams.append(other)
        if not same_conference:
            non_conference_teams.append(other)
    return division, divisional_teams, conference_teams, non_conference_teams
def get_team_name(city_name, year):
    # Return the team_name for (city_name, year), or None with a console
    # warning when no row matches.
    # NOTE(review): raw string interpolation into SQL (injectable).
    qry = db.query("SELECT team_name FROM teams WHERE year = %s AND city_name = '%s';" % (year, city_name))
    if qry != ():
        team_name = qry[0][0]
    else:
        print "\n\n!!!!ERROR!!!! - no team_name for %s, %s" % (year, city_name)
        team_name = None
    return team_name
def get_team(mascot_name, year):
    # Return the team_name for (mascot_name, year), or None with a console
    # warning when no row matches.
    # NOTE(review): raw string interpolation into SQL (injectable).
    qry = db.query("SELECT team_name FROM teams WHERE year = %s AND mascot_name = '%s';" % (year, mascot_name))
    if qry != ():
        team_name = qry[0][0]
    else:
        print "\n\n!!!!ERROR!!!! - no team_name for %s, %s" % (year, mascot_name)
        team_name = None
    return team_name
def get_mascot_names(team_abb, year):
    # Return the mascot_name for a team abbreviation (matching either the
    # official or the spreadsheet abbreviation) in a season, or None.
    # NOTE(review): despite the plural name, only the first match is
    # returned.
    qry = db.query("SELECT mascot_name FROM teams WHERE year = %s AND (team_abb = '%s' OR spreadsheet_abb = '%s');" % (year, team_abb, team_abb))
    if qry != ():
        mascot_name = qry[0][0]
    else:
        print "\n\n!!!!ERROR!!!! - no mascot_name for %s, %s" % (year, team_abb)
        mascot_name = None
    return mascot_name
def get_park_factors(team_abb, year):
    """
    Scaled park factors (by a factor of 1/3) from fangraphs
    """
    # Matches any of the franchise's historical abbreviations or the
    # season's team_abb.
    qry = db.query("""SELECT park_factor
        FROM teams_current_franchise tcf
        JOIN teams t ON (tcf.team_name = t.team_name)
        WHERE 1
        AND (tcf.primary_abb = '%s' OR tcf.secondary_abb = '%s' OR tcf.tertiary_abb = '%s' OR t.team_abb = '%s')
        AND year = %s;""" % (team_abb, team_abb, team_abb, team_abb, year))
    if qry != ():
        park_factor = qry[0][0]
    else:
        # Fall back to 100.0, i.e. a league-neutral park.
        print "\n\n!!!!ERROR!!!! - no park_factor for %s, %s" % (year, team_abb)
        park_factor = 100.0
    return park_factor
def get_pos_adj(position):
    # Return the positional WAR adjustment for a position, or 0.0 with a
    # console warning when the position is unknown.
    qry = db.query("SELECT adjustment FROM helper_positional_adjustment WHERE position = '%s';" % (position))
    if qry != ():
        pos_adj = qry[0][0]
    else:
        print "\n\n!!!!ERROR!!!! - no position adjustment for %s" % (position)
        pos_adj = 0.0
    return pos_adj
def get_pos_formula(position):
    """
    Returns coefficients for error/range/arm/passed ball values according to http://www.ontonova.com/floodstudy/4647-5.html.
    This should possibly be scaled down?
    """
    # Returned list order: [range, error, arm, passed ball]
    qry = db.query("SELECT rng, err, arm, passed_ball FROM helper_zips_positions WHERE position = '%s';" % (position))
    if qry != ():
        pos_formula = [float(qry[0][0]), float(qry[0][1]), float(qry[0][2]), float(qry[0][3])]
    else:
        # Unknown position: all-zero weights so it contributes nothing.
        print "\n\n!!!!ERROR!!!! - no position formula for %s" % (position)
        pos_formula = [0,0,0,0]
    # for i, v in enumerate(pos_formula):
    #     # research from http://dmbo.net/smf/index.php?topic=4883.0 and ad_hoc/defensive_value_analysis.xlsx shows that the original defensive values should be regressed back ~72.5%
    #     pos_formula[i] = 0.725 * v
    return pos_formula
def get_league_average_hitters(year, category):
    # Return one league-average hitting figure for `year`; `category` is one
    # of 'lg_pa', 'lg_r', 'lg_obp', 'lg_slg', 'lg_woba' (None if unknown).
    # Column names like `1b` below are raw SQL identifiers in the schema.
    q = """SELECT
        pa,
        r,
        (h+bb+hbp)/pa as obp,
        (1b + 2*2b + 3*3b + 4*hr)/ab as slg,
        woba
        FROM processed_league_averages_hitting
        WHERE year = %s
        """
    qry = q % year
    query = db.query(qry)[0]
    lg_pa, lg_r, lg_obp, lg_slg, lg_woba = query
    avgs = {"lg_pa":lg_pa, "lg_r":lg_r, "lg_obp":lg_obp, "lg_slg":lg_slg, "lg_woba":lg_woba}
    return avgs.get(category)
def get_zips_average_hitters(year, category):
    # Same as get_league_average_hitters but sourced from the ZiPS
    # projection averages table instead of processed league averages.
    q = """SELECT
        pa,
        r,
        (h+bb+hbp)/pa as obp,
        (1b + 2*2b + 3*3b + 4*hr)/ab as slg,
        woba
        FROM zips_averages_hitting
        WHERE year = %s
        """
    qry = q % year
    query = db.query(qry)[0]
    lg_pa, lg_r, lg_obp, lg_slg, lg_woba = query
    avgs = {"lg_pa":lg_pa, "lg_r":lg_r, "lg_obp":lg_obp, "lg_slg":lg_slg, "lg_woba":lg_woba}
    return avgs.get(category)
def get_offensive_metrics(year, pf, pa, ab, bb, hbp, _1b, _2b, _3b, hr, sb, cs):
    """
    Converts a players offensive boxscore stats in a given year to more advanced stats (ops, wOBA, park_adjusted wOBA, OPS+, wRC, wRC/27, wRC+, RAA, and offensive WAR)
    """
    # Linear weights for wOBA -- presumably FanGraphs-style coefficients;
    # confirm the source season of these constants.
    # NOTE(review): this line divides by pa BEFORE the pa != 0 guard below,
    # so pa == 0 raises ZeroDivisionError here.
    wOBA = ((0.691*bb + 0.722*hbp + 0.884*_1b + 1.257*_2b + 1.593*_3b + 2.058*hr + 0.2*sb - 0.398*cs)/(pa))
    # Park-adjust by dividing by the park factor.
    park_wOBA = wOBA/pf
    h = _1b + _2b + _3b + hr
    if pa != 0:
        obp = (h + bb + hbp)/float(pa)
    else:
        obp = 0.0
    if ab != 0:
        slg = (_1b + 2*_2b + 3*_3b + 4*hr)/float(ab)
    else:
        slg = 0.0
    ops = obp+slg
    lg_obp = float(get_league_average_hitters(year,'lg_obp'))
    lg_slg = float(get_league_average_hitters(year,'lg_slg'))
    OPS_plus = 100*(((obp/pf)/lg_obp)+((slg/pf)/lg_slg)-1)
    lg_woba = float(get_league_average_hitters(year,'lg_woba'))
    lg_r = float(get_league_average_hitters(year,'lg_r'))
    lg_pa = float(get_league_average_hitters(year,'lg_pa'))
    # 1.15 is used here as the wOBA-to-runs scale -- TODO confirm.
    wrc = (((park_wOBA-lg_woba)/1.15)+(lg_r/lg_pa))*pa
    # wRC per 27 outs, with (ab - h) approximating outs made.
    if (ab-h) != 0:
        wrc27 = wrc*27/(ab-h)
    else:
        wrc27 = 0.0
    wRC_plus = ((wrc/pa/(lg_r/lg_pa)*100))
    raa = pa*((park_wOBA-lg_woba)/1.25)
    # ~10 runs per win.
    oWAR = raa/10
    return ops, wOBA, park_wOBA, OPS_plus, wrc, wrc27, wRC_plus, raa, oWAR
def get_zips_offensive_metrics(year, pf, pa, ab, bb, hbp, _1b, _2b, _3b, hr, sb, cs):
    """
    Converts a players offensive zips boxscore stats in a given year to more advanced stats (ops, wOBA, park_adjusted wOBA, OPS+, wRC, wRC/27, wRC+, RAA, and offensive WAR)
    """
    # Identical to get_offensive_metrics, but baselined against the ZiPS
    # projection averages instead of the processed league averages.
    # NOTE(review): divides by pa before the pa != 0 guard; pa == 0 raises
    # ZeroDivisionError here.
    wOBA = ((0.691*bb + 0.722*hbp + 0.884*_1b + 1.257*_2b + 1.593*_3b + 2.058*hr + 0.2*sb - 0.398*cs)/(pa))
    park_wOBA = wOBA/pf
    h = _1b + _2b + _3b + hr
    if pa != 0:
        obp = (h + bb + hbp)/float(pa)
    else:
        obp = 0.0
    if ab != 0:
        slg = (_1b + 2*_2b + 3*_3b + 4*hr)/float(ab)
    else:
        slg = 0.0
    ops = obp+slg
    lg_obp = float(get_zips_average_hitters(year,'lg_obp'))
    lg_slg = float(get_zips_average_hitters(year,'lg_slg'))
    OPS_plus = 100*(((obp/pf)/lg_obp)+((slg/pf)/lg_slg)-1)
    lg_woba = float(get_zips_average_hitters(year,'lg_woba'))
    lg_r = float(get_zips_average_hitters(year,'lg_r'))
    lg_pa = float(get_zips_average_hitters(year,'lg_pa'))
    wrc = (((park_wOBA-lg_woba)/1.15)+(lg_r/lg_pa))*pa
    if (ab-h) != 0:
        wrc27 = wrc*27/(ab-h)
    else:
        wrc27 = 0.0
    wRC_plus = ((wrc/pa/(lg_r/lg_pa)*100))
    raa = pa*((park_wOBA-lg_woba)/1.25)
    oWAR = raa/10
    return ops, wOBA, park_wOBA, OPS_plus, wrc, wrc27, wRC_plus, raa, oWAR
def get_league_average_pitchers(year, category):
    # Return one league-average pitching figure for `year`; `category` is
    # one of 'lg_r', 'lg_gs', 'lg_era', 'lg_fip', 'fip_const' (None if
    # unknown).
    # NOTE(review): `era as fip` aliases ERA into the FIP slot, so 'lg_fip'
    # actually returns the league ERA -- confirm this is intentional.
    q = """SELECT
        r,
        gs,
        era,
        era as fip,
        fip_const
        FROM processed_league_averages_pitching
        WHERE year = %s
        """
    qry = q % year
    query = db.query(qry)[0]
    lg_r, lg_gs, lg_era, lg_fip, fip_const = query
    avgs = {"lg_r":lg_r, "lg_gs":lg_gs, "lg_era":lg_era, "lg_fip":lg_fip, "fip_const":fip_const}
    return avgs.get(category)
def get_zips_average_pitchers(year, category):
    # Same as get_league_average_pitchers but sourced from the ZiPS
    # projection averages table.
    # NOTE(review): as above, 'lg_fip' is really the league ERA via the
    # `era as fip` alias.
    q = """SELECT
        r,
        gs,
        era,
        era as fip,
        fip_const
        FROM zips_averages_pitching
        WHERE year = %s
        """
    qry = q % year
    query = db.query(qry)[0]
    lg_r, lg_gs, lg_era, lg_fip, fip_const = query
    avgs = {"lg_r":lg_r, "lg_gs":lg_gs, "lg_era":lg_era, "lg_fip":lg_fip, "fip_const":fip_const}
    return avgs.get(category)
def get_pitching_metrics(metric_9, ip, year, pf, g, gs, _type):
    """
    Converts a players pitching boxscore stats in a given year to either parkadjusted FIP/ERA, FIP-/ERA-, fWAR/rWAR.
    """
    # metric_9 is a per-9-innings rate (ERA or FIP, chosen by _type);
    # park-adjust it first.
    park_metric = metric_9/pf
    search_metric = 'lg_' + _type
    lg_metric = float(get_league_average_pitchers(year, search_metric))
    # The "minus" stat: 100 * player rate / league rate (lower is better).
    metric_min = 100*(park_metric/lg_metric)
    # 0.92 scales ERA/FIP to a runs-allowed rate -- TODO confirm source.
    RApxMETRIC = float(park_metric)/0.92
    lg_r = float(get_league_average_pitchers(year, 'lg_r'))
    lg_gs = float(get_league_average_pitchers(year, 'lg_gs'))
    # Dynamic run environment blending the pitcher's own rate with the
    # league rate, weighted by innings per appearance.
    metric_RE = ((((18-(float(ip)/float(g)))*(float(lg_r)/float(lg_gs))+(float(ip)/float(g))*RApxMETRIC)/18)+2)*1.5
    # Different replacement-level constants for starters (> half of
    # appearances are starts) versus relievers.
    if (float(gs)/float(g)) > 0.5:
        METRIC_x_win = ((lg_metric-RApxMETRIC)/(metric_RE))+0.5
        METRIC_x_win_9 = METRIC_x_win - 0.38
    else:
        METRIC_x_win = ((lg_metric-RApxMETRIC)/(metric_RE))+0.52
        METRIC_x_win_9 = METRIC_x_win - 0.46
    METRIC_WAR = METRIC_x_win_9*float(ip)/9.0
    return park_metric, metric_min, METRIC_WAR
def get_zips_pitching_metrics(metric_9, ip, year, pf, g, gs, _type):
    # ZiPS-baselined variant of get_pitching_metrics; see that function for
    # the formula commentary.
    park_metric = metric_9/pf
    search_metric = 'lg_' + _type
    lg_metric = float(get_zips_average_pitchers(year, search_metric))
    metric_min = 100*(park_metric/lg_metric)
    RApxMETRIC = float(park_metric)/0.92
    lg_r = float(get_zips_average_pitchers(year, 'lg_r'))
    lg_gs = float(get_zips_average_pitchers(year, 'lg_gs'))
    metric_RE = ((((18-(float(ip)/float(g)))*(float(lg_r)/float(lg_gs))+(float(ip)/float(g))*RApxMETRIC)/18)+2)*1.5
    # NOTE(review): the starter test here (gs >= 3 or >40% starts) differs
    # from the >50% test in get_pitching_metrics -- confirm intentional.
    if (gs >= 3 or float(gs)/float(g) > 0.4):
        METRIC_x_win = ((lg_metric-RApxMETRIC)/(metric_RE))+0.5
        METRIC_x_win_9 = METRIC_x_win - 0.38
    else:
        METRIC_x_win = ((lg_metric-RApxMETRIC)/(metric_RE))+0.52
        METRIC_x_win_9 = METRIC_x_win - 0.46
    METRIC_WAR = METRIC_x_win_9*float(ip)/9.0
    return park_metric, metric_min, METRIC_WAR
def get_hand(player_name):
    # Map a roster-name suffix to a handedness code:
    # trailing '*' -> 'l' (left), trailing '#' -> 's' (switch),
    # anything else (including an empty name) -> 'r' (right).
    suffix = player_name[-1:]
    if suffix == "*":
        return 'l'
    if suffix == "#":
        return 's'
    return 'r'
def get_def_values(search_name, position, year):
    """
    Gets the baseline defensive ratings for a player given the desired position and year.
    """
    p = position.lower()
    pos = position.upper()
    # Column names in zips_defense are position-prefixed.
    rn = '%s_range' % p
    er = '%s_error' % p
    # Defaults: positions without arm/passed-ball ratings select the SQL
    # literal constants 0 and 2 instead of real columns.
    arm, pb = 0,2
    if p == 'c':
        arm = 'c_arm'
        pb = 'c_pb'
    elif p in ('lf','rf','cf'):
        arm = 'of_arm'
    try:
        # Pitchers/DH/utility codes get neutral all-zero ratings; everyone
        # else is looked up, with an IndexError (no row) also mapping to
        # neutral ratings.
        if p not in ('p','dh', 'ph', 'if', 'sp', 'rp', 'of'):
            rtg_q = """SELECT
            %s,
            %s,
            %s,
            %s
            FROM zips_defense
            WHERE year = %s
            AND player_name = '%s'"""
            rtg_qry = rtg_q % (rn, er, arm, pb, year, search_name)
            rtgs = db.query(rtg_qry)[0]
        else:
            rtgs = (0,0,0,0)
    except IndexError:
        rtgs = (0,0,0,0)
    _r, error, _a, passed_ball = rtgs
    # NULL columns fall back to average-looking values.
    if error is None:
        error = 100
    if _r is None:
        _r = 'AV'
    if _a is None:
        _a = 'AV'
    if passed_ball is None:
        passed_ball = 2
    _range = str(_r)
    _arm = str(_a)
    # range and arm text values need to translate to numeric values
    if _range.upper() in ('PO','PR'):
        num_rn = -2
    elif _range.upper() in ('FR',):
        num_rn = -1
    elif _range.upper() in ('AV','AVG'):
        num_rn = 0
    elif _range.upper() in ('VG',):
        num_rn = 1
    elif _range.upper() in ('EX',):
        num_rn = 2
    else:
        num_rn = 0
    if _arm.upper() in ('PO','PR'):
        num_arm = -2
    elif _arm.upper() in ('FR',):
        num_arm = -1
    elif _arm.upper() in ('AV','AVG'):
        num_arm = 0
    elif _arm.upper() in ('VG',):
        num_arm = 1
    elif _arm.upper() in ('EX',):
        num_arm = 2
    else:
        num_arm = 0
    # Weight the raw ratings by the per-position coefficients
    # [range, error, arm, passed ball].
    weights = get_pos_formula(pos)
    rn_val = float(weights[0])*num_rn
    #100 is average error rating. we want the amount above/below this number
    err_val = float(weights[1])*((100-float(error))/100)
    arm_val = float(weights[2])*num_arm
    # 2 is treated as the average passed-ball rating.
    pb_val = float(weights[3])*(2-passed_ball)
    return rn_val, err_val, arm_val, pb_val
def input_name(player_name):
    # Seed the name_mapper table with an identity mapping for a player we
    # have not seen before: first token -> right_fname, remainder ->
    # right_lname. Double quotes are escaped for the SQL string literal,
    # but other characters are still interpolated raw.
    chk_val = db.query('select count(*) from name_mapper where wrong_name = "%s"' % (player_name.replace('"', '""')))[0][0]
    if chk_val == 0:
        right_fname = player_name.split(" ")[0]
        right_lname = ' '.join(player_name.split(" ")[1:])
        entry = {'wrong_name': player_name, 'right_fname': right_fname, 'right_lname': right_lname}
        db.insertRowDict(entry, 'name_mapper', insertMany=False, replace=True, rid=0,debug=1)
        db.conn.commit()
        print "\t\tEntered new player %s --> '%s'+'%s'" % (player_name, right_fname, right_lname)
| {
"repo_name": "Connor-R/NSBL",
"path": "NSBL_helpers.py",
"copies": "1",
"size": "13283",
"license": "mit",
"hash": 4358234479631486000,
"line_mean": 29.819025522,
"line_max": 182,
"alpha_frac": 0.5599638636,
"autogenerated": false,
"ratio": 2.668340699075934,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.37283045626759337,
"avg_score": null,
"num_lines": null
} |
"""A set of helper functions to work with the astropy module."""
import functools
import random
import string
import tempfile
import subprocess
import collections
from itertools import cycle, islice, chain, combinations, zip_longest
import scipy
import numpy as np
from astropy.table import Table, join
from astropy.coordinates import SkyCoord
from astropy import units as u
#from astroquery.vizier import Vizier
###############################################################################
# Astropy Utilities #
###############################################################################
def change_column_dtype(table, colname, newdtype):
    '''Replace column *colname* of *table* with a copy cast to *newdtype*.

    The column keeps its original position within the table.
    '''
    position = table.colnames.index(colname)
    converted = np.asanyarray(table[colname], dtype=newdtype)
    del(table[colname])
    table.add_column(converted, index=position)
def astropy_table_index(table, column, value):
    '''Return the row indices of *table* where *column* equals *value*.

    Convenience wrapper around astropy_table_indices for a single value;
    all matching row indices are returned.
    '''
    return astropy_table_indices(table, column, [value])
def astropy_table_indices(table, column, values):
    '''Return the row indices of *table* whose *column* entry is in *values*.'''
    selection_mask = mark_selections_in_columns(table[column], values)
    return np.where(selection_mask)
def mark_selections_in_columns(col, values):
    '''Return index indicating values are in col.

    Returns a boolean array the size of col which is True where col holds an
    entry equal to one of values, and False otherwise.
    '''
    if len(col) > len(values)**2:
        # Few candidate values relative to the column: OR together one
        # equality mask per value.
        return multi_logical_or(*[col == v for v in values])
    else:
        try:
            valset = set(values)
        except TypeError:
            # values is masked; keep only the unmasked (hashable) entries.
            unmasked_values = values[values.mask == False]
            valset = set(unmasked_values)
        index = []
        for v in col:
            try:
                incol = v in valset
            except TypeError:
                # Unhashable cell values can never be members of the set.
                incol = False
            index.append(incol)
        # Fix: np.bool was removed in NumPy 1.24; the builtin bool is the
        # supported spelling of the boolean dtype.
        return np.array(index, dtype=bool)
def multi_logical_or(*arrs):
    '''Elementwise logical OR of an arbitrary number of boolean arrays.'''
    combined = False
    for arr in arrs:
        combined = np.logical_or(combined, arr)
    return combined
def multi_logical_and(*arrs):
    '''Performs a logical and for an arbitrary number of boolean arrays.

    Fix: the docstring previously said "or" -- this is the AND reduction
    (identity True when called with no arrays).
    '''
    return functools.reduce(np.logical_and, arrs, True)
def astropy_table_row(table, column, value):
    '''Return the row(s) of *table* whose *column* matches *value*.

    Every matching row is included. (NOTE(review): earlier docs promised a
    ValueError when nothing matches, but no such check is visible in this
    lookup path -- an empty selection is returned instead.)
    '''
    matches = astropy_table_index(table, column, value)
    return table[matches]
def extract_subtable_from_column(table, column, selections):
    '''Return the subtable whose *column* values appear in *selections*.'''
    keep = astropy_table_indices(table, column, selections)
    return table[keep]
def filter_column_from_subtable(table, column, selections):
    '''Return the subtable whose *column* values do NOT appear in *selections*.'''
    selected = astropy_table_indices(table, column, selections)
    kept = get_complement_indices(selected, len(table))
    return table[kept]
def join_by_id(table1, table2, columnid1, columnid2, join_type="inner",
               conflict_suffixes=("_A", "_B"), idproc=None,
               additional_keys=[]):
    '''Joins two tables based on columns with different names.

    Table1 and table2 are the tables to be joined together. The column names
    that should be joined are the two columnids. Columnid1 will be the column
    name for the returned table. In case of conflicts, the
    conflict suffixes will be appended to the keys with conflicts. To merge
    conflicts instead of keeping them separate, add the column name to
    additional_keys.

    If the entries in the columns to be merged should be processed a certain
    way, the function that does the processing should be given in idproc. For
    no processing, "None" should be passed instead.
    '''
    # NOTE(review): additional_keys=[] is a mutable default argument; it is
    # only read here, but a None default would be safer.
    # Process the columns if need be.
    if idproc is not None:
        # I want to duplicate the data so it won't be lost. And by keeping it
        # in the table, it will be preserved when it is joined.
        origcol1 = table1[columnid1]
        origcol2 = table2[columnid2]
        # Stash the originals under throwaway random names so the processed
        # values can temporarily take over the real column names.
        randomcol1 = generate_random_string(10)
        randomcol2 = generate_random_string(10)
        table1.rename_column(columnid1, randomcol1)
        table2.rename_column(columnid2, randomcol2)
        table1[columnid1] = idproc(origcol1)
        table2[columnid2] = idproc(origcol2)
    # If columnid1 = columnid2, then we can go straight to a join. If not, then
    # columnid2 needs to be renamed to columnid1. If table2[columnid1] exists,
    # then we have a problem and an exception should be thrown.
    if columnid1 != columnid2:
        if columnid1 not in table2.colnames:
            table2[columnid1] = table2[columnid2]
        else:
            raise ValueError(
                "Column {0} already exists in second table.".format(columnid1))
    try:
        newtable = join(
            table1, table2, keys=[columnid1]+additional_keys,
            join_type=join_type, table_names=list(conflict_suffixes),
            uniq_col_name="{col_name}{table_name}")
    finally:
        # Clean up the new table.
        # NOTE(review): if join() raises while idproc is set, `newtable` is
        # undefined below and the cleanup itself raises NameError, masking
        # the original exception.
        if columnid1 != columnid2:
            del(table2[columnid1])
        if idproc is not None:
            del(table1[columnid1])
            del(table2[columnid2])
            del(newtable[randomcol1])
            del(newtable[randomcol2])
            table1.rename_column(randomcol1, columnid1)
            table2.rename_column(randomcol2, columnid2)
    return newtable
def join_by_ra_dec(
        table1, table2, ra1="RA", dec1="DEC", ra2="RA", dec2="DEC",
        ra1_unit=u.degree, dec1_unit=u.degree, ra2_unit=u.degree, dec2_unit=u.degree,
        match_threshold=5*u.arcsec, join_type="inner",
        conflict_suffixes=("_A", "_B")):
    '''Join two tables by RA and DEC.

    This function will essentially perform a join between tables using
    coordinates. The column names for the coordinates should be given in ra1,
    ra2, dec1, dec2, with units in the corresponding *_unit arguments.
    Sources are cross-matched on the sky and pairs closer than
    match_threshold are joined.

    In case of conflicts, the conflict_suffixes will be used for columns in
    table1 and table2, respectively.
    '''
    def _in_unit(column, unit):
        # Coerce a coordinate column into the requested unit: convert when
        # the column already carries a unit, otherwise attach the unit.
        try:
            return column.to(unit)
        except u.UnitConversionError:
            return column * unit
    # Instead of directly using RA/Dec, set up a scratch column that maps
    # rows in table1 to rows in table2.
    match_column = generate_random_string(10)
    ra1_coords = _in_unit(table1[ra1], ra1_unit)
    dec1_coords = _in_unit(table1[dec1], dec1_unit)
    ra2_coords = _in_unit(table2[ra2], ra2_unit)
    dec2_coords = _in_unit(table2[dec2], dec2_unit)
    # Cross-match the two catalogs to find the nearest neighbors.
    coords1 = SkyCoord(ra=ra1_coords, dec=dec1_coords)
    coords2 = SkyCoord(ra=ra2_coords, dec=dec2_coords)
    idx, d2d, d3d = coords1.match_to_catalog_sky(coords2)
    # We only count matches which are within the match threshold.
    matches = d2d < match_threshold
    matched_tbl1 = table1[matches]
    try:
        table2[match_column] = np.arange(len(table2))
        matched_tbl1[match_column] = table2[idx[matches]][match_column]
        newtable = join(
            matched_tbl1, table2, keys=match_column,
            join_type=join_type, table_names=list(conflict_suffixes),
            uniq_col_name="{col_name}{table_name}")
    finally:
        # Fix: only table2's scratch column is guaranteed to exist here.
        # Previously newtable[match_column] was also deleted inside this
        # finally block, which raised NameError (masking the original
        # exception) whenever join() failed.
        del(table2[match_column])
    del(newtable[match_column])
    # Want to inherit table1 column naming.
    # This will require deleting the table2 coordinates from the new table.
    try:
        del(newtable[ra2])
    except KeyError:
        # This occurs when ra1=ra2 and the join suffixed both versions.
        assert ra1==ra2
        newtable.rename_column(ra1+conflict_suffixes[0], ra1)
        del(newtable[ra2+conflict_suffixes[1]])
    try:
        del(newtable[dec2])
    except KeyError:
        assert dec1==dec2
        newtable.rename_column(dec1+conflict_suffixes[0], dec1)
        del(newtable[dec2+conflict_suffixes[1]])
    return newtable
def generate_random_string(length):
    '''Generate a random string of ASCII letters with the given length.'''
    letters = [random.choice(string.ascii_letters) for _ in range(length)]
    return "".join(letters)
def get_complement_indices(initindices, tablelength):
    '''Return (as an np.where tuple) the indices NOT in initindices.

    Given indices into a table of tablelength rows, this produces the
    indices of every other row.
    '''
    # Fix: np.bool was removed in NumPy 1.24; use the builtin bool dtype.
    compmask = np.ones(tablelength, bool)
    compmask[initindices] = 0
    return np.where(compmask)
def get_complement_table(partialtable, totaltable, compcolumn):
    '''Return the rows of totaltable that are absent from partialtable.

    Stacking the result with partialtable and sorting by compcolumn would
    recreate totaltable.
    '''
    matched = astropy_table_indices(totaltable, compcolumn,
                                    partialtable[compcolumn])
    remainder = get_complement_indices(matched, len(totaltable))
    return totaltable[remainder]
def split_table_by_value(table, column, splitvalue):
    '''Bifurcate a table by comparing *column* against *splitvalue*.

    Returns a 2-tuple: rows with column < splitvalue first, rows with
    column >= splitvalue second.
    '''
    below = np.where(table[column] < splitvalue)
    at_or_above = np.where(table[column] >= splitvalue)
    return table[below], table[at_or_above]
def first_row_in_group(tablegroup):
    '''Build a table from the first row of every group in *tablegroup*.

    Useful for grouped tables with multiple entries per grouping where the
    first row is the preferable one, such as the Catalog of Active Binary
    Systems (III).
    '''
    first_rows = [group[0] for group in tablegroup.groups]
    return Table(rows=first_rows, names=tablegroup.colnames)
def byte_to_unicode_cast(bytearr):
    '''Cast a numpy byte array to unicode.

    A change in Astropy 3.0 led to some columns from FITS files being stored
    as numpy byte arrays instead of strings. This is an explicit cast of such
    a column to a string array.

    https://github.com/astropy/astropy/pull/6821
    The text in the bug report seems to indicate that conversion from bytes
    objects to unicode should be done transparently, but this doesn't seem to
    be the case.'''
    # Fix: np.unicode_ was removed in NumPy 2.0; np.str_ is the same scalar
    # type under its supported name.
    strcol = np.asarray(bytearr, np.str_)
    return strcol
def set_numeric_fill_values(table, fill_value):
    '''Set fill_value on every numeric column of *table*.

    Convenience helper so a filled table can be produced without manually
    configuring each column.
    '''
    numeric_columns = [name for name in table.colnames
                       if np.issubdtype(table[name].dtype, np.number)]
    for name in numeric_columns:
        table[name].fill_value = fill_value
def mask_numeric_fill_values(table, fill_value):
    '''Mask entries equal to fill_value in every numeric column of *table*.'''
    for name in table.colnames:
        column = table[name]
        if np.issubdtype(column.dtype, np.number):
            table[name] = np.ma.masked_values(column, fill_value)
###############################################################################
# Astroquery Catalog #
###############################################################################
def Vizier_cached_table(tblpath, tablecode):
    '''Read a table from disk, querying Vizier if needed.

    For large tables which can be automatically queried from Vizier, but take
    a long time to download, this function will download the queried table
    into tblpath, and then read from it for all following times.

    The tablecode is the code (e.g. "J/A+A/512/A54/table8") uniquely
    identifying the desired table.'''
    try:
        tbl = Table.read(str(tblpath), format="ascii.ipac")
    except FileNotFoundError:
        # NOTE(review): the astroquery Vizier import at the top of this file
        # is commented out, so a cache miss here raises NameError until it
        # is restored.
        Vizier.ROW_LIMIT = -1
        tbl = Vizier.get_catalogs(tablecode)[0]
        tbl.write(str(tblpath), format="ascii.ipac")
    return tbl
###############################################################################
# Spreadsheet help #
###############################################################################
def inspect_table_as_spreadsheet(table):
    '''Open *table* in LibreOffice Calc for interactive inspection.

    The table is written to a temporary CSV file and handed to the
    spreadsheet, trying the "oocalc" launcher first and "localc" second.
    '''
    with tempfile.NamedTemporaryFile() as fp:
        table.write(fp.name, format="ascii.csv")
        command = ["oocalc", fp.name]
        try:
            subprocess.run(command)
        except FileNotFoundError:
            # Newer LibreOffice installs ship the "localc" launcher instead.
            command[0] = "localc"
            subprocess.run(command)
def inspect_table_in_topcat(table):
    '''Opens the table in TOPCAT

    TOPCAT is a useful tool for inspecting tables that are suited to be written
    as FITS files. TOPCAT is actually much more extensible than we are using it
    for, but it's helpful for this purpose.
    '''
    with tempfile.NamedTemporaryFile() as fp:
        table.write(fp.name, format="fits", overwrite=True)
        # NOTE(review): hard-coded per-user TOPCAT path; this only works on
        # the original author's machine -- consider making it configurable.
        topcatargs = ["/home/regulus/simonian/topcat/topcat", fp.name]
        subprocess.run(topcatargs)
###############################################################################
# Caching large data files #
###############################################################################
class memoized(object):
    '''Decorator. Cache's a function's return value each time it is called. If
    called later with the same arguments, the cached value is returned (not
    reevaluated). Calls with unhashable arguments bypass the cache.
    '''
    def __init__(self, func):
        self.func = func
        self.cache = {}
    def __call__(self, *args):
        # Fix: the previous guard `isinstance(args, collections.Hashable)`
        # had two problems: collections.Hashable was removed in Python 3.10,
        # and a tuple is always an instance of Hashable even when its
        # elements are not, so unhashable arguments crashed on the dict
        # lookup below. EAFP: try hashing the argument tuple itself.
        try:
            hash(args)
        except TypeError:
            # uncacheable. a list, for instance.
            # better to not cache than blow up
            print("Uncacheable")
            return self.func(*args)
        if args in self.cache:
            print("Cached")
            return self.cache[args]
        else:
            print("Putting into cache")
            value = self.func(*args)
            self.cache[args] = value
            return value
    def __repr__(self):
        '''Return the function's docstring.'''
        return self.func.__doc__
    def __get__(self, obj, objtype):
        '''Support instance methods.'''
        return functools.partial(self.__call__, obj)
def shortcut_file(filename, format="fits", fill_value=-9999):
    ''' Return a decorator that both caches the result and saves it to a file.

    This decorator should be used for commonly used snippets and combinations
    of tables that are small enough to be read in quickly, and processed enough
    that generating them from scratch is time-intensive.
    '''
    # NOTE(review): the parameter name `format` shadows the builtin.
    class Memorize(object):
        '''
        A function decorated with @memorize caches its return value every time
        it is called. If the function is called later with the same arguments,
        the cached value is returned (the function is not reevaluated). The
        cache is stored in the filename provided in shortcut_file for reuse in
        future executions. If the function corresponding to this decorated has
        been updated, make sure to change the object at the given filename.
        '''
        def __init__(self, func):
            self.func = func
            self.filename = filename
            self.table = None
        def __call__(self, *args):
            # NOTE(review): *args is not part of the cache key -- after the
            # first call (or a cache-file hit) the same table is returned
            # for any arguments. Verify decorated functions take no args.
            if self.table is None:
                try:
                    self.read_cache()
                except FileNotFoundError:
                    value = self.func(*args)
                    self.table = value
                    self.save_cache()
            return self.table
        def read_cache(self):
            '''
            Read the table in from the given location. This will take the
            format given in the shortcut_file command.
            '''
            self.table = Table.read(self.filename, format=format,
                                    character_as_bytes=False)
            # Re-mask the sentinel fill values that were written to disk.
            mask_numeric_fill_values(self.table, fill_value)
            # If the dtype is fits, then the Astropy FITS program doesn't
            # convert correctly between bytes and strings.
            # See https://github.com/astropy/astropy/issues/5280
        def save_cache(self):
            '''
            Save the table into the given filename using the given format.
            '''
            set_numeric_fill_values(self.table, fill_value)
            try:
                self.table.write(self.filename, format=format)
            except FileNotFoundError:
                # Parent directory missing: create it, then retry the write.
                self.filename.parent.mkdir(parents=True)
                self.table.write(self.filename, format=format)
        def __repr__(self):
            ''' Return the function's docstring. '''
            return self.func.__doc__
        def __get__(self, obj, objtype):
            ''' Support instance methods. '''
            return functools.partial(self.__call__, obj)
    return Memorize
###############################################################################
# Itertools help #
###############################################################################
def roundrobin(*iterables):
    '''roundrobin('ABC', 'D', 'EF') --> ADEBFC

    Interleave the iterables, dropping each one as it is exhausted.
    Recipe credited to George Sakkis.'''
    active = len(iterables)
    getters = cycle(iter(it).__next__ for it in iterables)
    while active:
        try:
            for getter in getters:
                yield getter()
        except StopIteration:
            # One iterable ran dry: rebuild the cycle without it.
            active -= 1
            getters = cycle(islice(getters, active))
def take(n, iterable):
    '''Return first n items of the iterable as a list.'''
    result = []
    for item in islice(iterable, n):
        result.append(item)
    return result
def flatten(listOfLists):
    "Flatten one level of nesting"
    # Lazy, like chain.from_iterable: sublists are consumed on demand.
    for sublist in listOfLists:
        for item in sublist:
            yield item
def random_permutation(iterable, r=None):
    """Return a random r-length permutation of iterable as a tuple.

    When r is None the whole pool is permuted."""
    pool = tuple(iterable)
    if r is None:
        r = len(pool)
    return tuple(random.sample(pool, r))
def powerset(iterable):
    "powerset([1,2,3]) --> () (1,) (2,) (3,) (1,2) (1,3) (2,3) (1,2,3)"
    pool = list(iterable)
    subsets_by_size = (combinations(pool, size)
                       for size in range(len(pool) + 1))
    return chain.from_iterable(subsets_by_size)
def consume(iterator, n):
    "Advance the iterator n-steps ahead. If n is none, consume entirely."
    if n is None:
        # Drain the whole iterator at C speed via a zero-length deque.
        collections.deque(iterator, maxlen=0)
        return
    # Step forward n items by advancing to the empty slice at position n.
    next(islice(iterator, n, n), None)
def nth(iterable, n, default=None):
    "Returns the nth item or a default value"
    remainder = islice(iterable, n, None)
    return next(remainder, default)
def zip_equal(*iterables):
    '''Zip the iterables, raising ValueError if their lengths differ.'''
    marker = object()
    for bundle in zip_longest(*iterables, fillvalue=marker):
        if marker in bundle:
            raise ValueError("Iterables have different lengths")
        yield bundle
###############################################################################
# Binary confidence intervals #
###############################################################################
def poisson_upper(n, sigma):
'''Return the Poisson upper limit of the confidence interval.
This is the upper limit for a given number of successes n, and the width of
the confidence interval is given in sigmas.'''
up = (n+1)*(1 - 1/9/(n+1) + sigma/3/np.sqrt(n+1))**3
return up
def scaled_poisson_upper(n, sigma, scale):
    '''Return the upper limit of a scaled Poisson variable.

    Upper confidence limit for n observed successes when the random variable
    was multiplied by a scale factor; sigma sets the interval width.'''
    confidence = scipy.stats.norm.cdf(sigma)
    tail = (1 - confidence)/scale
    return scipy.stats.chi2.ppf(1 - tail, 2*n + 2)/2
def scaled_poisson_lower(n, sigma, scale):
    '''Return the lower limit of a scaled Poisson variable.

    Lower confidence limit for n observed successes when the random variable
    was multiplied by a scale factor; sigma sets the interval width.'''
    confidence = scipy.stats.norm.cdf(sigma)
    return scipy.stats.chi2.ppf(1 - confidence/scale, 2*n)/2
def poisson_upper_exact(n, sigma):
    '''Return the Poisson upper limit of the confidence interval.

    Upper limit for n observed successes with the interval width given in
    sigmas, computed exactly via the inverse chi-square CDF rather than an
    approximation.'''
    confidence = scipy.stats.norm.cdf(sigma)
    return scipy.stats.chi2.ppf(confidence, 2*n + 2)/2
def poisson_lower_exact(n, sigma):
    '''Return the Poisson lower limit of the confidence interval.

    Lower limit for n observed successes with the interval width given in
    sigmas, computed exactly via the inverse chi-square CDF rather than an
    approximation.'''
    confidence = scipy.stats.norm.cdf(sigma)
    return scipy.stats.chi2.ppf(1 - confidence, 2*n)/2
def poisson_lower(n, sigma):
'''Return the Poisson lower limit of the confidence interval.
This is the lower limit for a given number of successes n, and the width of
the confidence interval is given in sigmas. This formula is from Gehrels
(1986) and contains tuned parameters.'''
betas = {1.0: 0.0, 2.0: 0.062, 3.0:0.222}
gammas = {1.0: 0.0, 2.0: -2.19, 3.0: -1.85}
low = n * (1 - 1/9/n - sigma/3/np.sqrt(n) + betas[sigma]*n**gammas[sigma])**3
return low
def binomial_upper(n1, n, sigma=1):
    '''The upper limit of the binomial probability at the given sigma level.

    Upper limit for n1 successes out of n trials, numerically exact via the
    inverse incomplete beta function; the limit is exactly 1 when n1 == n.
    Raises ValueError for non-positive sigma.'''
    if sigma <= 0:
        raise ValueError("The probability needs to be positive.")
    cl = -scipy.special.erf(-sigma)
    exact = scipy.special.betaincinv(n1+1, n-n1, cl)
    return np.where(n1 != n, exact, 1)
def binomial_lower(n1, n, sigma=1):
    '''The lower limit of the binomial probability at the given sigma level.

    Lower limit for n1 successes out of n trials, computed by symmetry as
    one minus the upper limit of the complementary count.'''
    return 1 - binomial_upper(n - n1, n, sigma=sigma)
############################################################################
# Numpy help #
###############################################################################
def slicer_vectorized(arr, strindices):
    '''Extract the characters at strindices from each string in an array.

    Given an array of equal-length strings arr, return a new array whose
    elements are built from the characters of each input string at the
    positions listed in strindices, in that order.

    Rewritten without np.fromstring / ndarray.tostring / np.unicode_,
    all of which are deprecated or removed in modern NumPy.'''
    arr = np.asarray(arr, dtype=np.str_)
    indexarr = np.asarray(strindices, dtype=np.int_)
    # View each string as a row of single characters, then pick columns.
    chars = arr.view('U1').reshape(len(arr), -1)[:, indexarr]
    # The fancy-indexed result is a fresh contiguous array, so each row can
    # be reinterpreted as one fixed-width string.
    return np.ascontiguousarray(chars).view('U' + str(len(indexarr))).ravel()
def check_null(arr, nullvalue):
    '''Return a boolean array marking which values of arr equal nullvalue.

    Recognized null sentinels are np.ma.masked, NaN, and ordinary values.
    This encapsulates the appropriate comparison for each case, because a
    plain ``arr == nullvalue`` does not work for NaN (NaN != NaN) or for
    masked values. Unlike the previous version, a non-numeric sentinel
    (e.g. a string) no longer makes np.isnan raise TypeError.'''
    if nullvalue is np.ma.masked:
        return np.ma.getmaskarray(arr)
    try:
        is_nan_sentinel = np.isnan(nullvalue)
    except TypeError:
        # Non-numeric sentinel: fall back to plain equality below.
        is_nan_sentinel = False
    if is_nan_sentinel:
        return np.isnan(arr)
    return arr == nullvalue
###############################################################################
# Matplotlib Boundaries #
###############################################################################
def round_bound(lowbounds, upbounds, round_interval):
    '''Return lower and upper bounds snapped to the rounding interval.

    The bounds are typically the value minus and plus its error, and
    round_interval should be the tick-mark spacing: the lower limit is
    rounded down to the interval, the upper limit rounded up.'''
    lo = np.min(lowbounds)
    hi = np.max(upbounds)
    lower = (lo // round_interval) * round_interval
    upper = (hi // round_interval + 1) * round_interval
    return lower, upper
def adjust_axes(ax, lowx, highx, lowy, highy, xdiff, ydiff):
    '''Grow the limits of ax so that all the given data fits inside.

    The new limits are the union of the axes' current x and y limits with
    the rounded bounds of the supplied data; xdiff and ydiff give the tick
    intervals used for rounding.'''
    new_xlo, new_xhi = round_bound(lowx, highx, xdiff)
    new_ylo, new_yhi = round_bound(lowy, highy, ydiff)
    cur_xlo, cur_xhi = ax.get_xlim()
    cur_ylo, cur_yhi = ax.get_ylim()
    ax.set_xlim(min(new_xlo, cur_xlo), max(new_xhi, cur_xhi))
    ax.set_ylim(min(new_ylo, cur_ylo), max(new_yhi, cur_yhi))
| {
"repo_name": "cactaur/astropy-utils",
"path": "astropy_util.py",
"copies": "1",
"size": "26719",
"license": "bsd-3-clause",
"hash": -1405172378491564800,
"line_mean": 37.7793904209,
"line_max": 82,
"alpha_frac": 0.6305625211,
"autogenerated": false,
"ratio": 4.013669821240799,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5144232342340799,
"avg_score": null,
"num_lines": null
} |
# A set of helpful classes and functions which are common to most classification algorithms
# This includes a 'Blob' class which will automatically determine the properties of a cluster (yay!!)
import numpy as np
from scipy.optimize import leastsq
import os
from PIL import Image
try:
import least_squares_circle
except ImportError:
from . import least_squares_circle
def chunks(l, n):
    """Yield successive n-sized chunks from l."""
    start = 0
    while start < len(l):
        yield l[start:start + n]
        start += n
def distance(point1, point2):
    # Euclidean (Pythagorean) distance between the 2D points
    # point1 (x, y) and point2 (x, y).
    dx = point2[0] - point1[0]
    dy = point2[1] - point1[1]
    return np.sqrt(dx**2 + dy**2)
def point_line_distance(point, centroid, theta):
    # Perpendicular distance from `point` to the line through `centroid`
    # at angle `theta` (standard two-point form of the point-line formula;
    # cheers wikipedia).
    x1, y1 = centroid
    x2 = centroid[0] + np.cos(theta)
    y2 = centroid[1] + np.sin(theta)
    x0, y0 = point
    numerator = np.fabs((y2-y1)*x0 - (x2-x1)*y0 + x2*y1 - y2*x1)
    denominator = np.sqrt((y2-y1)**2 + (x2-x1)**2)
    return numerator / denominator
# Stores and calculates the attributes of a single cluster ('blob') of pixels
class Blob:
    """A cluster ('blob') of (x, y) pixels and its derived geometry.

    All attributes are computed once at construction time: centroid,
    enclosing radius/diameter, density, a best-fit line (squiggliness and
    its angle) and a best-fit circle (centre, radius, residual).
    """
    def __init__(self, pixels):
        self.pixels = pixels
        self.num_pixels = len(pixels)
        if not self.num_pixels:
            raise Exception("Cannot work on a blank cluster!")
        # Calculate attributes
        self.centroid = self.find_centroid()
        self.radius = self.calculate_radius()
        self.diameter = 2 * self.radius
        self.density = self.calculate_density()
        self.squiggliness, self.best_fit_theta = self.calculate_squiggliness()
        self.best_fit_circle = self.find_best_fit_circle() # x, y, radius, residuals
        self.curvature_radius = self.best_fit_circle[2]
        self.circle_residual = self.best_fit_circle[3]
        self.line_residual = self.squiggliness # For silly people who like words which actually exist
        self.width = self.num_pixels / (2 * self.radius) if not self.num_pixels == 1 else 0
        self.avg_neighbours = self.find_avg_neighbours()
    def find_avg_neighbours(self):
        # Mean count of occupied 8-neighbours over all pixels in the blob.
        n_ns = []
        for x,y in self.pixels:
            z = [(x-1,y-1), (x-1, y), (x-1, y+1), (x, y-1), (x,y+1), (x+1, y-1), (x+1, y), (x+1, y+1)]
            n_ns.append(len(set(z).intersection(self.pixels)))
        return np.mean(n_ns)
    def find_centroid(self):
        # Firstly, compute the centroid of the blob
        x_vals, y_vals = [], []
        for pixel in self.pixels:
            x_vals.append(pixel[0])
            y_vals.append(pixel[1])
        centroid = (np.mean(x_vals), np.mean(y_vals))
        return centroid
    def calculate_radius(self):
        # Loop through each pixel and check its distance from the centroid; set the radius to the highest of these
        radius = 0.0
        for pixel in self.pixels:
            dist = distance(self.centroid, pixel)
            if dist > radius:
                radius = dist
        return radius
    def calculate_density(self):
        # Calculate the fill by hit pixels of a circle of the blob's radius around the centroid
        # This can be >1 as the blob's radius passes through the centre of outer pixels rather than around them
        # Firstly, compute the area of the enclosing circle
        circle_area = np.pi*((self.radius)**2)
        if circle_area == 0:
            # If the blob is only one pixel in size, and so has a radius of 0, it is fully dense
            return 1
        else:
            # Divide the number of pixels in the blob by this
            return self.num_pixels / circle_area
    def calculate_squiggliness(self):
        # Return (sum of squared point-line distances, angle theta) where
        # theta is anticlockwise from the x axis and the line passes
        # through the cluster centroid.
        x_vals, y_vals = [], []
        for pixel in self.pixels:
            x_vals.append(pixel[0])
            y_vals.append(pixel[1])
        # Special case for single pixel: horizontal line, completely linear!
        if len(self.pixels) == 1:
            return (0, 0)
        # Otherwise, use leastsq to estimate a line of best fit,
        # starting near the x axis as the initial guess.
        first_guess_theta = 0.1
        # Use scipy's regression function to magic this into a good LoBF
        best_fit_theta = leastsq(self.residuals, first_guess_theta, args = (np.array(y_vals), np.array(x_vals)))[0] % (np.pi)
        squiggliness = np.sum([point_line_distance(p, self.centroid, best_fit_theta)**2 for p in self.pixels])
        return squiggliness, best_fit_theta[0]
    # For the regression in squiggliness calculations...
    def residuals(self, theta, y, x):
        return point_line_distance((x,y), self.centroid, theta)
    def find_best_fit_circle(self):
        # Circle regression will break if only one pixel is given.
        # (The previous version performed this exact check twice in a row.)
        if self.num_pixels == 1:
            return 0, 0, 0, 0 # We love special cases
        x_vals, y_vals = [], []
        for pixel in self.pixels:
            x_vals.append(pixel[0])
            y_vals.append(pixel[1])
        # The cluster centroid is often a very bad first guess for the circle centre, so try with a couple of others...
        x, y = self.centroid
        d = self.diameter
        # NOTE(review): best_fit_theta already comes out of leastsq in
        # radians, so np.radians() here looks like a double conversion --
        # confirm before changing.
        th = np.radians(self.best_fit_theta)
        p1 = (x + d*np.cos(th - (np.pi/2)), y + d*np.sin(th - (np.pi/2)))
        p2 = (x + d*np.cos(th + (np.pi/2)), y + d*np.sin(th + (np.pi/2)))
        test_circles = [least_squares_circle.leastsq_circle(x_vals, y_vals, test_point) for test_point in [self.centroid, p1, p2]]
        # circle[3] (the residual) is being minimised
        test_circles.sort(key = lambda circle: circle[3])
        return test_circles[0]
| {
"repo_name": "calhewitt/lucid_utils",
"path": "lucid_utils/classification/common.py",
"copies": "2",
"size": "5750",
"license": "mit",
"hash": -51121469724353470,
"line_mean": 43.2307692308,
"line_max": 130,
"alpha_frac": 0.6128695652,
"autogenerated": false,
"ratio": 3.410438908659549,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5023308473859549,
"avg_score": null,
"num_lines": null
} |
""" A set of instructions for data collection or information dissemination.
:Authors: Sana dev team
:Version: 2.0
"""
from django.db import models
from mds.api.utils import make_uuid
class Procedure(models.Model):
    """ A series of steps used to collect data observations. """
    class Meta:
        # Register the model under the "core" Django app.
        app_label = "core"
    uuid = models.SlugField(max_length=36, unique=True, default=make_uuid, editable=False)
    """ A universally unique identifier """
    created = models.DateTimeField(auto_now_add=True)
    """ When the object was created """
    modified = models.DateTimeField(auto_now=True)
    """ updated on modification """
    title = models.CharField(max_length=255)
    """ A descriptive title for the procedure. """
    author = models.CharField(max_length=255)
    """ The author of the procedure """
    description = models.TextField()
    """ Additional narrative information about the procedure. """
    version = models.CharField(max_length=255, default="1.0")
    """ The version string for this instance """
    src = models.FileField(upload_to='core/procedure', blank=True)
    """ File storage location for the procedure """
    def __unicode__(self):
        # Python 2 string representation, e.g. "Intake Form 1.0".
        return "%s %s" % (self.title, self.version)
| {
"repo_name": "dekatzenel/team-k",
"path": "mds/core/models/procedure.py",
"copies": "1",
"size": "1272",
"license": "bsd-3-clause",
"hash": -9105109386167426000,
"line_mean": 29.2857142857,
"line_max": 90,
"alpha_frac": 0.659591195,
"autogenerated": false,
"ratio": 4.038095238095238,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5197686433095238,
"avg_score": null,
"num_lines": null
} |
# A set of method to build an instance
import re
from oslib import osinit, OSLibError
import subprocess
import time
from oslib.mime_utils import MimeMessage, URL
import oslib.getuser as getuser
import os
import os.path
from boto.ec2.blockdevicemapping import BlockDeviceMapping, BlockDeviceType
from boto.ec2.networkinterface import NetworkInterfaceSpecification,NetworkInterfaceCollection
import oslib
import socket
import oslib.ec2_objects
import yaml
import oslib.resources
from oslib.instance.windows import GetWindowsPassword
def parse_facts(option, opt_str, value, parser, *args, **kwargs):
    # optparse callback for -F: `value` is a (fact_name, fact_value) pair
    # accumulated into the multi-valued local_facts mapping on the parser.
    fact_name, fact_value = value
    facts = parser.values.local_facts
    facts.setdefault(fact_name, []).append(fact_value)
def file_parser(parser):
    """Register all instance-building command line options on `parser`.

    `parser` is an optparse-style parser; every option stores its value on
    `parser.values` under its `dest` name. Multi-valued options use
    action='append', and -F accumulates key/value facts via parse_facts.
    """
    parser.add_option("-n", "--name", dest="name", help="Instance name", default=None)
    parser.add_option("-s", "--volume_size", dest="volume_size", help="new volume size (GB)", default=[], action='append', type="string")
    parser.add_option("-i", "--snap_id", dest="snap_id", help="some snashop to generate volume from", default=[], action='append')
    parser.add_option("-U", "--url", dest="url_commands", help="URL to os-init command", default=[], action='append')
    parser.add_option("-t", "--instance-type", dest="instance_type", help="Specifies the type of instance to be launched.", default=None)
    parser.add_option("-p", "--placement", dest="placement", help="The availability zone in which to launch the instances")
    parser.add_option("-S", "--security_groups", dest="security_groups", help="The names of the security groups with which to associate instances", action='append')
    parser.add_option("-u", "--user", dest="user", help="login user", default=None)
    parser.add_option("-k", "--key_name", dest="key_name", help="key name", default=None)
    parser.add_option("-f", "--key_file", dest="key_file", help="key file", default=None)
    parser.add_option("-F", "--Fact", dest="local_facts", help="Local facts", default={}, action="callback", callback=parse_facts, nargs=2, type="string")
    parser.add_option("-H", "--hostname", dest="hostname", help="Set the hostname", default=None)
    parser.add_option("-T", "--template", dest="template", help="template file", default=None)
    parser.add_option("-V", "--variable", dest="variables", help="Variables", default=[], action='append')
    parser.add_option("-r", "--ressource", dest="ressource", help="embedded ressource to add", default=[], action='append')
    parser.add_option("-R", "--ressources_dir", dest="ressources_dir", help="ressource dir search path", default=[], action='append')
    parser.add_option("-e", "--elastic_ip", dest="elastic_ip", help="create and associate an EIP for this vm", default=None, action="store_true")
    parser.add_option("-I", "--private_ip", dest="private_ip_address", help="set this IP, if instance is in a VPC", default=None)
    parser.add_option("-P", "--profile", dest="instance_profile_arn", help="The arn of the IAM Instance Profile (IIP) to associate with the instances.", default=None)
    parser.add_option("-N", "--notrun", dest="run", help="Don't run the post install command", default=True, action="store_false")
    parser.add_option("", "--subnet-id", dest="subnet_id", help="", default=None)
def build_user_data(user_data_properties, **kwargs):
    """Assemble the EC2 user-data MIME payload from the parsed options.

    Pops the user-data related entries out of kwargs (variables, hostname,
    local_facts, url_commands, ressource, ressources_dir), folds them into
    a MimeMessage, stores the rendered message under kwargs['user_data']
    and returns the remaining kwargs.

    Raises OSLibError for a variable without '=' or an unknown resource.
    """
    user_data = MimeMessage()
    for var in kwargs.pop('variables'):
        var_content = var.partition('=')
        # str.partition always returns a 3-tuple, so the previous
        # `len(var_content) != 3` check could never fire; test the
        # separator element instead to actually reject malformed input.
        if var_content[1] != '=':
            raise OSLibError("Invalide variable: %s" % var)
        user_data_properties[var_content[0]] = var_content[2]
    if len(user_data_properties) > 0:
        user_data.append(user_data_properties)
    hostname = kwargs.pop('hostname')
    if hostname is not None:
        user_data.append("#!/bin/bash\nhostname %s && uname -a" % hostname)
    #Check for local facts
    local_facts = kwargs.pop('local_facts', None)
    if local_facts is not None and len(local_facts) > 0:
        # Render the facts as a small YAML document for facter.
        user_data_string = "---\n"
        for k,v in local_facts.iteritems():
            user_data_string += "%s: %s\n" % (k, ",".join(v))
        user_data.append(user_data_string, content_type='application/facter-yaml', filename='localfacts.yaml')
    url_commands = kwargs.pop('url_commands')
    if len(url_commands) > 0:
        user_data.append(url_commands)
    root_embedded = oslib.resources.__path__
    search_path = [ root_embedded[0] ]
    search_path.extend(kwargs.pop('ressources_dir'))
    for r in kwargs.pop('ressource'):
        done = False
        # look for the resource in the resources search path
        for path in search_path:
            resource_path = os.path.join(path, r)
            if os.path.exists(resource_path):
                user_data.append(content_file_path=resource_path)
                done = True
        if not done:
            raise OSLibError("resource not found: %s" % r)
    kwargs['user_data'] = "%s" % user_data
    return kwargs
def get_remote_user(ctxt, **kwargs):
    # Prefer an explicitly supplied login user, falling back to the
    # context's default; the 'user' key is consumed from kwargs either way.
    remote_user = kwargs.pop('user', None) or ctxt.user
    return (remote_user, kwargs)
def get_key_file(ctxt, **kwargs):
    # Prefer an explicitly supplied key file, falling back to the context's
    # default; the 'key_file' key is consumed from kwargs either way.
    key_file = kwargs.pop('key_file', None) or ctxt.key_file
    return (key_file, kwargs)
tag_re = re.compile('tag:(.*)')
def do_tags(**kwargs):
    """Split 'tag:*' entries and the instance name out of kwargs.

    Returns (tags, kwargs): every 'tag:<key>' argument becomes tags[<key>],
    'name' becomes tags['Name'], and a 'creator' tag defaulting to the
    current user is guaranteed. All consumed keys are removed from kwargs.
    """
    tags = {}
    # Iterate over a snapshot of the keys: the dict is mutated (pop) while
    # being scanned, which raises RuntimeError over a live view on Python 3.
    for arg in list(kwargs.keys()):
        match_tag = tag_re.match(arg)
        if match_tag is not None:
            tags[match_tag.group(1)] = kwargs.pop(arg)
    name = kwargs.pop('name', None)
    if name is not None:
        tags['Name'] = name
    if not 'creator' in tags:
        tags['creator'] = getuser.user
    return (tags, kwargs)
def remote_setup(instance, remote_user, key_file):
    """Copy osinit.py to the fresh instance over scp and run it via ssh.

    Host-key checking is disabled because the instance is brand new; its
    host keys are removed from known_hosts afterwards.
    """
    # Locate the osinit module source on disk (prefer the .py source when
    # __file__ points at a compiled .pyc).
    osinit_path = osinit.__file__
    if osinit_path.endswith('.pyc'):
        osinit_path = osinit_path[:len(osinit_path) - 1]
    remote_path="%s@%s:/tmp/osinit.py" % (remote_user, instance.public_dns_name)
    args=["scp", "-o", "GSSAPIAuthentication=no", "-o", "UserKnownHostsFile=/dev/null", "-o", "StrictHostKeyChecking=no", "-i", key_file, osinit_path, remote_path]
    subprocess.call(args)
    # Bootstrap: make sure sudo exists, then execute the uploaded script.
    for remote_cmd in ('yum install -y sudo', 'sudo -n python /tmp/osinit.py decode'):
        args=["ssh", "-tt", "-o", "GSSAPIAuthentication=no", "-o", "UserKnownHostsFile=/dev/null", "-x", "-o", "StrictHostKeyChecking=no", "-i", key_file, "-l", remote_user, instance.public_dns_name, remote_cmd]
        subprocess.call(args)
    # Eventually remove the ssh public host key
    args=["ssh-keygen", "-R", instance.public_dns_name]
    subprocess.call(args)
    args=["ssh-keygen", "-R", instance.ip_address]
    subprocess.call(args)
def parse_template(ctxt, template_file_name, kwargs):
    """Merge a YAML instance template into the kwargs dictionary.

    Command line values take precedence: a template entry is only used when
    the corresponding kwargs value is missing or empty. An 'ami_name' or
    'ami_id' template entry is resolved to an image_id through an AMI
    lookup when no image_id was given explicitly.
    """
    f = open(template_file_name)
    dataMap = yaml.safe_load(f)
    f.close()
    if not 'image_id' in kwargs or kwargs['image_id'] == None :
        ami_kwargs = {}
        if 'ami_name' in dataMap:
            ami_kwargs['name'] = dataMap['ami_name']
            del dataMap['ami_name']
        elif 'ami_id' in dataMap:
            # Fixed: the AMI id lives under 'ami_id'; looking up
            # dataMap['id'] raised KeyError for any template that
            # specified an explicit AMI id.
            ami_kwargs['id'] = dataMap['ami_id']
            del dataMap['ami_id']
        ami = oslib.ec2_objects.AMI(ctxt, **ami_kwargs)
        ami.get()
        kwargs['image_id'] = ami.id
    # check all the values that needs to be an array
    for varg in ('security_groups', 'embedded_commands', 'snap_id'):
        if varg in dataMap:
            value = dataMap[varg]
            if [].__class__ == value.__class__:
                kwargs[varg] = value
            elif "".__class__ == value.__class__ or u"".__class__ == value.__class__:
                kwargs[varg] = [ value ]
            del dataMap[varg]
    if 'local_facts' in dataMap:
        local_facts = kwargs['local_facts']
        for k in dataMap['local_facts']:
            if not k in local_facts:
                local_facts[k] = dataMap['local_facts'][k]
        del dataMap['local_facts']
    for k in dataMap:
        # Fixed: the previous emptiness test was `len(kwargs[k]) == {}`,
        # which is always False and raises TypeError for unsized values;
        # an empty dict should also be overridden by the template.
        if k in dataMap and (not k in kwargs or kwargs[k] == None or kwargs[k] == {} or kwargs[k] == []):
            kwargs[k] = dataMap[k]
    return kwargs
def do_build(ctxt, **kwargs):
    """Create and provision one EC2 instance; implemented as a generator.

    Yields progress strings (dots while waiting, an error message for a
    missing or duplicate name, the Windows password when relevant) and
    finally the boto instance object itself.

    Python 2 only: relies on a print statement, `basestring` and the
    `except socket.error, msg` syntax.
    """
    conn = ctxt.cnx_ec2
    # Merge a YAML template first so that explicit options keep precedence.
    if 'template' in kwargs and kwargs['template']:
        template_file_name = kwargs['template']
        kwargs = parse_template(ctxt, template_file_name, kwargs)
        del kwargs['template']
    defaultrun = {'instance_type': 'm1.large', 'key_name': ctxt.key_name }
    for key in defaultrun:
        if key not in kwargs or kwargs[key] == None:
            kwargs[key] = defaultrun[key]
    (remote_user, kwargs) = get_remote_user(ctxt, **kwargs)
    (key_file, kwargs) = get_key_file(ctxt, **kwargs)
    (tags,kwargs) = do_tags(**kwargs)
    do_run_scripts = kwargs.pop('run')
    ###########
    # Check VM naming
    ###########
    if 'Name' not in tags and kwargs['hostname'] is not None:
        tags['Name'] = kwargs['hostname']
    if 'Name' not in tags:
        yield "instance name is mandatory"
        return
    try:
        oslib.ec2_objects.Instance(ctxt, name=tags['Name']).get()
        # if get succed, the name already exist, else get throws an exception
        yield "duplicate name %s" % tags['Name']
        return
    except:
        pass
    user_data_properties = {}
    image = kwargs.pop('image_id', None)
    ###########
    # Check device mapping
    ###########
    volumes = BlockDeviceMapping(conn)
    # Data volumes are attached starting at /dev/sdf.
    first_volume = 'f'
    l = first_volume
    ebs_optimized = False
    for volume_info in kwargs.pop('volume_size', []):
        # yaml is not typed, volume_info can be a string or a number
        if isinstance(volume_info, basestring):
            options = volume_info.split(',')
            size = int(oslib.parse_size(options[0], 'G', default_suffix='G'))
        else:
            options = []
            size = int(volume_info)
        vol_kwargs = {"connection":conn, "size": size}
        if len(options) > 1:
            # Remaining comma-separated items are key=value volume options.
            for opt in options[1:]:
                parsed = opt.split('=')
                key = parsed[0]
                if len(parsed) == 2:
                    value = parsed[1]
                elif len(parsed) == 1:
                    value = True
                else:
                    raise OSLibError("can't parse volume argument %s", opt)
                if key == 'iops':
                    # Provisioned IOPS implies an io1 EBS-optimized volume.
                    ebs_optimized = True
                    vol_kwargs['volume_type'] = 'io1'
                vol_kwargs[key] = value
        volumes["/dev/sd%s"%l] = BlockDeviceType(**vol_kwargs)
        l = chr( ord(l[0]) + 1)
    kwargs['ebs_optimized'] = ebs_optimized
    # if drive letter is not f, some volumes definition was found
    if l != first_volume:
        kwargs['block_device_map'] = volumes
        user_data_properties['volumes'] = ' '.join(volumes.keys())
    # after user_data_properties['volumes'] otherwise they will be lvm'ed
    for snapshot_id in kwargs.pop('snap_id', []):
        volumes["/dev/sd%s"%l] = BlockDeviceType(connection=conn, snapshot_id=snapshot_id)
        l = chr( ord(l[0]) + 1)
    kwargs = build_user_data(user_data_properties, **kwargs)
    ###########
    # Check elastic IP
    ###########
    if kwargs['elastic_ip']:
        eip = True
    else:
        eip = False
    del kwargs['elastic_ip']
    # Drop None-valued and empty-list options before calling boto.
    for k in kwargs.keys()[:]:
        value = kwargs[k]
        if kwargs[k] == None:
            del(kwargs[k])
        elif value.__class__ == [].__class__ and len(value) == 0:
            del(kwargs[k])
    # VPC case: a private IP forces an explicit network interface spec.
    if 'private_ip_address' in kwargs and kwargs['private_ip_address']:
        netif_specification = NetworkInterfaceCollection()
        netif_kwargs = {}
        if kwargs['private_ip_address']:
            netif_kwargs['private_ip_address'] = kwargs['private_ip_address']
            del kwargs['private_ip_address']
        if 'associate_public_ip_address' in kwargs and kwargs['associate_public_ip_address']:
            netif_kwargs['associate_public_ip_address'] = kwargs['associate_public_ip_address']
            del kwargs['associate_public_ip_address']
        if 'security_groups' in kwargs and kwargs['security_groups']:
            netif_kwargs['groups'] = kwargs['security_groups']
            del kwargs['security_groups']
        netif_kwargs['subnet_id'] = kwargs['subnet_id']
        del kwargs['subnet_id']
        print netif_kwargs
        spec = NetworkInterfaceSpecification(**netif_kwargs)
        netif_specification.append(spec)
        kwargs['network_interfaces'] = netif_specification
    reservation = conn.run_instances(image, **kwargs)
    instance = reservation.instances[0]
    # Quick hack to keep the selected remote user
    instance.remote_user = remote_user
    if len(tags) > 0:
        conn.create_tags([ instance.id ], tags)
    if instance.interfaces and len(instance.interfaces) > 0:
        for interface in instance.interfaces:
            conn.create_tags([ interface.id ], {'creator': tags['creator']})
    # Poll until the instance leaves 'pending', yielding progress dots.
    while instance.state != 'running' and instance.state != 'terminated':
        instance.update(True)
        yield (".")
        time.sleep(1)
    yield ("\n")
    if eip:
        ip = conn.allocate_address().public_ip
        conn.associate_address(instance_id = instance.id, public_ip=ip)
        conn.create_tags([instance.id], {"EIP": ip})
    #Update tag for this instance's volumes
    for device in instance.block_device_mapping:
        device_type = instance.block_device_mapping[device]
        (vol_tags, vol_kwargs) = do_tags(name='%s/%s' % (tags['Name'], device.replace('/dev/','')))
        conn.create_tags([ device_type.volume_id ], vol_tags)
    instance.update(True)
    windows_instance = instance.platform == 'Windows'
    if do_run_scripts and not windows_instance:
        # Wait for ssh (port 22) to come up before remote provisioning.
        while instance.state != 'terminated':
            try:
                s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
                s.settimeout(1.0)
                s.connect((instance.public_dns_name, 22))
                s.close()
                break
            except socket.error, msg:
                yield (".")
                s.close()
                time.sleep(1)
        yield ("\n")
        instance.key_file = key_file
        remote_setup(instance, remote_user, key_file)
    elif windows_instance:
        # Windows: retrieve the generated administrator password instead.
        os_instance = oslib.ec2_objects.Instance(ctxt, id=instance.id)
        passget = GetWindowsPassword()
        passget.set_context(ctxt)
        passget.ec2_object = os_instance
        passget.validate(None)
        try_again = True
        while try_again:
            try:
                password = "\npassword is '%s'\n" % passget.execute(key_file=key_file)
                yield password
                try_again = False
            except OSLibError:
                yield (".")
                time.sleep(1)
    yield instance
| {
"repo_name": "fbacchella/oscmd",
"path": "oslib/build.py",
"copies": "1",
"size": "15215",
"license": "apache-2.0",
"hash": -5772989176944293000,
"line_mean": 39.3580901857,
"line_max": 213,
"alpha_frac": 0.5955964509,
"autogenerated": false,
"ratio": 3.6911693352741386,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.47867657861741386,
"avg_score": null,
"num_lines": null
} |
'''A set of mixin classes.'''
class Comparable(object):
    '''Mixin that derives the full set of rich comparison operators from
    just __eq__ and __lt__, which the inheriting class must provide.'''
    def __ne__(self, other):
        return not self.__eq__(other)
    def __gt__(self, other):
        # self > other  <=>  other < self
        return other.__lt__(self)
    def __ge__(self, other):
        # self >= other  <=>  not (self < other)
        return not self.__lt__(other)
    def __le__(self, other):
        # self <= other  <=>  not (other < self)
        return not other.__lt__(self)
class Keyed(object):
    '''Mixin that defines hashing, equality and ordering by delegating to
    a single __key__ method supplied by the inheriting class.'''
    def __lt__(self, other):
        return self.__key__().__lt__(other.__key__())
    def __hash__(self):
        return self.__key__().__hash__()
    def __eq__(self, other):
        return self.__key__().__eq__(other.__key__())
class Subscripted(object):
    '''Mixin attaching a "subscript" tag to its objects, so that similar
    but non-equal objects can be created from the same value.'''
    def __init__(self, subscript):
        '''Store the subscript, which is usually an integer.'''
        self.subscript = subscript
    def __str__(self):
        '''Append the subscript to the object's normal string value; a
        plain int subscript is appended bare, anything else in brackets.
        '''
        sub = self.subscript
        tag = str(sub) if type(sub) is int else '[%s]' % sub
        return super(Subscripted, self).__str__() + tag
    def __eq__(self, y):
        '''Equal only to another Subscripted whose subscript and
        underlying value are both equal.'''
        if not isinstance(y, Subscripted):
            return False
        if not self.subscript == y.subscript:
            return False
        return super(Subscripted, self).__eq__(y)
class Primed(Subscripted):
    '''A mixin class which affixes a "prime mark" to an object, to distinguish
    it from its original value.'''
    def __init__(self, num_primes):
        # num_primes must be a strictly positive int; it is stored as the
        # subscript of the Subscripted base class.
        assert isinstance(num_primes, int) and num_primes > 0
        Subscripted.__init__(self, num_primes)
    @property
    def num_primes(self):
        # Readable alias for the inherited subscript attribute.
        return self.subscript
    def __str__(self):
        # Deliberately starts the MRO lookup AFTER Subscripted (hence
        # super(Subscripted, self)) so the prime marks replace, rather
        # than follow, the subscript tag.
        return super(Subscripted, self).__str__() + '\'' * self.subscript
    def __eq__(self, y):
        '''A primed object is equal to another if and only if the other object
        is a primed object with the same value.'''
        return isinstance(y, Primed) and \
               super(Primed, self).__eq__(y)
| {
"repo_name": "bdusell/pycfg",
"path": "src/util/mixin.py",
"copies": "1",
"size": "2461",
"license": "mit",
"hash": 3336290950663226400,
"line_mean": 30.961038961,
"line_max": 78,
"alpha_frac": 0.5786265746,
"autogenerated": false,
"ratio": 4.014681892332789,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5093308466932789,
"avg_score": null,
"num_lines": null
} |
# A set of NLP functions, none of which are currently being used
import nltk
from nltk.corpus import stopwords
text = "" # TO be included from the Article Abstract
# Used when tokenizing words
sentence_re = r'''(?x) # set flag to allow verbose regexps
([A-Z])(\.[A-Z])+\.? # abbreviations, e.g. U.S.A.
| \w+(-\w+)* # words with optional internal hyphens
| \$?\d+(\.\d+)?%? # currency and percentages, e.g. $12.40, 82%
| \.\.\. # ellipsis
| [][.,;"'?():-_`] # these are separate tokens
'''
lemmatizer = nltk.WordNetLemmatizer()
stemmer = nltk.stem.porter.PorterStemmer()
# Taken from Su Nam Kim Paper...
grammar = r"""
NBAR:
{<NN.*|JJ>*<NN.*>} # Nouns and Adjectives, terminated with Nouns
NP:
{<NBAR>}
{<NBAR><IN><NBAR>} # Above, connected with in/of/etc...
"""
chunker = nltk.RegexpParser(grammar)
toks = nltk.regexp_tokenize(text, sentence_re)
# NOTE(review): pos_tag is handed the raw string, so it tags individual
# characters; the `toks` list computed above was probably intended here
# -- confirm before changing.
postoks = nltk.tag.pos_tag(text)
print(postoks)
tree = chunker.parse(postoks)
# Rebinds the imported `stopwords` corpus object to a plain word list.
stopwords = stopwords.words('english')
def leaves(tree):
    """Finds NP (nounphrase) leaf nodes of a chunk tree."""
    # NOTE(review): nltk >= 3 renamed Tree.node to Tree.label(); this
    # filter looks written against the old nltk 2 API -- confirm version.
    for subtree in tree.subtrees(filter=lambda t: t.node == 'NP'):
        yield subtree.leaves()
def normalise(word):
    """Normalises words to lowercase and stems and lemmatizes it."""
    lowered = word.lower()
    stemmed = stemmer.stem_word(lowered)
    return lemmatizer.lemmatize(stemmed)
def acceptable_word(word):
    """Checks conditions for acceptable word: length, stopword."""
    return 2 <= len(word) <= 40 and word.lower() not in stopwords
def get_terms(tree):
    # Yield, per noun phrase, the list of normalised acceptable words.
    for leaf in leaves(tree):
        yield [normalise(w) for w, t in leaf if acceptable_word(w)]
# Print every normalised word of each extracted noun-phrase term, with a
# blank line after each term.
terms = get_terms(tree)
for term in terms:
    for word in term:
        print(word)
    print()
| {
"repo_name": "neelsomani/brainspell-neo",
"path": "archive/natural_language.py",
"copies": "2",
"size": "1908",
"license": "mit",
"hash": 3960337969885474000,
"line_mean": 25.1369863014,
"line_max": 73,
"alpha_frac": 0.6132075472,
"autogenerated": false,
"ratio": 3.138157894736842,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.47513654419368423,
"avg_score": null,
"num_lines": null
} |
""" A set of NumPy functions to apply per chunk """
from collections.abc import Container, Iterable, Sequence
from functools import wraps
from tlz import concat
import numpy as np
from . import numpy_compat as npcompat
from ..core import flatten
from ..utils import ignoring
from numbers import Integral
try:
from numpy import take_along_axis
except ImportError: # pragma: no cover
take_along_axis = npcompat.take_along_axis
def keepdims_wrapper(a_callable):
    """
    Wrap a reduction so that it always honours the ``keepdims`` flag,
    even when the underlying callable does not support it.
    """

    @wraps(a_callable)
    def keepdims_wrapped_callable(x, axis=None, keepdims=None, *args, **kwargs):
        result = a_callable(x, axis=axis, *args, **kwargs)
        if not keepdims:
            return result
        reduced = axis
        if reduced is None:
            reduced = range(x.ndim)
        if not isinstance(reduced, (Container, Iterable, Sequence)):
            reduced = [reduced]
        # Re-insert a length-1 dimension wherever an axis was reduced away.
        index = tuple(
            None if d in reduced else slice(None) for d in range(x.ndim)
        )
        return result[index]

    return keepdims_wrapped_callable
# Wrap NumPy functions to ensure they provide keepdims.
# The plain aliases already accept keepdims natively; only the arg-finding
# reductions (argmin/argmax and their nan variants) need the wrapper.
sum = np.sum
prod = np.prod
min = np.min
max = np.max
argmin = keepdims_wrapper(np.argmin)
nanargmin = keepdims_wrapper(np.nanargmin)
argmax = keepdims_wrapper(np.argmax)
nanargmax = keepdims_wrapper(np.nanargmax)
any = np.any
all = np.all
nansum = np.nansum
nanprod = np.nanprod
nancumprod = np.nancumprod
nancumsum = np.nancumsum
nanmin = np.nanmin
nanmax = np.nanmax
mean = np.mean
# The ignoring() guards tolerate very old NumPy builds that lack the
# nan-aware mean/var/std reductions.
with ignoring(AttributeError):
    nanmean = np.nanmean
var = np.var
with ignoring(AttributeError):
    nanvar = np.nanvar
std = np.std
with ignoring(AttributeError):
    nanstd = np.nanstd
def coarsen(reduction, x, axes, trim_excess=False, **kwargs):
    """ Coarsen array by applying reduction to fixed size neighborhoods

    Parameters
    ----------
    reduction: function
        Function like np.sum, np.mean, etc...
    x: np.ndarray
        Array to be coarsened
    axes: dict
        Mapping of axis to coarsening factor

    Examples
    --------
    >>> x = np.array([1, 2, 3, 4, 5, 6])
    >>> coarsen(np.sum, x, {0: 2})
    array([ 3,  7, 11])
    >>> coarsen(np.max, x, {0: 3})
    array([3, 6])

    Provide dictionary of scale per dimension

    >>> x = np.arange(24).reshape((4, 6))
    >>> x
    array([[ 0,  1,  2,  3,  4,  5],
           [ 6,  7,  8,  9, 10, 11],
           [12, 13, 14, 15, 16, 17],
           [18, 19, 20, 21, 22, 23]])
    >>> coarsen(np.min, x, {0: 2, 1: 3})
    array([[ 0,  3],
           [12, 15]])

    You must avoid excess elements explicitly

    >>> x = np.array([1, 2, 3, 4, 5, 6, 7, 8])
    >>> coarsen(np.min, x, {0: 3}, trim_excess=True)
    array([1, 4])
    """
    # Every dimension needs an entry; default to a coarsening factor of 1.
    for axis in range(x.ndim):
        axes.setdefault(axis, 1)

    if trim_excess:
        ind = tuple(
            slice(0, -(d % axes[i])) if d % axes[i] else slice(None, None)
            for i, d in enumerate(x.shape)
        )
        x = x[ind]

    # Split each axis in two: (size // factor, factor), e.g. (10,) -> (5, 2),
    # then reduce over the interleaved factor axes.
    newshape = []
    for i in range(x.ndim):
        newshape.extend((x.shape[i] // axes[i], axes[i]))
    reduce_axes = tuple(range(1, x.ndim * 2, 2))
    return reduction(x.reshape(tuple(newshape)), axis=reduce_axes, **kwargs)
def trim(x, axes=None):
    """ Trim boundaries off of array

    >>> x = np.arange(24).reshape((4, 6))
    >>> trim(x, axes={0: 0, 1: 1})
    array([[ 1,  2,  3,  4],
           [ 7,  8,  9, 10],
           [13, 14, 15, 16],
           [19, 20, 21, 22]])

    >>> trim(x, axes={0: 1, 1: 1})
    array([[ 7,  8,  9, 10],
           [13, 14, 15, 16]])
    """
    # Normalise axes to one trim width per dimension.
    if isinstance(axes, Integral):
        axes = [axes] * x.ndim
    if isinstance(axes, dict):
        axes = [axes.get(i, 0) for i in range(x.ndim)]
    index = []
    for pad in axes:
        index.append(slice(pad, -pad) if pad else slice(pad, None))
    return x[tuple(index)]
def topk(a, k, axis, keepdims):
    """ Chunk and combine function of topk

    Extract the k largest elements from a on the given axis.
    If k is negative, extract the -k smallest elements instead.
    Note that, unlike in the parent function, the returned elements
    are not sorted internally.
    """
    assert keepdims is True
    axis = axis[0]
    if abs(k) >= a.shape[axis]:
        return a

    a = np.partition(a, -k, axis=axis)
    # k > 0: keep the trailing k (largest); k < 0: keep the leading -k.
    keep = slice(-k, None) if k > 0 else slice(-k)
    selector = [slice(None)] * a.ndim
    selector[axis] = keep
    return a[tuple(selector)]
def topk_aggregate(a, k, axis, keepdims):
    """ Final aggregation function of topk

    Invoke topk one final time and then sort the results internally.
    """
    assert keepdims is True
    ax = axis[0]
    a = np.sort(topk(a, k, axis, keepdims), axis=ax)
    if k < 0:
        # Smallest-k results are already in ascending order.
        return a
    # Largest-k results are reported largest first: flip the reduced axis.
    flip = [slice(None)] * a.ndim
    flip[ax] = slice(None, None, -1)
    return a[tuple(flip)]
def argtopk_preprocess(a, idx):
    """ Preparatory step for argtopk

    Put data together with its original indices in a tuple.
    """
    return (a, idx)
def argtopk(a_plus_idx, k, axis, keepdims):
    """ Chunk and combine function of argtopk

    Extract the indices of the k largest elements from a on the given axis.
    If k is negative, extract the indices of the -k smallest elements instead.
    Note that, unlike in the parent function, the returned elements
    are not sorted internally.
    """
    assert keepdims is True
    ax = axis[0]
    if isinstance(a_plus_idx, list):
        # Combine step: glue the (data, index) pairs of prior chunks together.
        a_plus_idx = list(flatten(a_plus_idx))
        a = np.concatenate([data for data, _ in a_plus_idx], ax)
        idx = np.concatenate(
            [np.broadcast_to(index, data.shape) for data, index in a_plus_idx], ax
        )
    else:
        a, idx = a_plus_idx
    if abs(k) >= a.shape[ax]:
        # Nothing to discard: pass everything through unchanged.
        return a_plus_idx
    order = np.argpartition(a, -k, axis=ax)
    sel = slice(-k, None) if k > 0 else slice(-k)
    selector = [slice(None)] * a.ndim
    selector[ax] = sel
    order = order[tuple(selector)]
    return take_along_axis(a, order, ax), take_along_axis(idx, order, ax)
def argtopk_aggregate(a_plus_idx, k, axis, keepdims):
    """ Final aggregation function of argtopk

    Invoke argtopk one final time, sort the results internally, drop the data
    and return the index only.
    """
    assert keepdims is True
    a, idx = argtopk(a_plus_idx, k, axis, keepdims)
    ax = axis[0]
    order = np.argsort(a, axis=ax)
    idx = take_along_axis(idx, order, ax)
    if k < 0:
        # Smallest-k: ascending order is already correct.
        return idx
    # Largest-k: reverse the reduced axis to get descending order.
    flip = [slice(None)] * idx.ndim
    flip[ax] = slice(None, None, -1)
    return idx[tuple(flip)]
def arange(start, stop, step, length, dtype):
    """ np.arange limited to at most ``length`` elements.

    Floating point rounding can make np.arange emit one extra element;
    drop it so every chunk has the expected length.
    """
    out = np.arange(start, stop, step, dtype)
    if len(out) > length:
        out = out[:-1]
    return out
def astype(x, astype_dtype=None, **kwargs):
    """ Cast ``x`` to ``astype_dtype``, forwarding extra keyword
    arguments (casting, copy, ...) to ``ndarray.astype``. """
    result = x.astype(astype_dtype, **kwargs)
    return result
def view(x, dtype, order="C"):
    """ Reinterpret the bytes of ``x`` as ``dtype``.

    ``order`` selects whether the reinterpretation happens over a
    C-contiguous or a Fortran-contiguous copy of the data.
    """
    if order != "C":
        # Fortran order: view through the transpose so the itemsize change
        # happens along the first axis.
        return np.asfortranarray(x).T.view(dtype).T
    return np.ascontiguousarray(x).view(dtype)
def slice_with_int_dask_array(x, idx, offset, x_size, axis):
    """ Chunk function of `slice_with_int_dask_array_on_axis`.

    Slice one chunk of x by one chunk of idx.

    Parameters
    ----------
    x: ndarray, any dtype, any shape
        i-th chunk of x
    idx: ndarray, ndim=1, dtype=any integer
        j-th chunk of idx (cartesian product with the chunks of x)
    offset: ndarray, shape=(1, ), dtype=int64
        Index of the first element along axis of the current chunk of x
    x_size: int
        Total size of the x da.Array along axis
    axis: int
        normalized axis to take elements from (0 <= axis < x.ndim)

    Returns
    -------
    x sliced along axis, using only the elements of idx that fall inside the
    current chunk.
    """
    # Work in signed 64-bit: idx may be unsigned, and the arithmetic below
    # can go negative.
    idx = idx.astype(np.int64)
    # Translate negative indices into their positive equivalents.
    idx = np.where(idx < 0, idx + x_size, idx)
    # Re-base onto this chunk's coordinate system (offset is a (1,) array
    # holding the global position of this chunk's first element).
    idx = idx - offset
    # Keep only the indices that land inside this chunk.
    keep = (idx >= 0) & (idx < x.shape[axis])
    idx = idx[keep]
    # Fancy-index along `axis` only (np.take does not support slice indices).
    indexer = [slice(None)] * x.ndim
    indexer[axis] = idx
    return x[tuple(indexer)]
def slice_with_int_dask_array_aggregate(idx, chunk_outputs, x_chunks, axis):
    """ Final aggregation function of `slice_with_int_dask_array_on_axis`.

    Aggregate all chunks of x by one chunk of idx, reordering the output of
    `slice_with_int_dask_array`.

    Note that there is no combine function, as a recursive aggregation (e.g.
    with split_every) would not give any benefit.

    Parameters
    ----------
    idx: ndarray, ndim=1, dtype=any integer
        j-th chunk of idx
    chunk_outputs: ndarray
        concatenation along axis of the outputs of `slice_with_int_dask_array`
        for all chunks of x and the j-th chunk of idx
    x_chunks: tuple
        dask chunks of the x da.Array along axis, e.g. ``(3, 3, 2)``
    axis: int
        normalized axis to take elements from (0 <= axis < x.ndim)

    Returns
    -------
    Selection from all chunks of x for the j-th chunk of idx, in the correct
    order
    """
    # Signed arithmetic; idx may be unsigned.
    idx = idx.astype(np.int64)
    idx = np.where(idx < 0, idx + sum(x_chunks), idx)

    # Build the permutation that maps positions in `chunk_outputs` back to the
    # order requested by `idx`, one chunk-of-x layer at a time.
    # FIXME: this could probably be reimplemented with a faster search-based
    # algorithm
    idx_final = np.zeros_like(idx)
    chunk_start = 0
    out_offset = 0
    for size in x_chunks:
        within = (idx >= chunk_start) & (idx < chunk_start + size)
        running = np.cumsum(within)
        # Elements of this chunk appear in chunk_outputs in idx order,
        # starting at out_offset.
        idx_final += np.where(within, running - 1 + out_offset, 0)
        chunk_start += size
        if running.size > 0:
            out_offset += running[-1]

    # Fancy-index along `axis` only (np.take does not support slice indices).
    indexer = [slice(None)] * chunk_outputs.ndim
    indexer[axis] = idx_final
    return chunk_outputs[tuple(indexer)]
| {
"repo_name": "ContinuumIO/dask",
"path": "dask/array/chunk.py",
"copies": "2",
"size": "10628",
"license": "bsd-3-clause",
"hash": -7527083216621886000,
"line_mean": 27.3413333333,
"line_max": 88,
"alpha_frac": 0.6027474595,
"autogenerated": false,
"ratio": 3.393358876117497,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4996106335617497,
"avg_score": null,
"num_lines": null
} |
""" A set of NumPy functions to apply per chunk """
from __future__ import absolute_import, division, print_function
from collections import Container, Iterable, Sequence
from functools import wraps
from toolz import concat
import numpy as np
from . import numpy_compat as npcompat
from ..compatibility import getargspec
from ..utils import ignoring
def keepdims_wrapper(a_callable):
    """
    Wrap a reduction so that it always honours the ``keepdims`` argument.

    Callables that already accept ``keepdims`` are returned untouched;
    everything else gets a wrapper that re-inserts the reduced axes as
    length-1 dimensions when ``keepdims`` is truthy.
    """
    if "keepdims" in getargspec(a_callable).args:
        return a_callable

    @wraps(a_callable)
    def keepdims_wrapped_callable(x, axis=None, keepdims=None, *args, **kwargs):
        result = a_callable(x, axis=axis, *args, **kwargs)
        if not keepdims:
            return result
        reduced = axis if axis is not None else range(x.ndim)
        if not isinstance(reduced, (Container, Iterable, Sequence)):
            reduced = [reduced]
        # Indexing with None re-creates each reduced axis with size 1.
        expander = tuple(
            None if dim in reduced else slice(None) for dim in range(x.ndim)
        )
        return result[expander]

    return keepdims_wrapped_callable
# Wrap NumPy functions to ensure they provide keepdims.
# NOTE: these deliberately shadow the ``sum``/``min``/``max``/``any``/``all``
# builtins -- inside this module those names always mean the wrapped
# NumPy reductions.
sum = keepdims_wrapper(np.sum)
prod = keepdims_wrapper(np.prod)
min = keepdims_wrapper(np.min)
max = keepdims_wrapper(np.max)
argmin = keepdims_wrapper(np.argmin)
nanargmin = keepdims_wrapper(np.nanargmin)
argmax = keepdims_wrapper(np.argmax)
nanargmax = keepdims_wrapper(np.nanargmax)
any = keepdims_wrapper(np.any)
all = keepdims_wrapper(np.all)
nansum = keepdims_wrapper(np.nansum)
try:
    from numpy import nanprod, nancumprod, nancumsum
except ImportError:  # pragma: no cover
    # Older NumPy releases lack these; fall back to the bundled copies.
    nanprod = npcompat.nanprod
    nancumprod = npcompat.nancumprod
    nancumsum = npcompat.nancumsum
nanprod = keepdims_wrapper(nanprod)
nancumprod = keepdims_wrapper(nancumprod)
nancumsum = keepdims_wrapper(nancumsum)
nanmin = keepdims_wrapper(np.nanmin)
nanmax = keepdims_wrapper(np.nanmax)
mean = keepdims_wrapper(np.mean)
# The nan* variants below only exist on sufficiently new NumPy; `ignoring`
# suppresses the AttributeError on older releases.
with ignoring(AttributeError):
    nanmean = keepdims_wrapper(np.nanmean)
var = keepdims_wrapper(np.var)
with ignoring(AttributeError):
    nanvar = keepdims_wrapper(np.nanvar)
std = keepdims_wrapper(np.std)
with ignoring(AttributeError):
    nanstd = keepdims_wrapper(np.nanstd)
def coarsen(reduction, x, axes, trim_excess=False):
    """ Coarsen array by applying reduction to fixed size neighborhoods

    Parameters
    ----------
    reduction: function
        Function like np.sum, np.mean, etc...
    x: np.ndarray
        Array to be coarsened
    axes: dict
        Mapping of axis to coarsening factor
    trim_excess: bool
        If True, discard trailing elements that do not fill a complete
        neighborhood instead of failing on reshape.

    Examples
    --------
    >>> x = np.array([1, 2, 3, 4, 5, 6])
    >>> coarsen(np.sum, x, {0: 2})
    array([ 3,  7, 11])
    >>> coarsen(np.max, x, {0: 3})
    array([3, 6])

    Provide dictionary of scale per dimension

    >>> x = np.arange(24).reshape((4, 6))
    >>> coarsen(np.min, x, {0: 2, 1: 3})
    array([[ 0,  3],
           [12, 15]])

    You must avoid excess elements explicitly

    >>> x = np.array([1, 2, 3, 4, 5, 6, 7, 8])
    >>> coarsen(np.min, x, {0: 3}, trim_excess=True)
    array([1, 4])
    """
    # Every axis needs a factor; unlisted axes keep factor 1.
    # NOTE: this mutates the caller's ``axes`` dict, as the original did.
    for i in range(x.ndim):
        if i not in axes:
            axes[i] = 1
    if trim_excess:
        ind = tuple(
            slice(0, -(d % axes[i])) if d % axes[i] else slice(None, None)
            for i, d in enumerate(x.shape)
        )
        x = x[ind]
    # (10, 10) -> (5, 2, 5, 2).  Use floor division: with
    # ``from __future__ import division`` in effect, plain ``/`` produced
    # floats, which ``reshape`` rejects.
    newshape = []
    for i in range(x.ndim):
        newshape.extend((x.shape[i] // axes[i], axes[i]))
    return reduction(x.reshape(tuple(newshape)), axis=tuple(range(1, x.ndim * 2, 2)))
def trim(x, axes=None):
    """ Trim boundaries off of array

    ``axes`` is an integer margin applied to every axis, or a dict mapping
    axis number to the margin removed from both ends of that axis.

    >>> x = np.arange(24).reshape((4, 6))
    >>> trim(x, axes={0: 0, 1: 1})
    array([[ 1,  2,  3,  4],
           [ 7,  8,  9, 10],
           [13, 14, 15, 16],
           [19, 20, 21, 22]])
    >>> trim(x, axes={0: 1, 1: 1})
    array([[ 7,  8,  9, 10],
           [13, 14, 15, 16]])
    """
    if isinstance(axes, int):
        axes = [axes] * x.ndim
    elif isinstance(axes, dict):
        axes = [axes.get(i, 0) for i in range(x.ndim)]
    slices = [slice(margin, -margin if margin else None) for margin in axes]
    return x[tuple(slices)]
# ``broadcast_to`` appeared in NumPy 1.10; fall back to the bundled copy on
# older releases.
try:
    from numpy import broadcast_to
except ImportError:  # pragma: no cover
    broadcast_to = npcompat.broadcast_to
def topk(k, x):
    """ Top k elements of an array, sorted in descending order.

    >>> topk(2, np.array([5, 1, 3, 6]))
    array([6, 5])
    """
    # http://stackoverflow.com/a/23734295/616616 by larsmans
    # Clamp k so asking for more elements than exist returns everything.
    k = np.minimum(k, len(x))
    top = x[np.argpartition(x, -k)[-k:]]
    return np.sort(top)[::-1]
| {
"repo_name": "cowlicks/dask",
"path": "dask/array/chunk.py",
"copies": "2",
"size": "4943",
"license": "bsd-3-clause",
"hash": 5415933219385118000,
"line_mean": 25.1534391534,
"line_max": 80,
"alpha_frac": 0.5876997775,
"autogenerated": false,
"ratio": 3.256258234519104,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9843175285608451,
"avg_score": 0.0001565452821306376,
"num_lines": 189
} |
""" A set of NumPy functions to apply per chunk """
from __future__ import absolute_import, division, print_function
from collections import Container, Iterable, Sequence
from functools import wraps
from inspect import getargspec
from toolz import concat
import numpy as np
from ..compatibility import builtins
from ..utils import ignoring
def keepdims_wrapper(a_callable):
    """
    Wrap ``a_callable`` so the ``keepdims`` keyword is always supported.

    Callables that natively accept ``keepdims`` are returned as-is; all
    others get a wrapper that re-inserts each reduced axis as a length-1
    dimension when ``keepdims`` is truthy.
    """
    if "keepdims" in getargspec(a_callable).args:
        return a_callable

    @wraps(a_callable)
    def keepdims_wrapped_callable(x, axis=None, keepdims=None, *args, **kwargs):
        r = a_callable(x, axis=axis, *args, **kwargs)
        if not keepdims:
            return r
        axes = range(x.ndim) if axis is None else axis
        if not isinstance(axes, (Container, Iterable, Sequence)):
            axes = [axes]
        r_slice = []
        for each_axis in range(x.ndim):
            # None restores a reduced axis with size 1.
            r_slice.append(None if each_axis in axes else slice(None))
        return r[tuple(r_slice)]

    return keepdims_wrapped_callable
# Wrap NumPy functions to ensure they provide keepdims.
# NOTE: these deliberately shadow the ``sum``/``min``/``max``/``any``/``all``
# builtins -- inside this module those names always mean the wrapped
# NumPy reductions.
sum = keepdims_wrapper(np.sum)
prod = keepdims_wrapper(np.prod)
min = keepdims_wrapper(np.min)
max = keepdims_wrapper(np.max)
argmin = keepdims_wrapper(np.argmin)
nanargmin = keepdims_wrapper(np.nanargmin)
argmax = keepdims_wrapper(np.argmax)
nanargmax = keepdims_wrapper(np.nanargmax)
any = keepdims_wrapper(np.any)
all = keepdims_wrapper(np.all)
nansum = keepdims_wrapper(np.nansum)
# The nan* variants below only exist on sufficiently new NumPy; `ignoring`
# suppresses the AttributeError on older releases.
with ignoring(AttributeError):
    nanprod = keepdims_wrapper(np.nanprod)
nanmin = keepdims_wrapper(np.nanmin)
nanmax = keepdims_wrapper(np.nanmax)
mean = keepdims_wrapper(np.mean)
with ignoring(AttributeError):
    nanmean = keepdims_wrapper(np.nanmean)
var = keepdims_wrapper(np.var)
with ignoring(AttributeError):
    nanvar = keepdims_wrapper(np.nanvar)
std = keepdims_wrapper(np.std)
with ignoring(AttributeError):
    nanstd = keepdims_wrapper(np.nanstd)
def coarsen(reduction, x, axes):
    """ Coarsen array by applying reduction to fixed size neighborhoods

    Parameters
    ----------
    reduction: function
        Function like np.sum, np.mean, etc...
    x: np.ndarray
        Array to be coarsened
    axes: dict
        Mapping of axis to coarsening factor

    Example
    -------
    >>> x = np.array([1, 2, 3, 4, 5, 6])
    >>> coarsen(np.sum, x, {0: 2})
    array([ 3,  7, 11])
    >>> coarsen(np.max, x, {0: 3})
    array([3, 6])

    Provide dictionary of scale per dimension

    >>> x = np.arange(24).reshape((4, 6))
    >>> coarsen(np.min, x, {0: 2, 1: 3})
    array([[ 0,  3],
           [12, 15]])
    """
    # Every axis needs a factor; unlisted axes keep factor 1.
    # NOTE: this mutates the caller's ``axes`` dict, as the original did.
    for i in range(x.ndim):
        if i not in axes:
            axes[i] = 1
    # (10, 10) -> (5, 2, 5, 2).  Use floor division: with
    # ``from __future__ import division`` in effect, plain ``/`` produced
    # floats, which ``reshape`` rejects.
    newshape = []
    for i in range(x.ndim):
        newshape.extend((x.shape[i] // axes[i], axes[i]))
    return reduction(x.reshape(tuple(newshape)), axis=tuple(range(1, x.ndim * 2, 2)))
def trim(x, axes=None):
    """ Trim boundaries off of array

    ``axes`` is an integer margin applied to every axis, or a dict mapping
    axis number to the margin removed from both ends of that axis.

    >>> x = np.arange(24).reshape((4, 6))
    >>> trim(x, axes={0: 0, 1: 1})
    array([[ 1,  2,  3,  4],
           [ 7,  8,  9, 10],
           [13, 14, 15, 16],
           [19, 20, 21, 22]])
    >>> trim(x, axes={0: 1, 1: 1})
    array([[ 7,  8,  9, 10],
           [13, 14, 15, 16]])
    """
    if isinstance(axes, int):
        axes = x.ndim * [axes]
    elif isinstance(axes, dict):
        axes = [axes.get(dim, 0) for dim in range(x.ndim)]
    index = []
    for margin in axes:
        index.append(slice(margin, -margin if margin else None))
    return x[tuple(index)]
try:
from numpy import broadcast_to
except ImportError: # pragma: no cover
# broadcast_to will arrive in numpy v1.10. Until then, it is duplicated
# here:
# Copyright (c) 2005-2015, NumPy Developers.
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of the NumPy Developers nor the names of any
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
def _maybe_view_as_subclass(original_array, new_array):
if type(original_array) is not type(new_array):
# if input was an ndarray subclass and subclasses were OK,
# then view the result as that subclass.
new_array = new_array.view(type=type(original_array))
# Since we have done something akin to a view from original_array, we
# should let the subclass finalize (if it has it implemented, i.e., is
# not None).
if new_array.__array_finalize__:
new_array.__array_finalize__(original_array)
return new_array
def _broadcast_to(array, shape, subok, readonly):
    """ Core of ``broadcast_to``: build a (possibly writeable) broadcast
    view of ``array`` with the requested ``shape``. """
    if np.iterable(shape):
        shape = tuple(shape)
    else:
        shape = (shape,)
    array = np.array(array, copy=False, subok=subok)
    if not shape and array.shape:
        raise ValueError('cannot broadcast a non-scalar to a scalar array')
    if builtins.any(size < 0 for size in shape):
        raise ValueError('all elements of broadcast shape must be non-'
                         'negative')
    # nditer produces the zero-stride broadcast view for us.
    it = np.nditer(
        (array,), flags=['multi_index', 'zerosize_ok', 'refs_ok'],
        op_flags=['readonly'], itershape=shape, order='C')
    result = _maybe_view_as_subclass(array, it.itviews[0])
    if not readonly and array.flags.writeable:
        result.flags.writeable = True
    return result
def broadcast_to(array, shape, subok=False):
    """Broadcast an array to a new shape.

    Parameters
    ----------
    array : array_like
        The array to broadcast.
    shape : tuple
        The shape of the desired array.
    subok : bool, optional
        If True, then sub-classes will be passed-through, otherwise
        the returned array will be forced to be a base-class array (default).

    Returns
    -------
    broadcast : array
        A readonly view on the original array with the given shape. It is
        typically not contiguous. Furthermore, more than one element of a
        broadcasted array may refer to a single memory location.

    Raises
    ------
    ValueError
        If the array is not compatible with the new shape according to NumPy's
        broadcasting rules.

    Examples
    --------
    >>> x = np.array([1, 2, 3])
    >>> np.broadcast_to(x, (3, 3))  # doctest: +SKIP
    array([[1, 2, 3],
           [1, 2, 3],
           [1, 2, 3]])
    """
    # Public entry point: always hand back a readonly broadcast view.
    return _broadcast_to(array, shape, subok=subok, readonly=True)
def topk(k, x):
    """ Top k elements of an array, sorted in descending order.

    >>> topk(2, np.array([5, 1, 3, 6]))
    array([6, 5])
    """
    # http://stackoverflow.com/a/23734295/616616 by larsmans
    # Clamp k: without this, np.argpartition raises ValueError when k
    # exceeds len(x) (fix mirrored from the later revision of this module).
    k = np.minimum(k, len(x))
    ind = np.argpartition(x, -k)[-k:]
    return np.sort(x[ind])[::-1]
| {
"repo_name": "marianotepper/dask",
"path": "dask/array/chunk.py",
"copies": "4",
"size": "8651",
"license": "bsd-3-clause",
"hash": -4832155054916041000,
"line_mean": 31.2798507463,
"line_max": 82,
"alpha_frac": 0.6069818518,
"autogenerated": false,
"ratio": 3.8655049151027705,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0004365685431012046,
"num_lines": 268
} |
""" A set of NumPy functions to apply per chunk """
from toolz import concat
import numpy as np
def coarsen(reduction, x, axes):
    """ Coarsen array by applying reduction to fixed size neighborhoods

    Parameters
    ----------
    reduction: function
        Function like np.sum, np.mean, etc...
    x: np.ndarray
        Array to be coarsened
    axes: dict
        Mapping of axis to coarsening factor

    Example
    -------
    >>> x = np.array([1, 2, 3, 4, 5, 6])
    >>> coarsen(np.sum, x, {0: 2})
    array([ 3,  7, 11])
    >>> coarsen(np.max, x, {0: 3})
    array([3, 6])

    Provide dictionary of scale per dimension

    >>> x = np.arange(24).reshape((4, 6))
    >>> coarsen(np.min, x, {0: 2, 1: 3})
    array([[ 0,  3],
           [12, 15]])
    """
    # Every axis needs a factor; unlisted axes keep factor 1.
    # NOTE: this mutates the caller's ``axes`` dict, as the original did.
    for i in range(x.ndim):
        if i not in axes:
            axes[i] = 1
    # (10, 10) -> (5, 2, 5, 2).  Floor division: the original ``/`` was
    # Python-2 integer division and yields floats on Python 3, which
    # ``reshape`` rejects.
    newshape = []
    for i in range(x.ndim):
        newshape.extend((x.shape[i] // axes[i], axes[i]))
    return reduction(x.reshape(tuple(newshape)), axis=tuple(range(1, x.ndim * 2, 2)))
def constant(value, shape):
    """ Make a new array with a constant value

    The dtype is inferred from ``value`` exactly as ``np.array(value)``
    would infer it.

    >>> constant(5, (4,))
    array([5, 5, 5, 5])
    """
    # np.full replaces the original empty-then-fill two-step; passing the
    # inferred dtype explicitly preserves the original dtype behaviour.
    return np.full(shape, value, dtype=np.array(value).dtype)
def trim(x, axes=None):
    """ Trim boundaries off of array

    ``axes`` is an integer margin applied to every axis, or a dict mapping
    axis number to the margin removed from both ends of that axis.

    >>> x = np.arange(24).reshape((4, 6))
    >>> trim(x, axes={0: 0, 1: 1})
    array([[ 1,  2,  3,  4],
           [ 7,  8,  9, 10],
           [13, 14, 15, 16],
           [19, 20, 21, 22]])
    >>> trim(x, axes={0: 1, 1: 1})
    array([[ 7,  8,  9, 10],
           [13, 14, 15, 16]])
    """
    if isinstance(axes, int):
        axes = [axes for _ in range(x.ndim)]
    elif isinstance(axes, dict):
        axes = [axes.get(dim, 0) for dim in range(x.ndim)]
    selection = tuple(
        slice(margin, -margin if margin else None) for margin in axes
    )
    return x[selection]
| {
"repo_name": "PeterDSteinberg/dask",
"path": "dask/array/chunk.py",
"copies": "1",
"size": "2106",
"license": "bsd-3-clause",
"hash": 1914080036639437600,
"line_mean": 24.0714285714,
"line_max": 76,
"alpha_frac": 0.4966761633,
"autogenerated": false,
"ratio": 3.0345821325648417,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.40312582958648413,
"avg_score": null,
"num_lines": null
} |
"""A set of processes for Tumor heterogeneity analysis"""
from os import path
from pyppl import Proc
from diot import Diot
from . import params, proc_factory
pSciClone = proc_factory(
desc = "Clonality analysis using SciClone.",
config = Diot(annotate = """
@input:
muts: Somatic mutations. Could be one of:
- Single(paired)-sample VCF files, separared by comma
- Must have `FORMAT/AF` and `FORMAT/AD`
- Multi-sample VCF file
- TCGA MAF file
- Must have `t_alt_count` and `t_ref_count`
- SciClone TSV file
- Chr, Pos, RefCount, VarCount, VAF*100
cnvs: Copy numbers. Could be one of:
- Single-sample VCF files, separared by comma
- Multi-sample VCF files, must have `INFO/END` and `FORMAT/CN`
- SciClone TSV file
- Chr, Start, End, CopyNumber
@output:
outdir: The output directory.
@args:
params (Diot) : Other parameters for original `sciClone` function. Default: `Diot()`
exfile (file) : The regions to be excluded. In BED3 format
mutctrl (int|NoneType): Index of the control sample in paired-sample mutation VCF files or multi-sample VCF file, 0-based.
- `None` indicates no control sample.
- For paired-sample VCF files, if `None` specified, second sample(1) will be used as control.
cnctrl (int|NoneType): Index of the control sample in copy number VCF files or multi-sample VCF file, 0-based.
"""),
input = "muts:var, cnvs:var",
output = "outdir:dir:{{i.muts | bn | .split('.')[0]}}.sciclone",
lang = params.Rscript.value,
args = Diot(
params = Diot(),
exfile = "",
mutctrl = None,
cnctrl = None
)
)
pPyClone = proc_factory(
desc = "Clonality analysis using PyClone",
config = Diot(annotate = """
@input:
muts: Somatic mutations. Could be one of:
- Single(paired)-sample VCF files, separated by comma (i.e. a.vcf,b.vcf)
- Must have `FORMAT/AF` and `FORMAT/AD`
- Multi-sample VCF file (i.e. a.vcf)
- TCGA MAF file (i.e. a.maf)
- Must have `t_alt_count` and `t_ref_count` columns
- PyClone TSV file (i.e. a.tsv or a.txt)
- See https://github.com/aroth85/pyclone/blob/master/examples/mixing/tsv/SRR385941.tsv without `*_cn` columns
- `mutation_id` should be either `<sample>:<gt>:<chr>:<pos>` or `<chr>:<pos>`
cnvs: Copy numbers. Could be one of:
- Single-sample VCF files, separated by comma
- Multi-sample VCF file
- Must have `INFO/END` and `FORMAT/CN`
- PyClone TSV file
- See https://github.com/aroth85/pyclone/blob/master/examples/mixing/tsv/SRR385941.tsv without `*_counts` and `variant_freq` columns
@output:
`outdir:dir`: The output directory.
@args:
pyclone (str): Path to `PyClone`.
bcftools(str): Path to `bcftools`, used to get information from VCF file.
bedtools(str): Path to `bedtools`, used to intersect mutations with CNVs.
params (Diot): Other parameters for original `PyClone run_analysis_pipeline` function.
nthread (int): Number of threads to use by openblas.
mutctrl (int|NoneType): Index of the control sample in paired-sample mutation VCF files or multi-sample VCF file, 0-based.
- `None` indicates no control sample.
- For paired-sample VCF files, if `None` specified, second sample(1) will be used as control.
cnctrl (int|NoneType): Index of the control sample in copy number VCF files or multi-sample VCF file, 0-based.
"""),
input = "muts:var, cnvs:var",
output = "outdir:dir:{{i.muts | __import__('pathlib').Path \
| ?.is_file | =: [_] | !.split: ',' | [0] | fn }}.pyclone",
lang = params.python.value,
args = Diot(
pyclone = params.pyclone.value,
bcftools = params.bcftools.value,
bedtools = params.bedtools.value,
refgene = params.refgene.value,
nthread = 1,
params = Diot(),
mutctrl = None,
cnctrl = None,
)
)
pAllFIT = proc_factory(
desc = "Allele-Frequency-based Imputation of Tumor Purity Inference",
config = Diot(annotate = """
@description:
All-FIT - Allele-Frequency-based Imputation of Tumor Purity infers specimen purity from tumor-only samples sequenced with deep sequencing. It is developed in Khiabanian Lab by Jui Wan Loh and Hossein Khiabanian.
See: https://github.com/KhiabanianLab/All-FIT
@input:
infile: The input file, could be one of:
- Single(paired)-sample VCF file with `FORMAT/AF` and `FORMAT/AD`
- All-FIT format (see: https://github.com/KhiabanianLab/All-FIT/blob/master/test/input/sampleFile1.xls)
cnfile: The copy number variation file, could be one of:
- Omitted (then Ploidy in `infile` must be provided)
- Single(paired)-sample VCF file with `INFO/END` and `FORMAT/CN`
- Bed3 file with 4th column as the copy number.
@output:
outfile: The output file with purity
outdir: The output directory with output file and figures.
@args:
allfit (str): Path to All-FIT.py
bcftools (str): Path to `bcftools`, used to get information from VCF file.
bedtools (str): Path to `bedtools`, used to intersect mutations with CNVs.
params (Diot): Other parameters for All-FIT.py
mutctrl (int|NoneType): Index of the control sample in paired-sample mutation VCF file, 0-based.
- `None` indicates no control sample.
- For paired-sample VCF files, if `None` specified, second sample(1) will be used as control.
cnctrl (int|NoneType): Index of the control sample in copy number VCF file, 0-based.
nthread (int): Number of threads to use for openblas.
"""),
input = "infile:file, cnfile:var",
output = [
"outfile:file:{{i.infile | stem | @append: '.allfit'}}/{{i.infile | stem | @append: '.purity.txt'}}",
"outdir:dir:{{ i.infile | stem | @append: '.allfit'}}"
],
lang = params.python.value,
args = Diot(
allfit = params.allfit.value,
bcftools = params.bcftools.value,
bedtools = params.bedtools.value,
params = Diot(t = 'somatic'),
mutctrl = None,
cnctrl = None,
nthread = 1
)
)
pTMBurden = proc_factory(
desc = 'Calculation of tumor mutation burden.',
config = Diot(annotate = """
@input:
infile: The input MAF file
@output:
outfile: The tumor mutation burden file
@args:
type: The type of mutation burden.
- `nonsyn`: Counting nonsynonymous mutations
"""),
input = 'infile:file',
output = 'outfile:file:{{i.infile | stem}}.tmb.txt',
args = Diot(type = 'nonsyn'),
lang = params.python.value,
)
pQuantumClone = proc_factory(
desc = "Clonality analysis using QuantumClone",
lang = params.Rscript.value,
config = Diot(annotate = """
@description:
Clonality analysis using QuantumClone:
https://academic.oup.com/bioinformatics/article/34/11/1808/4802225
@input:
`vfvcfs:files`: The input vcf files with mutations
@output:
`outdir:dir`: The output directory
@args:
`params` : other parameters for `QuantumClone`'s `One_step_clustering`
`vfsamcol`: The index of the target sample in mutation VCF file, 1-based. Default: `1`
`varcount`: An R function string to define how to get the variant allele count. Default: `function(fmt) as.integer(unlist(strsplit(fmt$AD, ","))[2])`
- If this function returns `NULL`, record will be skipped.
- It can use the sample calls (`fmt`) and also the record info (`info`)
- Both `function(fmt) ...` and `function(fmt, info) ...` can be used.
- Don't include `info` if not necessary. This saves time.
- This function can return the variant count directly, or
- an R `list` like: `list(count = <var count>, depth = <depth>)`.
- By default, the `depth` will be read from `fmt$DP`
`nthread` : # threads to use. Default: `1`
"""))
pQuantumClone.input = 'vfvcfs:files'
pQuantumClone.output = "outdir:dir:{{i.vfvcfs | path.commonprefix | bn}}.qclone"
pQuantumClone.envs.path = path
pQuantumClone.args.params = Diot()
pQuantumClone.args.vfsamcol = 1 # 1-based
pQuantumClone.args.varcount = 'function(fmt) as.integer(unlist(strsplit(fmt$AD, ","))[2])'
pQuantumClone.args.nthread = 1
pTheta = proc_factory(
desc = 'Run THetA2 for tumor purity calculation',
config = Diot(annotate = """
@description:
Run THetA2 for tumor purity calculation
Set lower MIN_FRAC if interval is not enough and NO_CLUSTERING if it raises
"No valid Copy Number Profiles exist", but have to pay attention to the results.
(see: https://groups.google.com/forum/#!topic/theta-users/igrEUol3sZo)
THetA2 needs an interval file with the coverages of both tumor and normal and SNP files
with counts of mutation allele and reference allele for both tumor and normal as well.
@input:
infile: A bed3 file, or a THetA2 interval file.
- See: https://github.com/raphael-group/THetA/blob/master/example/Example.intervals
tumbam: bam file for tumor or SNP file for tumor.
- See: https://raw.githubusercontent.com/raphael-group/THetA/master/example/TUMOR_SNP.formatted.txt
normbam: bam file for normal or SNP file for normal.
- See: https://raw.githubusercontent.com/raphael-group/THetA/master/example/NORMAL_SNP.formatted.txt
@output:
outfile: The output file with purity.
outdir: The output directory with output files and figures.
@args:
theta (str): Path to THetA2
bam_readcount (str): Path to bam_readcount.
bedtools (str): Path to bedtools, used to extract coverage of regions.
samtools (str): Path to samtools, used to index bam file if possible.
ref (file): The reference genome file.
params (Diot): Other parameters for THetA2.
nthread (int): The number of threads to use.
affysnps (str): The affymetrix Array snps, or other candidate snp list, in BED6-like format
- The first 6 columns should be in BED6 format
- The 7th column is reference allele, and 8th column is mutation allele.
"""),
lang = params.python.value,
input = 'infile:file, tumbam:file, normbam:file',
output = [
'outfile:file:{{i.infile | stem}}.theta/{{i.infile | stem}}.purity.txt',
'outdir:dir:{{i.infile | stem}}.theta'
],
args = Diot(
theta = params.theta2.value,
bedtools = params.bedtools.value,
bam_readcount = params.bam_readcount.value,
samtools = params.samtools.value,
params = Diot(),
ref = params.ref.value,
nthread = 1,
affysnps = params.affysnps.value,
)
)
# Subclonal analysis with superFreq; configured attribute-style (older
# pyppl idiom), unlike most processes in this module.
pSuperFreq = proc_factory(
    desc = "Subclonal analysis with superFreq",
    lang = params.Rscript.value)
pSuperFreq.input = "indir:dir, gfile:file"
pSuperFreq.output = "outdir:dir:{{i.indir | fn2}}-{{i.gfile | fn2}}.superfreq"
pSuperFreq.args.nthread = 1
pSuperFreq.args.baits = '' # target regions
pSuperFreq.args.ref = params.ref.value
pSuperFreq.args.resdir = params.superfreq_res.value
pSuperFreq.args.genome = params.genome.value
# superFreq tuning defaults; see the superFreq documentation for details.
pSuperFreq.args.params = Diot(
    systematicVariance = .02, maxCov = 150, BQoffset = 33,
    mode = 'exome', splitRun = True
)
pClonEvol = proc_factory(
desc = "Inferring and visualizing clonal evolution in multi-sample cancer sequencing",
config = Diot(annotate = """
@input:
mutfile: The mutation file or output directory from PyClone.
- https://github.com/hdng/clonevol/issues/4#issuecomment-280997440
- VAF, CCF should x100
samfile: The sample information file.
drivers: The driver genes.
- One per line or
- Mutsig file with top 10 genes
@output:
outdir: The output directory.
@args:
inopts: The input options to read the mutation file.
params: The parameters for individual `ClonEvol` functions.
"""),
input = 'mutfile:file, samfile:file, drivers:var',
output = 'outdir:dir:{{i.mutfile | stem}}.clonevol',
lang = params.Rscript.value,
args = Diot(
# only for clonevol input format
inopts = Diot(rnames = False, cnames = True),
drivers = [],
refgene = params.refgene.value,
bedtools = params.bedtools.value,
devpars = Diot(width = 2000, height = 2000, res = 300),
params = Diot({
'plot.variant.clusters': Diot(
# see https://rdrr.io/github/hdng/clonevol/man/plot.variant.clusters.html
),
'plot.cluster.flow': Diot({
# see https://rdrr.io/github/hdng/clonevol/man/plot.cluster.flow.html
}),
'infer.clonal.models': Diot({
# see https://rdrr.io/github/hdng/clonevol/man/infer.clonal.models.html
"founding.cluster": 1,
"cluster.center": "mean",
"sum.p.cutoff": 0.05,
"alpha": 0.05,
}),
'transfer.events.to.consensus.trees': Diot({
# see https://rdrr.io/github/hdng/clonevol/man/transfer.events.to.consensus.trees.html
"event.col.name": "gene"
}),
'convert.consensus.tree.clone.to.branch': Diot({
# see https://rdrr.io/github/hdng/clonevol/man/convert.consensus.tree.clone.to.branch.html
"branch.scale": "sqrt"
}),
'plot.clonal.models': Diot({
# see https://rdrr.io/github/hdng/clonevol/man/plot.clonal.models.html
# "clone.shape" : 'bell',
# "bell.event" : True,
# "bell.event.label.color" : 'blue',
# "bell.event.label.angle" : 60,
# "clone.time.step.scale" : 1,
# "bell.curve.step" : 2,
# "merged.tree.plot" : True,
# "tree.node.label.split.character" : None,
# "tree.node.shape" : 'circle',
# "tree.node.size" : 30,
# "tree.node.text.size" : 0.5,
# "merged.tree.node.size.scale" : 1.25,
# "merged.tree.node.text.size.scale": 2.5,
# "merged.tree.cell.frac.ci" : False,
# "mtcab.event.sep.char" : ',',
"mtcab.branch.text.size" : .8,
# "mtcab.branch.width" : 0.75,
# "mtcab.node.size" : 3,
# "mtcab.node.label.size" : 1,
"mtcab.node.text.size" : .8,
# "cell.plot" : True,
# "num.cells" : 100,
# "cell.border.size" : 0.25,
# "cell.border.color" : 'black',
# "clone.grouping" : 'horizontal',
# "show.score" : False,
# "cell.frac.ci" : True,
# "disable.cell.frac" : False
})
})
)
)
pSchism = proc_factory(
desc = 'Infer subclonal hierarchy and the tumor evolution from somatic mutations using SCHISM',
config = Diot(annotate = """
@description:
Infer subclonal hierarchy and the tumor evolution from somatic mutations using SCHISM.
See: https://github.com/KarchinLab/SCHISM
@input:
infile: The input mutation file
- In format of:
```
sampleID mutationID referenceReads variantReads copyNumber clusterID
S1 0 368 132 2 0
S2 1 381 119 2 0
S3 2 367 133 3 1
...
```
- Or `pPyClone` output directory
- Or a MAF file
purity: The purity of the samples, two columns:
- Sample[tab]Purity, without header.
- If omitted, will assume 100% purity for all samples.
@output:
outdir: The output directory.
@args:
schism (str) : Path to runSchism
params (Diot) : Other parameters for runSchism in yaml config.
fishplot (bool): Whether generate a fishplot or not, requires fishplot install with R.
- Not implemented yet
Rscript (str) : Path to Rscript to run fishplot
"""),
lang = params.python.value,
input = 'infile:file, purity:file',
output = 'outdir:dir:{{i.infile | stem}}.schism',
args = Diot(
schism = params.schism.value,
dot = params.dot.value,
fishplot = True,
Rscript = params.Rscript.value,
devpars = Diot(res = 100),
params = Diot(
cellularity_estimator = Diot(),
hypothesis_test = Diot(),
genetic_algorithm = Diot()
)
)
)
pLichee = proc_factory(
desc = 'Fast and scalable inference of multi-sample cancer lineages using LICHeE',
config = Diot(annotate = """
@description:
Fast and scalable inference of multi-sample cancer lineages using LICHeE.
See: https://github.com/pwwang/lichee
@input:
infile: The input file, could be one of:
- LICHeE input file. Set `args.params.cp = True` if input data represents cell prevalence instead of VAF.
- A pPyClone output directory.
- A MAF file.
@output:
outdir: The output directory
@args:
dot (str): Path to dot to generate figures
params (Diot): Other parameters for `lichee`.
- Set a larger `e` if you have noisy data. Default is 0.1.
"""),
input = 'infile:file',
output = 'outdir:dir:{{i.infile | stem}}.lichee',
lang = params.python.value,
args = Diot(
dot = params.dot.value,
lichee = params.lichee.value,
fishplot = True,
Rscript = params.Rscript.value,
devpars = Diot(res = 100),
params = Diot(maxVAFAbsent = 0.005, minVAFPresent = 0.005)
)
)
# Process: convert a PyClone output directory into the ClonEvol input format.
pPyClone2ClonEvol = Proc(desc = "Convert PyClone results to ClonEvol input format.")
pPyClone2ClonEvol.input = 'indir:dir'
pPyClone2ClonEvol.output = 'outfile:file:{{i.indir | fn}}.clonevol.txt'
# Tool paths below come from the pipeline-wide `params` registry.
pPyClone2ClonEvol.args.refgene = params.refgene.value
# Optional list of driver genes; empty list by default.
pPyClone2ClonEvol.args.drivers = []
pPyClone2ClonEvol.args.bedtools = params.bedtools.value
pPyClone2ClonEvol.lang = params.python.value
| {
"repo_name": "pwwang/bioprocs",
"path": "bioprocs/tumhet.py",
"copies": "1",
"size": "16907",
"license": "mit",
"hash": -2281256013743400000,
"line_mean": 39.1591448931,
"line_max": 213,
"alpha_frac": 0.6664695097,
"autogenerated": false,
"ratio": 2.88318553888131,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.404965504858131,
"avg_score": null,
"num_lines": null
} |
# A set of regression tests for open issues
import cocotb
from cocotb.clock import Clock
from cocotb.triggers import RisingEdge, Timer, ReadOnly
from cocotb.result import TestFailure
from cocotb.binary import BinaryValue
@cocotb.coroutine
def send_data(dut):
    # Drive stream_in_valid high for exactly one clock cycle, then deassert.
    # (Legacy cocotb style: attribute assignment deposits a value on the
    # signal handle.)
    dut.stream_in_valid = 1
    yield RisingEdge(dut.clk)
    dut.stream_in_valid = 0
@cocotb.coroutine
def monitor(dut):
    # Wait four rising clock edges, then sample in the read-only phase and
    # require stream_in_valid to be asserted on that (5th) cycle.
    for i in range(4):
        yield RisingEdge(dut.clk)
    yield ReadOnly()
    if not dut.stream_in_valid.value.integer:
        raise TestFailure("stream_in_valid should be high on the 5th cycle")
@cocotb.test()
def issue_120_scheduling(dut):
    """Regression test for issue #120: a signal write made from a forked
    coroutine (send_data) should take effect on the same edge as an
    identical write made directly in the test body. The exact statement
    order here is the substance of the test — do not restructure."""
    cocotb.fork(Clock(dut.clk, 2500).start())
    cocotb.fork(monitor(dut))
    yield RisingEdge(dut.clk)
    # First attempt, not from coroutine - works as expected
    for i in range(2):
        dut.stream_in_valid = 1
        yield RisingEdge(dut.clk)
        dut.stream_in_valid = 0
        yield RisingEdge(dut.clk)
    # Failure - we don't drive valid on the rising edge even though
    # behaviour should be identical to the above
    yield send_data(dut)
    dut.stream_in_valid = 1
    yield RisingEdge(dut.clk)
    dut.stream_in_valid = 0
    yield RisingEdge(dut.clk)
| {
"repo_name": "mkreider/cocotb2",
"path": "tests/test_cases/issue_120/issue_120.py",
"copies": "5",
"size": "1215",
"license": "bsd-3-clause",
"hash": -5136968624657797000,
"line_mean": 24.3125,
"line_max": 76,
"alpha_frac": 0.6930041152,
"autogenerated": false,
"ratio": 3.1476683937823835,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 48
} |
"""A set of request processors that return dictionaries to be merged into a
template context. Each function takes the request object as its only parameter
and returns a dictionary to add to the context.
"""
from django.conf import settings
from django.db.utils import DatabaseError
from rapidsms.templatetags.tabs_tags import Tab
from uganda_common.models import Access
def authtabs(request):
    """Context processor adding the RapidSMS navigation tabs to the context.

    Builds the base tab list from ``settings.RAPIDSMS_TABS``, appends
    ``AUTHENTICATED_TABS`` for logged-in users, and removes tabs denied by
    the user's :class:`Access` record.

    Returns ``{"tabs": [...]}`` or ``{}`` if anything goes wrong.
    """
    tabs = [Tab(view, caption) for view, caption in settings.RAPIDSMS_TABS]
    try:
        if request.user.is_authenticated():
            for view, caption in getattr(settings, 'AUTHENTICATED_TABS', []):
                tabs.append(Tab(view, caption))
            try:
                access = Access.objects.get(user=request.user)
                # BUG FIX: the original removed items from `tabs` while
                # indexing over range(len(tabs)), which raises IndexError
                # (masked by the catch-all below) once an element is removed.
                # Filter into a new list instead.
                tabs = [tab for tab in tabs
                        if not access.denied(request, u_path=tab.url)]
            except (Access.DoesNotExist, DatabaseError):
                # No access record / unusable DB: keep the unfiltered tabs.
                pass
        for tab in tabs:
            tab.is_active = tab.url == request.get_full_path()
        return {
            "tabs": tabs
        }
    except Exception:
        # Deliberate best-effort: any unexpected failure (e.g. no user on
        # the request) yields an empty context rather than a 500.
        return {}
def module(request):
    """Set ``as_module`` in the context when the query string requests it."""
    wants_module = bool(request.GET) and 'as_module' in request.GET
    if wants_module:
        return {"as_module": True}
    return {}
| {
"repo_name": "unicefuganda/edtrac",
"path": "edtrac_project/rapidsms_uganda_common/uganda_common/context_processors.py",
"copies": "1",
"size": "1558",
"license": "bsd-3-clause",
"hash": 3455807045716507600,
"line_mean": 30.7959183673,
"line_max": 115,
"alpha_frac": 0.5866495507,
"autogenerated": false,
"ratio": 4.3277777777777775,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.02443127145298654,
"num_lines": 49
} |
"""A set of resources to download via API or URL links"""
from pyppl import Proc
from diot import Diot
from . import params, proc_factory
pTxt = proc_factory(
desc = 'Download CSV format files.',
config = Diot(annotate = """
@name:
pTxt
@description:
Download CSV format files.
@input:
`in`: The name of the resource
@output:
`outfile:file`: The output file
@args:
`cols`: Select the columns to keep. Default: '' (all cols)
`rowfilter`: Filter rows. For example, to filter out rows not start with 'Chr':
- `"lambda x: not x[0].startswith('Chr')"`
- Note that rowfilter applied before cols filter.
`urls`: Available resources and their urls.
`gz`: Whether to gzip the output file.
@requires:
[`curl`](https://en.wikipedia.org/wiki/CURL)
"""))
pTxt.input = "in"
pTxt.output = "outfile:file:{{i.in}}.txt{{args.gz | lambda x: '.gz' if x else ''}}"
pTxt.args.gz = False
pTxt.args.delimit = "\t"
pTxt.args.skip = 0
pTxt.args.cols = ''
pTxt.args.header = True
pTxt.args.rowfilter = ''
pTxt.args.transform = ''
pTxt.args.username = ''
pTxt.args.password = ''
pTxt.args.curl = params.curl.value
# pTxt.tplenvs.txtFilter = txt.filter.py
# pTxt.tplenvs.txtTransform = txt.transform.py
# pTxt.tplenvs.downloadCurl = download.curl.py
# pTxt.tplenvs.runcmd = runcmd.py
pTxt.args.urls = Diot({
'drugbank-target-all': 'https://www.drugbank.ca/releases/5-0-7/downloads/target-all-uniprot-links',
'drugbank-target-approved': 'https://www.drugbank.ca/releases/5-0-7/downloads/target-approved-uniprot-links',
'ccle-sample-info': 'https://data.broadinstitute.org/ccle_legacy_data/cell_line_annotations/CCLE_sample_info_file_2012-10-18.txt',
'ccle-rseq-rpkm': 'https://data.broadinstitute.org/ccle/CCLE_RNAseq_081117.rpkm.gct',
'ccle-rseq-reads': 'https://data.broadinstitute.org/ccle/CCLE_RNAseq_081117.reads.gct',
'KEGG_2016_gmt': 'http://amp.pharm.mssm.edu/Enrichr/geneSetLibrary?mode=text&libraryName=KEGG_2016',
'GO_Molecular_Function_2017_gmt': 'http://amp.pharm.mssm.edu/Enrichr/geneSetLibrary?mode=text&libraryName=GO_Molecular_Function_2017',
'GO_Cellular_Component_2017_gmt': 'http://amp.pharm.mssm.edu/Enrichr/geneSetLibrary?mode=text&libraryName=GO_Cellular_Component_2017',
'GO_Biological_Process_2017_gmt': 'http://amp.pharm.mssm.edu/Enrichr/geneSetLibrary?mode=text&libraryName=GO_Biological_Process_2017',
'TargetScan_microRNA': 'http://amp.pharm.mssm.edu/Enrichr/geneSetLibrary?mode=text&libraryName=TargetScan_microRNA',
'TRANSFAC_and_JASPAR_PWMs': 'http://amp.pharm.mssm.edu/Enrichr/geneSetLibrary?mode=text&libraryName=TRANSFAC_and_JASPAR_PWMs',
})
pTxt.lang = params.python.value
# Process: download a genePred table and convert it to GTF.
pGtf = proc_factory(
    desc = 'Download GTF files.',
    config = Diot(annotate = """
    @name:
    pGtf
    """))
pGtf.input = "in"
pGtf.output = "outfile:file:{{in}}.gtf{{args.gz | lambda x: '.gz' if x else ''}}"
# Whether to gzip the final GTF.
pGtf.args.gz = False
pGtf.args.curl = 'curl'
# Credentials for protected URLs; empty means anonymous download.
pGtf.args.username = ''
pGtf.args.password = ''
pGtf.args.genepredtogtf = 'genePredToGtf'
# pGtf.tplenvs.runcmd = runcmd.py
# pGtf.tplenvs.downloadCurl = download.curl.py
# Known resources, keyed by input `in`.
# BUG FIX: 'hg38-refgene' was missing the slash between "hg38" and
# "database" (compare the hg38-knowngene entry below).
pGtf.args.urls = {
    'hg19-refgene': 'http://hgdownload.cse.ucsc.edu/goldenPath/hg19/database/refGene.txt.gz',
    'hg19-knowngene': 'http://hgdownload.cse.ucsc.edu/goldenPath/hg19/database/knownGene.txt.gz',
    'hg38-refgene': 'http://hgdownload.cse.ucsc.edu/goldenPath/hg38/database/refGene.txt.gz',
    'hg38-knowngene': 'http://hgdownload.cse.ucsc.edu/goldenPath/hg38/database/knownGene.txt.gz',
}
pGtf.lang = params.python.value
# Template script rendered by the pipeline engine ({{...}} placeholders):
# downloads the table, decompresses gzip/zip payloads, drops the leading
# bin column, then converts genePred -> GTF via genePredToGtf.
pGtf.script = """
import os, shutil
from subprocess import check_output
{{downloadCurl}}
{{runcmd}}
url = {{args.urls | json}}["{{in}}"]
tmpdir = "{{job.outdir}}/tmp"
if not os.path.exists(tmpdir):
    os.makedirs(tmpdir)
downfile = os.path.join(tmpdir, 'downloaded')
downloadCurl(url, downfile, {{args.username | quote}}, {{args.password | quote}}, {{args.curl | quote}})
outfile = "{{outfile}}"[:-3] if {{args.gz | bool}} else "{{outfile}}"
output = check_output(['file', downfile])
if 'gzip' in output:
    ugfile = downfile + '.ungz'
    with open(ugfile, 'w') as f:
        f.write(check_output(['gunzip', downfile, '-c']))
    downfile = ugfile
elif 'Zip' in output:
    zipdir = os.path.join(tmpdir, '_unzipped')
    import zipfile, glob
    zipref = zipfile.ZipFile(downfile, 'r')
    zipref.extractall(zipdir)
    zipref.close()
    downfile = glob.glob(os.path.join(zipdir, '*'))[0]
cutfile = downfile + '.cutf2'
with open(downfile) as fin, open(cutfile, 'w') as fout:
    for line in fin:
        if not line.strip(): continue
        fout.write("\\t".join(line.split("\\t")[1:]))
runcmd ('{{args.genepredtogtf}} file "%s" "%s" -source="{{in}}"' % (cutfile, outfile))
if {{args.gz | bool}}:
    runcmd ('gz "%s"' % outfile)
shutil.rmtree(tmpdir)
"""
| {
"repo_name": "pwwang/bioprocs",
"path": "bioprocs/resource.py",
"copies": "1",
"size": "4923",
"license": "mit",
"hash": 6314637772847483000,
"line_mean": 41.8086956522,
"line_max": 135,
"alpha_frac": 0.6721511274,
"autogenerated": false,
"ratio": 2.685761047463175,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.3857912174863175,
"avg_score": null,
"num_lines": null
} |
"""A set of re-usable functions for mapping inputs to outputs
For each function we need to map the current state of a set of input variables
to a set of output *distributions*.
NOTES
-----
1. For variables with only two states which we consider binary, we adopt the
convention of state 0 = False and state 1 = True
2. I use the convention of prefixing them with `f_` to avoid conflicts with
keywords (like `and`).
"""
def same_(i, o):
    """Identity mapping: all probability mass goes to output state *i*."""
    o[i] = 1.0
def rotate_right_(i, o):
    """Shift the input state one position right, wrapping at len(o)."""
    shifted = (i + 1) % len(o)
    o[shifted] = 1.0
def xnor_(i1, i2, o):
    """XNOR: output state 1 (True) exactly when the two inputs are equal."""
    o[1 if i1 == i2 else 0] = 1.0
def xor_(i1, i2, o):
    """XOR: output state 1 (True) when exactly one input is truthy."""
    o[1 if bool(i1) != bool(i2) else 0] = 1.0
def and_(i1, i2, o):
    """AND: output state 1 (True) only when both inputs are truthy."""
    o[1 if i1 and i2 else 0] = 1.0
def anotb_(i1, i2, o):
    """A AND NOT B: output state 1 (True) when i1 is truthy and i2 is not."""
    o[1 if i1 and not i2 else 0] = 1.0
def or_(i1, i2, o):
    """OR: output state 1 (True) when at least one input is truthy."""
    o[1 if i1 or i2 else 0] = 1.0
def branch_same_(i, o1, o2):
    """Copy the input state *i* onto both output distributions."""
    for dist in (o1, o2):
        dist[i] = 1.0
| {
"repo_name": "brettc/causalinfo",
"path": "causalinfo/equations.py",
"copies": "1",
"size": "1063",
"license": "mit",
"hash": -8949569322327518000,
"line_mean": 16.1451612903,
"line_max": 78,
"alpha_frac": 0.5230479774,
"autogenerated": false,
"ratio": 2.5552884615384617,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8578336438938462,
"avg_score": 0,
"num_lines": 62
} |
"""A set of reward utilities written by the authors of dm_control"""
from multiprocessing import Value
import numpy as np
# The value returned by tolerance() at `margin` distance from `bounds` interval.
_DEFAULT_VALUE_AT_MARGIN = 0.1
def _sigmoids(x, value_at_1, sigmoid):
"""Returns 1 when `x` == 0, between 0 and 1 otherwise.
Args:
x: A scalar or numpy array.
value_at_1: A float between 0 and 1 specifying the output when `x` == 1.
sigmoid: String, choice of sigmoid type.
Returns:
A numpy array with values between 0.0 and 1.0.
Raises:
ValueError: If not 0 < `value_at_1` < 1, except for `linear`, `cosine` and
`quadratic` sigmoids which allow `value_at_1` == 0.
ValueError: If `sigmoid` is of an unknown type.
"""
if sigmoid in ('cosine', 'linear', 'quadratic'):
if not 0 <= value_at_1 < 1:
raise ValueError(
'`value_at_1` must be nonnegative and smaller than 1, '
'got {}.'.format(value_at_1))
else:
if not 0 < value_at_1 < 1:
raise ValueError('`value_at_1` must be strictly between 0 and 1, '
'got {}.'.format(value_at_1))
if sigmoid == 'gaussian':
scale = np.sqrt(-2 * np.log(value_at_1))
return np.exp(-0.5 * (x * scale)**2)
elif sigmoid == 'hyperbolic':
scale = np.arccosh(1 / value_at_1)
return 1 / np.cosh(x * scale)
elif sigmoid == 'long_tail':
scale = np.sqrt(1 / value_at_1 - 1)
return 1 / ((x * scale)**2 + 1)
elif sigmoid == 'reciprocal':
scale = 1 / value_at_1 - 1
return 1 / (abs(x) * scale + 1)
elif sigmoid == 'cosine':
scale = np.arccos(2 * value_at_1 - 1) / np.pi
scaled_x = x * scale
return np.where(
abs(scaled_x) < 1, (1 + np.cos(np.pi * scaled_x)) / 2, 0.0)
elif sigmoid == 'linear':
scale = 1 - value_at_1
scaled_x = x * scale
return np.where(abs(scaled_x) < 1, 1 - scaled_x, 0.0)
elif sigmoid == 'quadratic':
scale = np.sqrt(1 - value_at_1)
scaled_x = x * scale
return np.where(abs(scaled_x) < 1, 1 - scaled_x**2, 0.0)
elif sigmoid == 'tanh_squared':
scale = np.arctanh(np.sqrt(1 - value_at_1))
return 1 - np.tanh(x * scale)**2
else:
raise ValueError('Unknown sigmoid type {!r}.'.format(sigmoid))
def tolerance(x,
              bounds=(0.0, 0.0),
              margin=0.0,
              sigmoid='gaussian',
              value_at_margin=_DEFAULT_VALUE_AT_MARGIN):
    """Reward that is 1 inside `bounds` and decays toward 0 outside.

    Args:
      x: A scalar or numpy array.
      bounds: Inclusive `(lower, upper)` target interval; either end may be
        infinite, and the ends may coincide for an exact target.
      margin: Controls how steeply the output falls off outside the bounds.
        With `margin == 0` the output is a hard 0/1 window; with
        `margin > 0` the output decays sigmoidally with distance.
      sigmoid: Name of the decay shape ('gaussian', 'linear', 'hyperbolic',
        'long_tail', 'cosine', 'tanh_squared', ...).
      value_at_margin: Output value when `x` is exactly `margin` away from
        the nearest bound; ignored when `margin == 0`.

    Returns:
      A float or numpy array with values between 0.0 and 1.0.

    Raises:
      ValueError: If `bounds[0] > bounds[1]` or `margin` is negative.
    """
    lower, upper = bounds
    if lower > upper:
        raise ValueError('Lower bound must be <= upper bound.')
    if margin < 0:
        raise ValueError('`margin` must be non-negative. Current value: {}'.format(margin))

    in_bounds = np.logical_and(lower <= x, x <= upper)
    if margin == 0:
        # Hard window: 1 inside the bounds, 0 everywhere else.
        value = np.where(in_bounds, 1.0, 0.0)
    else:
        # Distance to the nearest bound, measured in units of `margin`.
        distance = np.where(x < lower, lower - x, x - upper) / margin
        value = np.where(in_bounds, 1.0,
                         _sigmoids(distance, value_at_margin, sigmoid))

    return float(value) if np.isscalar(x) else value
def inverse_tolerance(x,
                      bounds=(0.0, 0.0),
                      margin=0.0,
                      sigmoid='reciprocal'):
    """Complement of `tolerance`: 0 inside `bounds`, rising toward 1 outside.

    Args:
      x: A scalar or numpy array.
      bounds: Inclusive `(lower, upper)` target interval; either end may be
        infinite, and the ends may coincide for an exact target.
      margin: Controls how steeply the output rises outside the bounds
        (see `tolerance`).
      sigmoid: Name of the decay shape passed through to `tolerance`.

    Returns:
      A float or numpy array with values between 0.0 and 1.0.

    Raises:
      ValueError: If `bounds[0] > bounds[1]` or `margin` is negative.
    """
    # Delegate to `tolerance` with value_at_margin pinned to 0 and invert.
    return 1 - tolerance(x,
                         bounds=bounds,
                         margin=margin,
                         sigmoid=sigmoid,
                         value_at_margin=0)
def rect_prism_tolerance(curr, zero, one):
    """Reward for `curr` relative to an axis-aligned rectangular prism.

    `zero` and `one` are diagonally opposite corners of the prism:
    `zero` is the corner with reward 0 and `one` the corner with reward 1.
    Inside the prism the reward is the product of the per-axis fractional
    distances from `zero`; outside the prism this returns 1.0.

    Args:
      curr (np.ndarray): The point being assessed, shape (3,).
      zero (np.ndarray): Corner of the prism with reward 0, shape (3,).
      one (np.ndarray): Opposite corner with reward 1, shape (3,).
    """
    def _between(val, end_a, end_b):
        # Inclusive containment that works whichever corner is larger.
        if end_b >= end_a:
            return float(end_a <= val <= end_b)
        return float(end_b <= val <= end_a)

    inside = (_between(curr[0], zero[0], one[0]) and
              _between(curr[1], zero[1], one[1]) and
              _between(curr[2], zero[2], one[2]))
    if not inside:
        return 1.
    span = one - zero
    fractions = [(curr[axis] - zero[axis]) / span[axis] for axis in range(3)]
    return fractions[0] * fractions[1] * fractions[2]
def hamacher_product(a, b):
    """Hamacher (t-norm) product of `a` and `b`: (a*b) / (a + b - a*b).

    Args:
        a (float): 1st term, in [0, 1].
        b (float): 2nd term, in [0, 1].

    Returns:
        float: The Hamacher product (0 when the denominator is 0).

    Raises:
        ValueError: If `a` or `b` lies outside [0, 1].
    """
    if not (0. <= a <= 1.) or not (0. <= b <= 1.):
        raise ValueError("a and b must range between 0 and 1")
    denom = a + b - a * b
    result = (a * b) / denom if denom > 0 else 0
    assert 0. <= result <= 1.
    return result
| {
"repo_name": "rlworkgroup/metaworld",
"path": "metaworld/envs/reward_utils.py",
"copies": "1",
"size": "8058",
"license": "mit",
"hash": -2308301391546246000,
"line_mean": 35.6272727273,
"line_max": 91,
"alpha_frac": 0.5738396624,
"autogenerated": false,
"ratio": 3.6946354883081156,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4768475150708116,
"avg_score": null,
"num_lines": null
} |
# A set of routines for finding images and the like based on file names.
# This includes all of the appropriate handling for templates, directories
# and the like.
#
# Also routines for grouping sets of images together into sweeps based on
# the file names and the information in image headers.
import logging
import math
import os
import re
import string
logger = logging.getLogger("xia2.Experts.FindImages")
# N.B. these are reversed patterns...
# Patterns are matched against the REVERSED filename, so the image number
# is the LAST run of 2-12 digits in the name.
patterns = [
    r"([0-9]{2,12})\.(.*)",
    r"(.*)\.([0-9]{2,12})_(.*)",
    r"(.*)\.([0-9]{2,12})(.*)",
]

# Character that joined the prefix to the digit field, per pattern above.
joiners = [".", "_", ""]

compiled_patterns = [re.compile(pattern) for pattern in patterns]


def template_regex(filename):
    """Work out the template form of an image file name.

    The image index is assumed to be the last run of (2-12) digits in the
    name; it is replaced by the same number of '#' characters.

    Args:
        filename: an image file name, e.g. ``foo_bar_001.img``.

    Returns:
        A ``(template, image_number)`` tuple, e.g.
        ``("foo_bar_###.img", 1)``.

    Raises:
        RuntimeError: if no pattern recognises the file name.
    """
    rfilename = filename[::-1]

    # NOTE: the original declared ``global patterns, compiled_patterns``
    # here; nothing is ever assigned to them, so the statement was a no-op
    # and has been removed.
    for j, cp in enumerate(compiled_patterns):
        match = cp.match(rfilename)
        if not match:
            continue
        groups = match.groups()

        # Un-reverse each captured group back into reading order.
        if len(groups) == 3:
            exten = "." + groups[0][::-1]
            digits = groups[1][::-1]
            prefix = groups[2][::-1] + joiners[j]
        else:
            exten = ""
            digits = groups[0][::-1]
            prefix = groups[1][::-1] + joiners[j]

        return prefix + ("#" * len(digits)) + exten, int(digits)

    raise RuntimeError("template not recognised for %s" % filename)
def work_template_regex():
    """Self-test for template_regex over a table of known answers."""
    cases = {
        "foo_bar_001.img": "foo_bar_###.img",
        "foo_bar001.img": "foo_bar###.img",
        "foo_bar_1.8A_001.img": "foo_bar_1.8A_###.img",
        "foo_bar.001": "foo_bar.###",
        "foo_bar_001.img1000": "foo_bar_###.img1000",
        "foo_bar_00001.img": "foo_bar_#####.img",
    }
    for filename, expected in cases.items():
        template, _number = template_regex(filename)
        assert template == expected
def image2template(filename):
    """Return only the template part of *filename*."""
    template, _image_number = template_regex(filename)
    return template
def image2image(filename):
    """Return only the image number part of *filename*."""
    _template, image_number = template_regex(filename)
    return image_number
def image2template_directory(filename):
    """Split an image path into its (template, directory) parts."""
    directory, image = os.path.split(os.path.abspath(filename))

    # Imported lazily, matching the original, to avoid import cycles.
    from xia2.Applications.xia2setup import is_hdf5_name

    if is_hdf5_name(filename):
        # HDF5 containers carry no numeric template; use the bare name.
        return image, directory

    return image2template(image), directory
def find_matching_images(template, directory):
    """Return the sorted image numbers in *directory* that match *template*."""
    width = template.count("#")
    # Escape the template so special characters (e.g. "+") are taken
    # literally — fix from 30/may/2008 for a problem reported by Joel B. —
    # then turn the run of #'s into a capturing group of exactly that many
    # digits, e.g. ### -> ([0-9]{3}).
    regexp = re.compile(
        re.escape(template).replace("\\#" * width, "([0-9]{%d})" % width))
    # FIXME there are faster ways of determining this - by generating the
    # lists of possible images. That said, the code for this is in dxtbx...
    hits = (regexp.match(name) for name in os.listdir(directory))
    return sorted(int(hit.group(1)) for hit in hits if hit)
def template_directory_number2image(template, directory, number):
    """Build the full path of image *number* from *template* and *directory*."""
    width = template.count("#")

    # Refuse numbers that cannot be represented in the template's digit field.
    if (math.pow(10, width) - 1) < number:
        raise RuntimeError("number too big for template")

    # Zero-pad the number to exactly the width of the '#' run.
    digits = ("%%0%dd" % width) % number
    return os.path.join(directory, template.replace("#" * width, digits))
def common_prefix(strings):
    """Find a common prefix among the list of strings. May return an empty
    string.

    Replaces the original hand-rolled O(n^2) shrink loop with the standard
    library's character-wise ``os.path.commonprefix`` (which, despite living
    in os.path, operates on plain strings). This also handles an empty input
    list (returns "") where the original raised IndexError.
    """
    return os.path.commonprefix(strings)
def ensure_no_batches_numbered_zero(template, images, offset):
    """Working in collaboration with digest_template, ensure that none of
    the images end up being numbered 0, and if they do try to add last digit of
    template section. Finally, if this extra character is not a digit raise
    an exception."""
    # Fast path: no image numbered 0, nothing to do.
    if min(images) > 0:
        return template, images, offset
    prefix = template.split("#")[0]
    suffix = template.split("#")[-1]
    hashes = template.count("#")
    # Repeatedly move the trailing digit of the prefix back into the numeric
    # field (widening it by one '#') until no image is numbered 0.
    while min(images) == 0:
        if not prefix[-1] in string.digits:
            raise RuntimeError("image 0 found matching %s" % template)
        # Value contributed by that digit at its positional weight; it is
        # added back to every image number and subtracted from the offset
        # so that (offset + image) stays invariant.
        add = int(prefix[-1]) * int(math.pow(10, hashes))
        offset -= add
        hashes += 1
        prefix = prefix[:-1]
        images = [add + i for i in images]
    template = "%s%s%s" % (prefix, "#" * hashes, suffix)
    return template, images, offset
def digest_template(template, images):
    """Digest the template and image numbers to copy as much of the
    common characters in the numbers as possible to the template to
    give smaller image numbers."""
    length = template.count("#")
    # Render every image number at the template's fixed digit width.
    format = "%%0%dd" % length
    strings = [format % i for i in images]
    offset = 0
    if len(strings) > 1:
        # Leading digits shared by ALL image numbers can be folded into the
        # template, leaving smaller per-image numbers plus a base offset.
        prefix = common_prefix(strings)
        if prefix:
            offset = int(prefix + "0" * (length - len(prefix)))
            template = template.replace(len(prefix) * "#", prefix, 1)
            images = [int(s.replace(prefix, "", 1)) for s in strings]
    try:
        template, images, offset = ensure_no_batches_numbered_zero(
            template, images, offset
        )
    except RuntimeError:
        # Could not renumber away a zero batch: drop image 0 and retry.
        logger.debug("Throwing away image 0 from template %s", template)
        template, images, offset = ensure_no_batches_numbered_zero(
            template, images[1:], offset
        )
    return template, images, offset
if __name__ == "__main__":
    # Run the template-recognition self-test when executed directly.
    work_template_regex()
| {
"repo_name": "xia2/xia2",
"path": "src/xia2/Experts/FindImages.py",
"copies": "1",
"size": "6640",
"license": "bsd-3-clause",
"hash": 8144996665236907000,
"line_mean": 26.6666666667,
"line_max": 85,
"alpha_frac": 0.6153614458,
"autogenerated": false,
"ratio": 3.871720116618076,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4987081562418076,
"avg_score": null,
"num_lines": null
} |
""" A set of scoring functions. """
import numpy as np
from .pyglmnet import _logL
def deviance(y, yhat, distr):
    """Deviance metric: -2 times the log-likelihood ratio against the
    saturated model.

    Parameters
    ----------
    y : array
        Target labels of shape (n_samples, )
    yhat : array
        Predicted labels of shape (n_samples, )
    distr: str
        distribution

    Returns
    -------
    score : float
        Deviance of the predicted labels.
    """
    # For these distributions the saturated log-likelihood is non-trivial;
    # otherwise it is taken to be zero.
    if distr in ('softplus', 'poisson'):
        saturated = _logL(distr, y, y)
    else:
        saturated = 0
    fitted = _logL(distr, y, yhat)
    return -2 * (fitted - saturated)
def pseudo_R2(X, y, yhat, ynull_, distr):
    """Pseudo-R2 metric comparing fitted, null and saturated log-likelihoods.

    Parameters
    ----------
    y : array
        Target labels of shape (n_samples, )
    yhat : array
        Predicted labels of shape (n_samples, )
    ynull_ : float
        Mean of the target labels (null model prediction)
    distr: str
        distribution

    Returns
    -------
    score : float
        Pseudo-R2 score.
    """
    has_saturated = distr in ('softplus', 'poisson')
    LS = _logL(distr, y, y) if has_saturated else 0
    L0 = _logL(distr, y, ynull_)
    L1 = _logL(distr, y, yhat)
    if has_saturated:
        return 1 - (LS - L1) / (LS - L0)
    return 1 - L1 / L0
def accuracy(y, yhat):
    """Accuracy as ratio of correct predictions.

    Parameters
    ----------
    y : array
        Target labels of shape (n_samples, )
    yhat : array
        Predicted labels of shape (n_samples, )

    Returns
    -------
    accuracy : float
        Accuracy score.
    """
    n_correct = np.sum(y == yhat)
    return float(n_correct) / yhat.shape[0]
| {
"repo_name": "pavanramkumar/pyglmnet",
"path": "pyglmnet/metrics.py",
"copies": "1",
"size": "1683",
"license": "mit",
"hash": -4882592893828541000,
"line_mean": 17.9101123596,
"line_max": 57,
"alpha_frac": 0.5246583482,
"autogenerated": false,
"ratio": 3.386317907444668,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9410976255644667,
"avg_score": 0,
"num_lines": 89
} |
""" A set of scoring functions. """
import numpy as np
from .pyglmnet import _logL
def deviance(y, yhat, distr, theta):
    """Deviance metric: -2 times the log-likelihood ratio against the
    saturated model.

    Parameters
    ----------
    y : array
        Target labels of shape (n_samples, )
    yhat : array
        Predicted labels of shape (n_samples, )
    distr: str
        distribution

    Returns
    -------
    score : float
        Deviance of the predicted labels.
    """
    # For these distributions the saturated log-likelihood is non-trivial;
    # otherwise it is taken to be zero.
    if distr in ('softplus', 'poisson', 'neg-binomial'):
        saturated = _logL(distr, y, y, theta=theta)
    else:
        saturated = 0
    fitted = _logL(distr, y, yhat, theta=theta)
    return -2 * (fitted - saturated)
def pseudo_R2(X, y, yhat, ynull_, distr, theta):
    """Pseudo-R2 metric comparing fitted, null and saturated log-likelihoods.

    Parameters
    ----------
    y : array
        Target labels of shape (n_samples, )
    yhat : array
        Predicted labels of shape (n_samples, )
    ynull_ : float
        Mean of the target labels (null model prediction)
    distr: str
        distribution

    Returns
    -------
    score : float
        Pseudo-R2 score.
    """
    has_saturated = distr in ('softplus', 'poisson', 'neg-binomial')
    LS = _logL(distr, y, y, theta=theta) if has_saturated else 0
    L0 = _logL(distr, y, ynull_, theta=theta)
    L1 = _logL(distr, y, yhat, theta=theta)
    if has_saturated:
        return 1 - (LS - L1) / (LS - L0)
    return 1 - L1 / L0
def accuracy(y, yhat):
    """Accuracy as ratio of correct predictions.

    Parameters
    ----------
    y : array
        Target labels of shape (n_samples, )
    yhat : array
        Predicted labels of shape (n_samples, )

    Returns
    -------
    accuracy : float
        Accuracy score.
    """
    n_correct = np.sum(y == yhat)
    return float(n_correct) / yhat.shape[0]
| {
"repo_name": "glm-tools/pyglmnet",
"path": "pyglmnet/metrics.py",
"copies": "1",
"size": "1810",
"license": "mit",
"hash": -4997021577522525000,
"line_mean": 19.3370786517,
"line_max": 57,
"alpha_frac": 0.5392265193,
"autogenerated": false,
"ratio": 3.376865671641791,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4416092190941791,
"avg_score": null,
"num_lines": null
} |
# A set of sorting algorithm implementations that will log the sorting
# process in a neat graph. Thus, the main goal is more of an educational
# one, or, in my case, algorithmic composition.
import copy
from GraphStructures import *
class QuickSorter():
    """Quicksort that records every pivot/partition/merge step as nodes and
    edges in a Graph, so the sorting process can be replayed later.

    NOTE: node ids come from `node_counter`, which is incremented in a fixed
    order; do not reorder the increments or the graph becomes ambiguous.
    """
    # this implementation doesn't really work in-place, but it's not that
    # important for this type of task ...
    def __init__(self, unsorted, direction="asc"):
        # the input list to be sorted
        self.unsorted = unsorted
        # graph of sorting steps, one node per intermediate list
        self.sorting_graph = Graph()
        # incremental id source for graph nodes
        self.node_counter = 0
        # sorting direction: "asc" or anything else for descending
        self.direction = direction
    def quicksort(self, node_to_sort, prune_pivots):
        """Recursively sort `node_to_sort.content`, logging each step in the
        graph; returns the node holding the sorted result."""
        if len(node_to_sort.content) == 0:
            return node_to_sort
        # generate pivot and unsorted lists
        pivot = node_to_sort.content[0]
        # set sorting direction
        if self.direction == "asc":
            left_unsorted = [x for x in node_to_sort.content[1:] if x <= pivot]
            right_unsorted = [x for x in node_to_sort.content[1:] if x > pivot]
        else:
            left_unsorted = [x for x in node_to_sort.content[1:] if x > pivot]
            right_unsorted = [x for x in node_to_sort.content[1:] if x <= pivot]
        # generate graph
        self.node_counter += 1
        left_unsorted_node = Node(self.node_counter, left_unsorted)
        self.sorting_graph.add_node(left_unsorted_node)
        self.sorting_graph.add_edge(node_to_sort.id, left_unsorted_node.id)
        # increment node counter, create pivot node
        self.node_counter += 1
        # create pivot node and mark it as pivot, using the metadata ...
        pivot_node = Node(self.node_counter, [pivot], "pivot")
        self.sorting_graph.add_node(pivot_node)
        self.sorting_graph.add_edge(node_to_sort.id, pivot_node.id)
        # create right node
        self.node_counter += 1
        right_unsorted_node = Node(self.node_counter, right_unsorted)
        self.sorting_graph.add_node(right_unsorted_node)
        self.sorting_graph.add_edge(node_to_sort.id, right_unsorted_node.id)
        # SORTING RECURSION !!
        # SORTING RECURSION !!!
        left_sorted_node = self.quicksort(left_unsorted_node, prune_pivots)
        right_sorted_node = self.quicksort(right_unsorted_node, prune_pivots)
        # SORTING RECURSION !!!
        # SORTING RECURSION !!
        # assemble sorted lists, the one to be returned ...
        result = left_sorted_node.content + [pivot] + right_sorted_node.content
        # complete graph
        self.node_counter += 1
        result_node = Node(self.node_counter, result)
        self.sorting_graph.add_node(result_node)
        # add edges
        self.sorting_graph.add_edge(left_sorted_node.id, result_node.id)
        self.sorting_graph.add_edge(right_sorted_node.id, result_node.id)
        if not prune_pivots:
            self.sorting_graph.add_edge(pivot_node.id, result_node.id)
        return result_node
    def sort(self, *args, **kwargs):
        """Sort `self.unsorted`, returning the sorted list.

        Keyword args:
            prune_pivots (bool): omit pivot->result edges in the graph.
        """
        # generate initial node
        prune_pivots = kwargs.get("prune_pivots", False)
        initial_node = Node(self.node_counter, self.unsorted)
        self.sorting_graph.add_node(initial_node)
        # return the final node's content
        return self.quicksort(initial_node, prune_pivots).content
class MergeSorter():
    """Merge sort that records every step as nodes/edges in a Graph.

    :param unsorted: the sequence to be sorted
    :param direction: "asc" for ascending order, "desc" for descending
    """

    def __init__(self, unsorted, direction="asc"):
        self.unsorted = unsorted
        self.sorting_graph = Graph()
        # count the sorting steps, this will serve as node id
        # the incremental nature of this also allows us to reconstruct the
        # sorting process later on
        self.step_counter = 0
        # the sorting direction, ascending or descending
        self.direction = direction

    def merge(self, left_sorted_node, right_sorted_node):
        """Merge two sorted nodes into a new graph node and return it.

        The merged node is added to the graph with edges from both inputs.
        """
        result = []
        i = 0
        j = 0
        while i < len(left_sorted_node.content) and j < len(right_sorted_node.content):
            left_head = left_sorted_node.content[i]
            right_head = right_sorted_node.content[j]
            if self.direction == "desc":
                # BUGFIX: a descending merge must take the *larger* head
                # first; the original branch repeated the ascending
                # comparison, so direction="desc" produced ascending output.
                take_left = left_head >= right_head
            else:
                # ascending (default): take the smaller head first
                take_left = left_head <= right_head
            if take_left:
                result.append(left_head)
                i = i + 1
            else:
                result.append(right_head)
                j = j + 1
        # append whatever remains on either side (one of these is empty)
        result += left_sorted_node.content[i:]
        result += right_sorted_node.content[j:]
        self.step_counter += 1
        merged_node = Node(self.step_counter, result)
        self.sorting_graph.add_node(merged_node)
        self.sorting_graph.add_edge(left_sorted_node.id, merged_node.id)
        self.sorting_graph.add_edge(right_sorted_node.id, merged_node.id)
        # return list and node id
        return merged_node

    def mergesort(self, current_node):
        """Recursively sort ``current_node``; returns the sorted node."""
        if len(current_node.content) < 2:
            # empty or single element: already sorted
            return current_node
        else:
            middle = len(current_node.content) // 2
            # add UNsorted halves to the graph, each with a fresh step id
            self.step_counter += 1
            left_unsorted_node = Node(self.step_counter, current_node.content[:middle])
            self.step_counter += 1
            right_unsorted_node = Node(self.step_counter, current_node.content[middle:])
            self.sorting_graph.add_node(left_unsorted_node)
            self.sorting_graph.add_node(right_unsorted_node)
            # add edges for UNsorted nodes to graph
            self.sorting_graph.add_edge(current_node.id, left_unsorted_node.id)
            self.sorting_graph.add_edge(current_node.id, right_unsorted_node.id)
            # actually sort the lists ...
            left_sorted_node = self.mergesort(left_unsorted_node)
            right_sorted_node = self.mergesort(right_unsorted_node)
            # finish sorting recursion
            return self.merge(left_sorted_node, right_sorted_node)

    def sort(self):
        """Sort ``self.unsorted`` and return the sorted list."""
        # generate initial node
        initial_node = Node(self.step_counter, self.unsorted)
        self.sorting_graph.add_node(initial_node)
        # return the final node's content
        return self.mergesort(initial_node).content
class SorterTool():
    """Utility operations on sorting graphs."""

    def quicksort_prune_pivots(self, graph):
        # Return a deep copy of ``graph`` in which every pivot node has had
        # its outgoing edges removed; the input graph is left untouched.
        pruned_graph = copy.deepcopy(graph)
        for node in pruned_graph.nodes:
            if node.meta == "pivot":
                pruned_graph.edges[node.id] = []
        return pruned_graph
| {
"repo_name": "the-drunk-coder/graph-o-sort",
"path": "GraphingSorters.py",
"copies": "1",
"size": "6759",
"license": "unlicense",
"hash": 9199313406089593000,
"line_mean": 46.2657342657,
"line_max": 88,
"alpha_frac": 0.613108448,
"autogenerated": false,
"ratio": 3.730132450331126,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4843240898331126,
"avg_score": null,
"num_lines": null
} |
''' A set of standard sources, filters, sorters and sinks
'''
import random
import datetime
from track_manager import tlib
import json
class Annotator(object):
    ''' Annotates the tracks in a stream with external information

        :param source: the source of tracks
        :param type: the type of annotation (spotify, echonest)
    '''
    def __init__(self, source, type):
        self.annotator = tlib.get_annotator(type)
        self.name = source.name + ' annotated with ' + type + ' data'
        self.source = source
        # BUGFIX: remember the annotation type. next_track previously read
        # the name ``type``, which resolved to the *builtin* (never a key
        # of tinfo), so every track was queued for fetching even when it
        # was already annotated.
        self.type = type
        self.buffer = []
        self.fillbuf = []

    def next_track(self):
        # Queue un-annotated tracks until a full batch is ready, then
        # annotate them in one call and emit tracks in arrival order.
        while len(self.fillbuf) < self.annotator['batch_size']:
            track = self.source.next_track()
            if track:
                self.buffer.append(track)
                tinfo = tlib.get_track(track)
                if self.type not in tinfo:
                    self.fillbuf.append(track)
            else:
                break
        if len(self.fillbuf) > 0:
            self._fetch_fillbuf()
        if len(self.buffer) > 0:
            return self.buffer.pop(0)
        else:
            return None

    def _fetch_fillbuf(self):
        # Batch-annotate everything queued, then clear the queue.
        self.annotator['annotator'](self.fillbuf)
        self.fillbuf = []
class FakeTrackSource(object):
    '''
        Generates a series of fake tracks, suitable for testing

        param: count: the number of tracks to generate
    '''
    def __init__(self, count=10):
        self.name = 'FakeTracks'
        self.count = count
        # monotonically increasing id so every fake track is unique
        self.fake_id = 1000000

    def next_track(self):
        # Emit fake tracks until the configured count runs out.
        if self.count <= 0:
            return None
        track = tlib.make_track(self._fake_id(),
            self._fake_name(), self._fake_name(), 180, 'FakeTrackSource')
        self.count -= 1
        return track

    def _fake_id(self):
        # Allocate the next unique id, returned as a string.
        self.fake_id += 1
        return str(self.fake_id)

    def _fake_name(self):
        # Random "adjective noun" pair.
        nouns = 'vixen bear dog cat waters drums parade fire france'
        adjectives = 'frumpy cold wet fast red jumpy strange weird nifty'
        return random.choice(adjectives.split()) + ' ' + random.choice(nouns.split())
class Split(object):
    '''
        Splits a stream into two streams

        :param source: the source of the track stream
        :param split_index: the index where the split occurs
    '''
    def __init__(self, source, split_index):
        self.source = source
        self.split_index = split_index
        self.left_buffer = None
        self.right_buffer = None

    def _fill_buffer(self):
        # Lazily drain the source exactly once, routing the first
        # ``split_index`` tracks to the left buffer and the rest right.
        if self.left_buffer is not None:
            return
        self.left_buffer = []
        self.right_buffer = []
        position = 0
        while True:
            track = self.source.next_track()
            if not track:
                break
            if position < self.split_index:
                self.left_buffer.append(track)
            else:
                self.right_buffer.append(track)
            position += 1

    class left_side(object):
        # Stream view over the first split_index tracks.
        def __init__(self, outer):
            self.outer = outer
            self.name = 'first ' + str(outer.split_index) \
                + ' tracks of ' + outer.source.name

        def next_track(self):
            self.outer._fill_buffer()
            buf = self.outer.left_buffer
            return buf.pop(0) if buf else None

    class right_side(object):
        # Stream view over everything after the first split_index tracks.
        def __init__(self, outer):
            self.outer = outer
            self.name = 'After the first ' + str(outer.split_index) \
                + ' tracks of ' + outer.source.name

        def next_track(self):
            self.outer._fill_buffer()
            buf = self.outer.right_buffer
            return buf.pop(0) if buf else None

    def outputs(self):
        return [self.left_side(self), self.right_side(self)]
class Looper(object):
    '''
        Given a source, generate a stream of a given size by circulating through
        the tracks in the source

        :param source: the stream source
        :param max_size: the number of tracks returned
    '''
    def __init__(self, source, max_size=200):
        self.name = 'looped ' + source.name
        self.source = source
        self.index = 0
        self.buffer = []
        self.looping = False
        self.max_size = max_size
        self.cur_size = 0

    def next_track(self):
        # Stop once the requested number of tracks has been produced.
        if self.cur_size >= self.max_size:
            return None
        if not self.looping:
            track = self.source.next_track()
            if track is None:
                # Source exhausted: switch to cycling the buffered tracks.
                self.looping = True
                return self.next_track()
            self.buffer.append(track)
        else:
            if not self.buffer:
                return None
            track = self.buffer[self.index % len(self.buffer)]
            self.index += 1
        self.cur_size += 1
        return track
class Shuffler(object):
    ''' Shuffles the tracks in the stream

        :param source: the source of tracks
        :param max_size: the maximum number of tracks to return
    '''
    def __init__(self, source, max_size=0):
        self.name = 'shuffled ' + source.name
        self.source = source
        self.buffer = []
        self.filling = True
        self.max_size = max_size

    def next_track(self):
        # Drain the source (up to max_size tracks, 0 meaning unlimited),
        # shuffle once, then serve from the tail of the buffer.
        while self.filling:
            track = self.source.next_track()
            room = self.max_size == 0 or len(self.buffer) < self.max_size
            if track and room:
                self.buffer.append(track)
            else:
                self.filling = False
                random.shuffle(self.buffer)
        return self.buffer.pop() if self.buffer else None
class DeDup(object):
    '''
        Remove any duplicate tracks in the stream

        :param source: the stream source
        :param by_name: if True match by track ID and name
    '''
    def __init__(self, source, by_name = False):
        self.name = 'dedupped ' + source.name
        self.source = source
        self.by_name = by_name
        # ids (and, when by_name, lowercased names) already seen
        self.history = set()

    def next_track(self):
        # Pull tracks until one passes the duplicate checks, or the
        # source runs dry.
        while True:
            track = self.source.next_track()
            if not track:
                return track
            if self.by_name:
                tname = tlib.get_tn(track).lower()
                if tname in self.history:
                    continue
                self.history.add(tname)
            if track in self.history:
                continue
            self.history.add(track)
            return track
class Buffer(object):
    '''
        Buffer up the given number of tracks

        :param source: the stream source
        :param max_size: the size of the buffer
    '''
    def __init__(self, source, max_size=40):
        self.name = 'buffered ' + source.name
        self.source = source
        self.buffer = []
        self.filling = True
        self.max_size = max_size

    def next_track(self):
        # Fill the buffer first (0 meaning unlimited), then serve from it.
        # NOTE(review): popping from the end yields the buffered tracks in
        # reverse order of arrival -- confirm this is intentional.
        while self.filling:
            track = self.source.next_track()
            if track and (self.max_size == 0 or len(self.buffer) < self.max_size):
                self.buffer.append(track)
            else:
                self.filling = False
        return self.buffer.pop() if self.buffer else None
class LongerThan(object):
    '''
        Limit the stream, if possible, to tracks with a duration that is longer
        than the given time

        :param source: the source stream
        :param time: the time in seconds
    '''
    def __init__(self, source, time=1200):
        self.name = 'LongerThan ' + str(time) + ' secs'
        self.source = source
        self.time = time
        # total duration of tracks emitted so far
        self.cur_time = 0

    def next_track(self):
        # Keep emitting tracks until the accumulated duration has passed
        # the threshold; afterwards the stream ends.
        if self.cur_time > self.time:
            return None
        track = self.source.next_track()
        if track:
            self.cur_time += tlib.get_attr(track, 'duration')
        return track
class ShorterThan(object):
    '''
        Limit the stream, if possible, to tracks with a duration that is just
        shorter than the given time

        :param source: the source stream
        :param time: the time in seconds
    '''
    def __init__(self, source, time=1200):
        self.name = 'Shorter Than ' + str(time) + ' secs'
        self.source = source
        self.time = time
        # total duration of tracks emitted so far
        self.cur_time = 0

    def next_track(self):
        # Stop before the accumulated duration reaches the limit; the
        # track that would push it over is dropped.
        if self.cur_time >= self.time:
            return None
        track = self.source.next_track()
        if track:
            self.cur_time += tlib.get_attr(track, 'duration')
            if self.cur_time >= self.time:
                return None
        return track
class Sorter(object):
    '''
        Sorts the tracks in the given stream by the given attribute

        :param source: the source of the tracks
        :param attr: the attribute to be sorted
        :param reverse: if True reverse the sort
        :param max_size: maximum tracks to sort
    '''
    def __init__(self, source, attr, reverse=False, max_size=0):
        self.name = source.name + ' sorted by ' + attr + ('(reverse)' if reverse else '')
        self.source = source
        self.buffer = []
        self.filling = True
        self.max_size = max_size
        self.attr = attr
        self.reverse = reverse
        # wrap the source so the sort attribute is annotated if needed
        self.annotator = get_annotator(source, attr)

    def next_track(self):
        # Drain the (annotated) source up to max_size tracks, sort once,
        # then serve the sorted buffer from the front.
        while self.filling:
            track = self.annotator.next_track()
            if track and (self.max_size == 0 or len(self.buffer) < self.max_size):
                self.buffer.append(track)
            else:
                self.filling = False
                sort_key = lambda tid: tlib.get_attr(tid, self.attr)
                self.buffer.sort(reverse=self.reverse, key=sort_key)
        return self.buffer.pop(0) if self.buffer else None
class CustomSorter(object):
    '''
        Sorts the tracks by a custom key

        :param source: the source of the tracks
        :param keyfunc: function that turns a track id into the sort key
        :param reverse: if True reverse the sort
        :param max_size: maximum tracks to sort
    '''
    def __init__(self, source, keyfunc, reverse=False, max_size=0):
        self.name = source.name + ' custom sorted'
        self.source = source
        self.keyfunc = keyfunc
        self.buffer = []
        self.filling = True
        self.max_size = max_size
        self.reverse = reverse

    def next_track(self):
        # Buffer up to max_size tracks (0 meaning unlimited), sort once
        # with the caller-supplied key, then serve from the front.
        while self.filling:
            track = self.source.next_track()
            if track and (self.max_size == 0 or len(self.buffer) < self.max_size):
                self.buffer.append(track)
            else:
                self.filling = False
                self.buffer.sort(reverse=self.reverse, key=self.keyfunc)
        return self.buffer.pop(0) if self.buffer else None
class First(object):
    '''
        Returns the first tracks from a stream

        :param source: the source of tracks
        :param sample_size: the number of tracks to return
    '''
    def __init__(self, source, sample_size=10):
        self.name = 'first ' + str(sample_size) + ' of ' + source.name
        self.source = source
        self.sample_size = sample_size
        self.buffer = []
        self.filling = True

    def next_track(self):
        # Pull from the source only until the sample is full (or the
        # source ends), then serve the buffered tracks in order.
        while self.filling and len(self.buffer) < self.sample_size:
            track = self.source.next_track()
            if track:
                self.buffer.append(track)
            else:
                self.filling = False
        if len(self.buffer) >= self.sample_size:
            self.filling = False
        return self.buffer.pop(0) if self.buffer else None
class Last(object):
    '''
        Returns the last tracks from a stream

        :param source: the source of tracks
        :param sample_size: the number of tracks to return
    '''
    def __init__(self, source, sample_size=10):
        self.name = 'last ' + str(sample_size) + ' of ' + source.name
        self.source = source
        self.sample_size = sample_size
        self.buffer = []
        self.filling = True

    def next_track(self):
        # Drain the whole source, keep only the final sample_size tracks,
        # then serve them in order.
        while self.filling:
            track = self.source.next_track()
            if track:
                self.buffer.append(track)
            else:
                self.filling = False
                self.buffer = self.buffer[-self.sample_size:]
        return self.buffer.pop(0) if self.buffer else None
class Reverse(object):
    '''
        Reverses the order of the tracks in the stream

        :param source: the source of tracks
    '''
    def __init__(self, source):
        self.name = 'reverse of ' + source.name
        self.source = source
        self.buffer = []
        self.filling = True

    def next_track(self):
        # Buffer the whole source, then pop from the end to reverse it.
        while self.filling:
            track = self.source.next_track()
            if track:
                self.buffer.append(track)
            else:
                self.filling = False
        return self.buffer.pop() if self.buffer else None
class Sample(object):
    '''
        Randomly sample tracks from the stream

        :param source: the source of tracks
        :param sample_size: the number of tracks to return
    '''
    def __init__(self, source, sample_size=10):
        self.name = 'Sampling ' + str(sample_size) \
            + ' tracks from ' + source.name
        self.source = source
        self.sample_size = sample_size
        self.buffer = []
        self.filling = True

    def next_track(self):
        # Buffer the whole source, shuffle once, keep a random
        # sample_size subset, then serve from the tail.
        while self.filling:
            track = self.source.next_track()
            if track:
                self.buffer.append(track)
            else:
                self.filling = False
                random.shuffle(self.buffer)
                self.buffer = self.buffer[:self.sample_size]
        return self.buffer.pop() if self.buffer else None
class Concatenate(object):
    '''
        Concatenate multiple streams

        :param source_list: a list of sources
    '''
    def __init__(self, source_list):
        self.name = 'concatenating ' + ' '.join([s.name for s in source_list])
        self.source_list = source_list
        # index of the source currently being drained
        self.index = 0

    def next_track(self):
        # Exhaust each source in turn; advance on the first None.
        while self.index < len(self.source_list):
            track = self.source_list[self.index].next_track()
            if track:
                return track
            self.index += 1
        return None
class Alternate(object):
    '''
        Alternate tracks from multiple streams

        :param source_list: a list of sources
    '''
    def __init__(self, source_list, fail_fast=False):
        self.name = 'alternating between ' + ', '.join([s.name for s in source_list])
        self.source_list = source_list
        self.index = 0
        # when True, the first exhausted source ends the whole stream
        self.fail_fast = fail_fast

    def next_track(self):
        # Round-robin across sources; an empty source costs one try
        # (or terminates the stream when fail_fast is set).
        remaining = len(self.source_list)
        while remaining > 0:
            src = self.source_list[self.index % len(self.source_list)]
            self.index += 1
            track = src.next_track()
            if track:
                return track
            if self.fail_fast:
                break
            remaining -= 1
        return None
class Conditional(object):
    '''
        Alternate tracks from two streams based on a conditional

        :param source: the source of tracks
        :param cond_func: a function that returns a boolean
        :param trueSource: source of tracks when conf_func returns True
        :param falseSource: source of tracks when conf_func returns False
    '''
    def __init__(self, cond_func, trueSource, falseSource):
        self.name = 'Conditional of ' + ' '.join([trueSource.name, falseSource.name])
        self.trueSource = trueSource
        self.falseSource = falseSource
        self.cond_func = cond_func

    def next_track(self):
        # Route each request to whichever stream the predicate selects.
        chosen = self.trueSource if self.cond_func() else self.falseSource
        return chosen.next_track()
class Case(object):
    '''
        Selects tracks from streams based upon a mapping function

        :param source: the source of tracks
        :param func: a function that returns the source_map key
        :param source_map: a map of key to source streams
    '''

    class _EmptySource(object):
        # Fallback stream used when no 'default' entry is supplied.
        name = 'empty'

        def next_track(self):
            return None

    def __init__(self, func, source_map):
        self.name = 'Case of ' + ', '.join([n +':' + s.name for n,s in source_map.items()])
        self.source_map = source_map
        self.func = func
        # BUGFIX: the fallback must be a source *object* exposing
        # next_track(); the original installed a bare function, which
        # crashed with AttributeError whenever the default case was hit.
        if not 'default' in self.source_map:
            self.source_map['default'] = Case._EmptySource()

    def next_track(self):
        # Map unknown keys to the default stream.
        key = self.func()
        if not key in self.source_map:
            key = 'default'
        source = self.source_map[key]
        return source.next_track()
'''
Some handy dandy conditional funcs
'''
def is_day_of_week(day_of_week):
    ''' checks if cur day is given day of the week

        :param day_of_week: Monday is 0 and Sunday is 6.
    '''
    # Evaluate the weekday lazily, at call time, not at creation time.
    return lambda: datetime.datetime.today().weekday() == day_of_week
def get_simple_day_part():
    '''
        returns the daypart
    '''
    hour = datetime.datetime.today().hour
    # first matching upper bound wins; anything from 22:00 on is 'night'
    for limit, part in ((12, 'morning'), (18, 'afternoon'), (22, 'evening')):
        if hour < limit:
            return part
    return 'night'
class AttributeRangeFilter(object):
    '''
        Filters tracks based upon range check of an attribute

        :param source: the source of tracks
        :param attr: the attribute of interest
        :param match: if not None, attribute value must match this exactly
        :param min_val: if not None, attribute value must be at least this
        :param max_val: if not None, attribute value must be no more than this
    '''
    def __init__(self, source, attr, match=None,min_val=None,max_val=None):
        self.name = source.name + ' filtered by ' + attr
        self.source = source
        self.attr = attr
        # (the original assigned self.match twice; once is enough)
        self.match = match
        self.min_val = min_val
        self.max_val = max_val
        # wrap the source so the filter attribute gets annotated if needed
        self.annotator = get_annotator(source, attr)

    def next_track(self):
        # Pull tracks until one satisfies the configured checks, or the
        # stream ends.
        while True:
            track = self.annotator.next_track()
            if not track:
                return track
            attr_val = tlib.get_attr(track, self.attr)
            if attr_val is None:
                continue
            if self.match is not None:
                if attr_val == self.match:
                    return track
                continue
            # BUGFIX: compare the bounds against None explicitly; the
            # original truthiness tests silently ignored bounds of 0
            # (e.g. min_val=0 or max_val=0 never filtered anything).
            if self.min_val is not None and attr_val < self.min_val:
                continue
            if self.max_val is not None and attr_val > self.max_val:
                continue
            return track
class TrackFilter(object):
'''
Removes tracks from the stream based on a second stream
:param source: the source of tracks
:param filter: the stream of bad tracks to be removed
'''
def __init__(self, source, filter, invert=False):
self.name = source.name + ('inverse' if invert else '') +'filtered by ' + filter.name
self.source = source
self.filter = filter
self.bad_tracks = None
self.invert = invert
self.debug = False
def next_track(self):
if self.bad_tracks == None:
self.bad_tracks = set()
while True:
track = self.filter.next_track()
if track:
self.bad_tracks.add(track)
else:
break
while True:
track = self.source.next_track()
if track:
if self.invert and (track in self.bad_tracks):
return track
elif (not self.invert) and (track not in self.bad_tracks):
return track
else:
if self.debug:
print 'filtered out', tlib.get_tn(track)
else:
break
return track
class ArtistFilter(object):
'''
Removes tracks from the stream that have the given artists
:param source: the source of tracks
:param artistNames: the names of the artists to be removed
'''
def __init__(self, source, artistNames):
self.name = source.name + ' with songs by ' + ', '.join(artistNames) + ' removed'
self.source = source
self.bad_artists = set([a.lower() for a in artistNames])
self.debug = False
def next_track(self):
while True:
track = self.source.next_track()
if track:
tinfo = tlib.get_track(track)
if tinfo['artist'].lower() not in self.bad_artists:
return track
else:
if self.debug:
print 'filtered out', tlib.get_tn(track)
else:
break
return track
class Dumper(object):
'''
Dumps tracks to the terminal
:param source: the source of tracks
:param props: list of property names to be included in the dump
'''
def __init__(self, source, props):
self.name = 'dumper'
self.source = source
self.which = 1
self.props = props
def next_track(self):
track = self.source.next_track()
if track:
print self.which, tlib.get_tn(track)
if len(self.props) > 0:
for prop in self.props:
val = tlib.get_attr(track, prop)
if val != None:
print ' ', prop, '->', val
self.which += 1
return track
class Debugger(object):
'''
Shows details on each track in the stream
:param source: the source of tracks
'''
def __init__(self, source):
self.name = 'dumper'
self.source = source
def next_track(self):
track = self.source.next_track()
if track:
tinfo = tlib.get_track(track)
print json.dumps(tinfo, indent=4)
print
return track
class SaveToJson(object):
    '''
        Saves the stream to json

        :param source: the source of tracks
        :param name: the name of the json file
        :param max_size: the max tracks to save
    '''
    def __init__(self, source, name='playlist.json', max_size=100):
        self.name = 'SaveToJson ' + name
        self.source = source
        self.playlist_name = name
        self.max_size = max_size
        # ensures the playlist is written at most once
        self.saved = False
        self.buffer = []

    def next_track(self):
        # Accumulate up to max_size tracks; once the stream ends (or the
        # buffer is full), write the playlist exactly once.
        track = self.source.next_track()
        if track and len(self.buffer) < self.max_size:
            self.buffer.append(track)
        elif not self.saved:
            self._save_playlist()
        return track

    def _save_playlist(self):
        self.saved = True
        records = []
        for tid in self.buffer:
            info = tlib.get_track(tid)
            if info:
                records.append(info)
        f = open(self.playlist_name, 'w')
        print >> f, json.dumps(records, indent=4)
        f.close()
def get_annotator(source, attr):
    # A dotted attribute ('type.name') means the value comes from an
    # external annotator of that type; wrap the source accordingly.
    # Plain attributes pass the source through untouched.
    fields = attr.split('.')
    if len(fields) == 2:
        return Annotator(source, fields[0])
    return source
class PushableSource(object):
    ''' A source that allows you to push tracks
        back for later retrieval
    '''
    def __init__(self, source):
        self.source = source
        self.name = 'pushable ' + source.name
        # LIFO stack of pushed-back tracks
        self.buffer = []

    def next_track(self):
        # Serve pushed-back tracks (most recent first) before the source.
        if self.buffer:
            return self.buffer.pop()
        return self.source.next_track()

    def push(self, track):
        self.buffer.append(track)
| {
"repo_name": "plamere/pbl",
"path": "pbl/standard_plugs.py",
"copies": "1",
"size": "24806",
"license": "mit",
"hash": 4163572030834825700,
"line_mean": 28.886746988,
"line_max": 101,
"alpha_frac": 0.5323308877,
"autogenerated": false,
"ratio": 4.146773654296222,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5179104541996222,
"avg_score": null,
"num_lines": null
} |
"""A set of stream oriented parsers for http requests and responses, inline
with the current draft recommendations from the http working group.
http://tools.ietf.org/html/draft-ietf-httpbis-p1-messaging-17
Unlike other libraries, this is for clients, servers and proxies.
Missing:
comma parsing/header folding
"""
from gzip import GzipFile
import re
import zlib
from io import BytesIO
class ParseError(Exception):
    """Base class for every HTTP parsing error raised by this module."""
from hanzo.httptools.semantics import Codes, Methods
# Line terminators accepted by the parser: CRLF per the HTTP spec, plus
# bare LF for tolerance of sloppy peers.
NEWLINES = (b'\r\n', b'\n')
class BrokenChunks(Exception):
    """Raised when a supposedly chunked body cannot be parsed as chunks;
    the caller falls back to reading the body as a plain byte stream."""
    pass
class HTTPMessage(object):
    """A stream based parser for http like messages"""
    CONTENT_TYPE = b"application/http"

    def __init__(self, header, buf=None, offset=0):
        """Create a parser.

        :param header: header object (e.g. RequestHeader) that accumulates
            the start line, header lines and trailers
        :param buf: optional bytearray to accumulate raw input into
        :param offset: current read position within ``buf``
        """
        self.buffer = buf if buf is not None else bytearray()
        self.offset = offset
        self.header = header
        # (offset, length) spans within self.buffer that make up the body
        self.body_chunks = []
        # parser state machine: 'start' -> 'headers' -> 'body'
        #                       -> 'end' / 'incomplete'
        self.mode = 'start'
        self.body_reader = None

    @property
    def url(self):
        return self.header.url

    @property
    def scheme(self):
        return self.header.scheme

    @property
    def method(self):
        return self.header.method

    @property
    def host(self):
        return self.header.host

    @property
    def port(self):
        return self.header.port

    def feed_fd(self, fd):
        """Feed the parser by reading from a file-like object until done."""
        while True:
            length, terminator = self.feed_predict()
            if length == 0:
                return ''
            elif terminator == '\r\n':
                # BUGFIX: file objects expose readline(), not readLine();
                # the original spelling raised AttributeError on any
                # line-oriented read.
                text = fd.readline()
            elif length < 0:
                text = fd.read()
            elif length > 0:
                text = fd.read(length)
            unread = self.feed(text)
            if unread:
                return unread

    def feed_predict(self):
        """returns size, terminator request for input. size is 0 means end. """
        if self.mode == 'start':
            return None, '\r\n'
        elif self.mode == 'headers':
            return None, '\r\n'
        elif self.mode == 'body':
            if self.body_reader is not None:
                return self.body_reader.feed_predict()
            else:
                # connection close
                return -1, None
        if self.mode == 'end':
            return 0, None
        if self.mode == 'incomplete':
            return 0, None

    def feed(self, text):
        """Push more text from the input stream into the parser.

        Returns any bytes that were not consumed.
        """
        if text and self.mode == 'start':
            text = self.feed_start(text)
        if text and self.mode == 'headers':
            text = self.feed_headers(text)
        if self.mode == 'body':
            # Entering the body: pick a reader based on the framing
            # advertised in the headers (chunked / length / close-delimited).
            if not self.header.has_body():
                self.mode = 'end'
            else:
                if self.header.body_is_chunked():
                    self.body_reader = ChunkReader()
                else:
                    length = self.header.body_length()
                    if length is not None:
                        encoding = self.header.encoding
                        if encoding and encoding.endswith(b'gzip'):
                            self.body_reader = ZipLengthReader(length,
                                    text)
                        else:
                            self.body_reader = LengthReader(length)
                        length = self.body_reader.remaining
                        self.body_chunks = [(self.offset, length)]
                        if length == 0:
                            self.mode = 'end'
                    else:
                        # no known length: read until connection close
                        self.body_chunks = [(self.offset, 0)]
                        self.body_reader = None
        if text and self.mode == 'body':
            if self.body_reader is not None:
                try:
                    text = self.body_reader.feed(self, text)
                except BrokenChunks:
                    # body is not actually chunked: fall back to
                    # close-delimited reading
                    self.body_reader = None
                    self.body_chunks = [(self.offset, 0)]
            if self.body_reader is None:
                # close-delimited body: everything received so far is body
                ((offset, length),) = self.body_chunks
                self.buffer.extend(text)
                self.offset = len(self.buffer)
                # NOTE(review): this rebinds body_chunks to a *tuple*,
                # while close() may call .pop() on it -- confirm close()
                # cannot be reached in this state.
                self.body_chunks = ((offset, length + len(text)),)
                text = ''
        return text

    def close(self):
        """Mark the end of the input stream and finish parsing."""
        if (self.body_reader is None and self.mode == 'body'):
            # close-delimited body: EOF is the terminator
            self.mode = 'end'
        elif self.mode != 'end':
            if self.body_chunks:
                # check for incomplete in body_chunks
                offset, length = self.body_chunks.pop()
                position = len(self.buffer)
                # clamp the last chunk to the data actually received
                length = min(length, position - offset)
                self.body_chunks.append((offset, length))
            self.mode = 'incomplete'

    def headers_complete(self):
        """Check whether the input stream has finished supplying headers."""
        return self.mode in ('end', 'body')

    def complete(self):
        """Checks whether the input stream is at the end, i.e. if the parser
        is expecting no more input."""
        return self.mode == 'end'

    def feed_line(self, text):
        """Feed text into the buffer, returning the first line found (if found
        yet)"""
        self.buffer.extend(text)
        pos = self.buffer.find(b'\n', self.offset)
        if pos > -1:
            # split at the newline: keep the line, hand back the rest
            pos += 1
            text = bytes(self.buffer[pos:])
            del self.buffer[pos:]
            line = bytes(self.buffer[self.offset:])
            self.offset = len(self.buffer)
        else:
            line = None
            text = b''
        return line, text

    def feed_length(self, text, remaining):
        """Feed (at most remaining bytes) text to buffer, returning
        leftovers."""
        body, text = text[:remaining], text[remaining:]
        remaining -= len(body)
        self.buffer.extend(body)
        self.offset = len(self.buffer)
        return remaining, text

    def feed_start(self, text):
        """Feed text to the parser while it is in the 'start' state."""
        line, text = self.feed_line(text)
        if line is not None:
            # tolerate blank line(s) before the start line
            if line not in NEWLINES:
                self.header.set_start_line(line)
                self.mode = 'headers'
        return text

    def feed_headers(self, text):
        """Feed text to the parser while it is in the 'headers'
        state."""
        while text:
            line, text = self.feed_line(text)
            if line is not None:
                self.header.add_header_line(line)
                if line in NEWLINES:
                    # blank line ends the header section
                    self.mode = 'body'
                    break
        return text

    def get_message(self):
        """Returns the contents of the input buffer."""
        return bytes(self.buffer)

    def get_decoded_message(self):
        """Return the input stream reconstructed from the parsed
        data."""
        buf = bytearray()
        self.write_decoded_message(buf)
        return bytes(buf)

    def write_message(self, buf):
        #TODO: No idea what this does, looks broken
        self.header.write(buf)
        buf.extend(b'\r\n')
        self.write_body(buf)

    def write_decoded_message(self, buf):
        """Writes the parsed data to the buffer passed."""
        self.header.write_decoded(buf)
        if self.header.has_body():
            # re-emit a correct Content-Length for the de-chunked body
            length = sum(l for o, l in self.body_chunks)
            buf.extend(b'Content-Length: ' + str(length).encode('ascii') + b'\r\n')
        body = self.get_body()
        if self.header.encoding and body:
            try:
                body = zlib.decompress(body)
            except zlib.error:
                try:
                    # retry expecting a gzip header (wbits offset of 16)
                    body = zlib.decompress(body, 16 + zlib.MAX_WBITS)
                except zlib.error:
                    # decompression failed: keep the body as-is and
                    # re-advertise its original encoding
                    encoding_header = b"Content-Encoding: " + self.header.encoding + b"\r\n"
                    buf.extend(encoding_header)
        buf.extend(b'\r\n')
        try:
            buf.extend(body)
        except Exception as e:
            raise Exception('buf={} body={} e={}'.format(repr(buf), repr(body), e))

    def get_body(self):
        """Returns the body of the HTTP message."""
        buf = bytearray()
        self.write_body(buf)
        return bytes(buf)

    def write_body(self, buf):
        """Writes the body of the HTTP message to the passed
        buffer."""
        # body_chunks records where each piece of body data landed in the
        # raw buffer; concatenating the spans yields the de-framed body
        for offset, length in self.body_chunks:
            buf.extend(self.buffer[offset:offset + length])
class ChunkReader(object):
    """Reads the body of a HTTP message with chunked encoding."""
    def __init__(self):
        # state machine: 'start' (expect chunk-size line) -> 'chunk'
        # (reading chunk data) -> back to 'start', until a zero-size
        # chunk switches to 'trailer' and finally 'end'
        self.mode = "start"
        # True until the first chunk-size line parses successfully; used
        # to decide whether a malformed size line means "not chunked"
        self.start = True
        # bytes still to read in the current chunk
        self.remaining = 0
    def feed_predict(self):
        # Mirrors HTTPMessage.feed_predict: (length, terminator) request.
        if self.mode == 'start':
            return None, '\r\n'
        elif self.mode == 'chunk':
            if self.remaining == 0:
                return None, '\r\n'
            else:
                return self.remaining, None
        elif self.mode == 'trailer':
            return None, '\r\n'
        elif self.mode == 'end':
            return 0, None
    def feed_start(self, parser, text):
        """Feed text into the ChunkReader when the mode is 'start'."""
        pos = len(parser.buffer)
        line, text = parser.feed_line(text)
        offset = len(parser.buffer)
        if line is not None:
            try:
                # chunk-size line: hex size, optionally followed by ";ext"
                chunk = int(line.split(b';', 1)[0], 16)
            except ValueError:
                # ugh, this means the chunk is probably not a chunk
                if self.start:
                    # undo: strip the consumed text back out of the buffer
                    del parser.buffer[pos:]
                    parser.offset = len(parser.buffer)
                    raise BrokenChunks()
                else:
                    raise
            parser.body_chunks.append((offset, chunk))
            self.remaining = chunk
            if chunk == 0:
                # zero-size chunk terminates the body; trailers may follow
                self.mode = 'trailer'
            else:
                self.mode = 'chunk'
            self.start = False
        return text
    def feed_chunk(self, parser, text):
        """Feed text into the ChunkReader when the mode is 'chunk'."""
        if self.remaining > 0:
            self.remaining, text = parser.feed_length(text, self.remaining)
        if self.remaining == 0:
            # consume the CRLF that closes the chunk before the next size line
            end_of_chunk, text = parser.feed_line(text)
            if end_of_chunk:
                self.mode = 'start'
        return text
    def feed_trailer(self, parser, text):
        """Feed text into the ChunkReader when the mode is
        'trailer'."""
        line, text = parser.feed_line(text)
        if line is not None:
            parser.header.add_trailer_line(line)
            if line in NEWLINES:
                # blank line ends the trailer section and the body
                self.mode = 'end'
        return text
    def feed(self, parser, text):
        """Feed text into the ChunkReader.

        Drives the state machine until the input is exhausted or the
        body ends; propagates parser.mode = 'end' on completion.
        """
        while text:
            if self.mode == 'start':
                text = self.feed_start(parser, text)
            if text and self.mode == 'chunk':
                text = self.feed_chunk(parser, text)
            if text and self.mode == 'trailer':
                text = self.feed_trailer(parser, text)
            if self.mode == 'end':
                parser.mode = 'end'
                break
        return text
class LengthReader(object):
    """Reads a body framed by a known Content-Length."""

    def __init__(self, length):
        # bytes of body still expected
        self.remaining = length

    def feed_predict(self):
        # Ask the caller for exactly the outstanding byte count.
        return self.remaining, None

    def feed(self, parser, text):
        """Consume up to ``remaining`` bytes; mark the parser 'end' when done."""
        leftover = text
        if self.remaining > 0:
            self.remaining, leftover = parser.feed_length(leftover, self.remaining)
        if self.remaining <= 0:
            parser.mode = 'end'
        return leftover
class ZipLengthReader(LengthReader):
    """
    Tries to read the body as gzip according to length. In case that fails, it
    disregards the Content-Length and reads it normally.
    """
    def __init__(self, length, text):
        # TODO test if this works with gzipped responses in WARC
        try:
            # Eagerly decompress the first ``length`` bytes of the input;
            # on success, ``remaining`` counts *decompressed* bytes.
            self._file = GzipFile(fileobj=BytesIO(text[:length]), mode='rb')
            self._text = self._file.read()
            super(ZipLengthReader, self).__init__(len(self._text))
        except IOError:
            # not valid gzip: fall back to reading the raw text length
            self._file = None
            super(ZipLengthReader, self).__init__(len(text))
    def __del__(self):
        if self._file:
            self._file.close()
    def feed(self, parser, text):
        """Parse the body according to remaining length"""
        if self.remaining > 0:
            if self._file:
                # NOTE(review): the incoming text is replaced wholesale by
                # the decompressed body captured in __init__, so any bytes
                # fed here appear to be discarded -- confirm intended.
                text = self._text
            self.remaining, text = parser.feed_length(text, self.remaining)
        if self.remaining <= 0:
            parser.mode = 'end'
        return text
class HTTPHeader(object):
    """Shared header state/parsing for HTTP requests and responses."""
    # Framing / hop-by-hop headers stripped when re-serializing a decoded
    # message (a correct Content-Length is re-added by the caller).
    STRIP_HEADERS = [n.lower() for n in (b'Content-Length',
        b'Transfer-Encoding', b'Content-Encoding',
        b'TE', b'Expect', b'Trailer')]
    def __init__(self, ignore_headers):
        # raw (name, value) pairs in arrival order
        self.headers = []
        self.keep_alive = False
        # body framing: 'close' (read to EOF), 'length', or 'chunked'
        self.mode = 'close'
        self.content_length = None
        # value of Content-Encoding, if any (lowercased bytes)
        self.encoding = None
        self.trailers = []
        self.expect_continue = False
        # header names whose semantics should be ignored during parsing
        # (they are still stored verbatim)
        self.ignore_headers = set(x.lower() for x in ignore_headers)
    def has_body(self):
        # overridden by subclasses
        pass
    def set_start_line(self, line):
        # overridden by subclasses
        pass
    def write_decoded(self, buf):
        # Serialize the start line plus headers, dropping framing headers
        # when a body will be re-framed by the caller.
        self.write_decoded_start(buf)
        strip_headers = self.STRIP_HEADERS if self.has_body() else ()
        self.write_headers(buf, strip_headers)
    def write_decoded_start(self, buf):
        # overridden by subclasses
        pass
    def write_headers(self, buf, strip_headers=()):
        # Emit headers then trailers, skipping any stripped names.
        for k, v in self.headers:
            if k.lower() not in strip_headers:
                buf.extend(k + b': ' + v + b'\r\n')
        for k, v in self.trailers:
            if k.lower() not in strip_headers:
                buf.extend(k + b': ' + v + b'\r\n')
    def add_trailer_line(self, line):
        # Leading space/tab marks a folded continuation of the previous
        # trailer line.
        if line.startswith(b' ') or line.startswith(b'\t'):
            k, v = self.trailers.pop()
            line = line.strip()
            v = v + b' ' + line
            self.trailers.append((k, v))
        elif line in NEWLINES:
            pass
        else:
            name, value = line.split(b':', 1)
            name = name.strip()
            value = value.strip()
            self.trailers.append((name, value))
    def add_header(self, name, value):
        self.headers.append((name, value))
    def add_header_line(self, line):
        # Leading space/tab folds into the previous header line.
        if line.startswith(b' ') or line.startswith(b'\t'):
            k, v = self.headers.pop()
            line = line.strip()
            v = v + b' ' + line
            self.add_header(k, v)
        elif line in NEWLINES:
            # Blank line: end of headers. Interpret the ones that affect
            # framing / connection handling.
            for name, value in self.headers:
                name = name.lower()
                value = value.lower()
                # todo handle multiple instances
                # of these headers
                if name in self.ignore_headers:
                    #print >> sys.stderr, 'ignore', name
                    pass
                elif name == b'expect':
                    if b'100-continue' in value:
                        self.expect_continue = True
                elif name == b'content-length':
                    # only applies while mode is still 'close'; a chunked
                    # Transfer-Encoding seen earlier takes precedence
                    if self.mode == 'close':
                        self.content_length = int(value)
                        self.mode = 'length'
                elif name == b'transfer-encoding':
                    if b'chunked' in value:
                        self.mode = 'chunked'
                elif name == b'content-encoding':
                    self.encoding = value
                elif name == b'connection':
                    if b'keep-alive' in value:
                        self.keep_alive = True
                    elif b'close' in value:
                        self.keep_alive = False
        else:
            name, value = line.split(b':', 1)
            name = name.strip()
            value = value.strip()
            self.add_header(name, value)
    def body_is_chunked(self):
        return self.mode == 'chunked'
    def body_length(self):
        # None when the body is chunked or close-delimited
        if self.mode == 'length':
            return self.content_length
# Absolute-form request target: scheme://host[:port]path
# Raw bytes literals fix the invalid "\d" escape in a non-raw bytes string
# (a DeprecationWarning today, a SyntaxError in future Pythons).
url_rx = re.compile(
    rb'(?P<scheme>https?)://(?P<authority>(?P<host>[^:/]+)(?::(?P<port>\d+))?)'
    rb'(?P<path>.*)',
    re.I)
class RequestHeader(HTTPHeader):
    """Header block of an HTTP request.

    Tracks the request line (method, target URI, version) and derives
    scheme/host/port from either an absolute-form target or the Host
    header.
    """
    def __init__(self, ignore_headers=()):
        HTTPHeader.__init__(self, ignore_headers=ignore_headers)
        self.method = ''
        self.target_uri = ''
        self.version = ''
        self.scheme = 'http'
        self.port = 80
        self.host = ''  # (was assigned twice; duplicate removed)

    @staticmethod
    def _text(value):
        # Decode wire bytes to text (latin-1, lossless) for URL assembly.
        return value.decode('iso-8859-1') if isinstance(value, bytes) else value

    @property
    def url(self):
        """Reassembled request URL, omitting the scheme's default port.

        Bug fixed: the default port for https is 443, not 80. Also
        normalises bytes components to text so Python 3 does not render
        them as "b'...'".
        """
        scheme = self._text(self.scheme)
        host = self._text(self.host)
        target = self._text(self.target_uri)
        port = self.port if isinstance(self.port, int) else int(self._text(self.port))
        if (scheme == 'http' and port == 80) \
                or (scheme == 'https' and port == 443):
            return "%s://%s%s" % (scheme, host, target)
        return "%s://%s:%s%s" % (scheme, host, port, target)

    def add_header(self, name, value):
        # Capture host (and optional port) from the Host header as it arrives.
        if name.lower() == b'host':
            if b':' in value:
                self.host, self.port = value.split(b':', 1)
            else:
                self.host = value
        return HTTPHeader.add_header(self, name, value)

    def set_start_line(self, line):
        """Parse the request line and derive scheme/host/port/target.

        Bug fixed: the OPTIONS/'*'/'/'/HTTP-1.0 comparisons and defaults
        used text literals against bytes values, so they never matched
        under Python 3; they are bytes now.
        """
        self.method, self.target_uri, self.version = \
            line.rstrip().split(b' ', 2)
        if self.method.upper() == b"CONNECT":
            # authority-form: target_uri = host:port
            self.host, self.port = self.target_uri.split(b':')
        else:
            match = url_rx.match(self.target_uri)
            if match:
                # absolute-form: reduce the target to the path and pull out
                # scheme, host and port.
                self.target_uri = match.group('path')
                self.host = match.group('host')
                port = match.group('port')
                self.port = int(port) if port else 80
                self.scheme = match.group('scheme')
        if not self.target_uri:
            # Empty target defaults to '*' for OPTIONS, '/' otherwise.
            if self.method.upper() == b'OPTIONS':
                self.target_uri = b'*'
            else:
                self.target_uri = b'/'
        if self.version == b'HTTP/1.0':
            self.keep_alive = False

    def has_body(self):
        """A request carries a body only when framed by length or chunking."""
        return self.mode in ('chunked', 'length')

    def write_decoded_start(self, buf):
        buf.extend(self.method + b' ' + self.target_uri + b' '
                   + self.version + b'\r\n')
class ResponseHeader(HTTPHeader):
    """Header block of an HTTP response, linked to the request it answers."""
    def __init__(self, request=None, ignore_headers=()):
        HTTPHeader.__init__(self, ignore_headers=ignore_headers)
        self.request = request      # the RequestHeader this response answers
        self.version = b"HTTP/1.1"
        self.code = 0
        # Bug fixed: the placeholder phrase was a text literal, which broke
        # write_decoded_start's bytes concatenation before a real status
        # line was parsed; it is bytes now like every other wire value.
        self.phrase = b"Empty Response"
    @property
    def method(self):
        """Method of the originating request."""
        return self.request.method
    @property
    def url(self):
        """URL of the originating request."""
        return self.request.url
    @property
    def host(self):
        return self.request.host
    @property
    def port(self):
        return self.request.port
    @property
    def scheme(self):
        return self.request.scheme
    def set_start_line(self, line):
        """Parse the status line: version, numeric code, optional phrase."""
        parts = line.rstrip().split(b' ', 2)
        self.version, self.code = parts[:2]
        self.phrase = parts[2] if len(parts) >= 3 else b""
        self.code = int(self.code)
        if self.version == b'HTTP/1.0':
            self.keep_alive = False
    def has_body(self):
        """A response has a body unless the method or status forbids one."""
        if self.request and self.request.method in Methods.no_body:
            return False
        elif self.code in Codes.no_body:
            return False
        return True
    def write_decoded_start(self, buf):
        buf.extend(self.version + b' ' + str(self.code).encode('ascii')
                   + b' ' + self.phrase + b'\r\n')
class RequestMessage(HTTPMessage):
    """An HTTP request message: a RequestHeader plus optional body."""
    # WARC content type for serialised request records.
    CONTENT_TYPE = HTTPMessage.CONTENT_TYPE + b";msgtype=request"
    def __init__(self, ignore_headers=()):
        HTTPMessage.__init__(self,
                             RequestHeader(ignore_headers=ignore_headers))
class ResponseMessage(HTTPMessage):
    """An HTTP response message, tied to the request that provoked it.

    Transparently consumes interim "100 Continue" responses: when a parsed
    response turns out to be a 100, its header is stashed in ``interim``
    and parsing restarts for the real response that follows.
    """
    # WARC content type for serialised response records.
    CONTENT_TYPE = HTTPMessage.CONTENT_TYPE + b";msgtype=response"
    def __init__(self, request, ignore_headers=()):
        self.interim = []  # headers of any 100-Continue responses consumed
        HTTPMessage.__init__(self,
                             ResponseHeader(request.header,
                                            ignore_headers=ignore_headers))
    def got_continue(self):
        """True when at least one interim 100 response was consumed."""
        return bool(self.interim)
    @property
    def code(self):
        # Status code of the current (final) response header.
        return self.header.code
    def feed(self, text):
        """Feed bytes to the parser, restarting after a 100 Continue."""
        text = HTTPMessage.feed(self, text)
        if self.complete() and self.header.code == Codes.Continue:
            # The finished message was only an interim response: remember it
            # and reset the parser state, then parse the real response.
            self.interim.append(self.header)
            self.header = ResponseHeader(self.header.request)
            self.body_chunks = []
            self.mode = 'start'
            self.body_reader = None
            text = HTTPMessage.feed(self, text)
        return text
    def as_http09(self):
        """Reinterpret this (possibly unparseable) response as HTTP/0.9."""
        return HTTP09Response(self)
class HTTP09ResponseHeader(HTTPHeader):
    """Synthesised header for an HTTP/0.9 response (none exists on the wire).

    Bug fixed: ``version`` and ``phrase`` were text literals while every
    other header class stores wire values as bytes (see ResponseHeader);
    they are bytes now for consistency with the bytes-based serialisation.
    """
    def __init__(self, request=None, ignore_headers=()):
        HTTPHeader.__init__(self, ignore_headers=ignore_headers)
        self.request = request
        self.version = b"HTTP/0.9"
        self.code = 200  # 0.9 has no status line; assume success
        self.phrase = b""
    @property
    def method(self):
        """Method of the originating request."""
        return self.request.method
    @property
    def url(self):
        """URL of the originating request."""
        return self.request.url
    @property
    def host(self):
        return self.request.host
    @property
    def port(self):
        return self.request.port
    @property
    def scheme(self):
        return self.request.scheme
    def has_body(self):
        """An HTTP/0.9 response is nothing but a body."""
        return True
class HTTP09Response(HTTPMessage):
    """An HTTP/0.9 response: raw body bytes with no header section."""
    # Bug fixed: this was built with '%s' % bytes, which renders as the
    # literal text "b'...'" under Python 3; concatenate bytes exactly as
    # the sibling message classes do.
    CONTENT_TYPE = HTTPMessage.CONTENT_TYPE + b";msgtype=response;version=0.9"
    def __init__(self, response):
        header = HTTP09ResponseHeader(response.header.request)
        HTTPMessage.__init__(self, header, buf=response.buffer,
                             offset=response.offset)
        # 0.9 has no status line or headers: parsing starts in the body.
        self.mode = 'body'
    @property
    def code(self):
        return self.header.code
    def feed_predict(self):
        """returns size, terminator request for input. size is 0 means end. """
        return -1, None
    def feed(self, text):
        """Push more text from the input stream into the parser."""
        self.buffer.extend(text)
        # Bug fixed: return the empty remainder as bytes (was a text str),
        # consistent with the other feed() implementations.
        return b''
    def close(self):
        """Mark the end of the input stream and finish parsing."""
        self.mode = 'end'
    def get_message(self):
        """Returns the contents of the input buffer."""
        return bytes(self.buffer)
    def get_decoded_message(self):
        """Return the input stream reconstructed from the parsed
        data."""
        return bytes(self.buffer)
    def write_decoded_message(self, buf):
        """Writes the parsed data to the buffer passed."""
        buf.extend(self.buffer)
    def get_body(self):
        """Returns the body of the HTTP message."""
        return bytes(self.buffer)
    def write_body(self, buf):
        """Append the raw body bytes to *buf*."""
        buf.extend(self.buffer)
| {
"repo_name": "internetarchive/warctools",
"path": "hanzo/httptools/messaging.py",
"copies": "1",
"size": "23083",
"license": "mit",
"hash": -8086118138200177000,
"line_mean": 30.1932432432,
"line_max": 103,
"alpha_frac": 0.5278776589,
"autogenerated": false,
"ratio": 4.1696170520231215,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5197494710923121,
"avg_score": null,
"num_lines": null
} |
"""A set of tasks for SVD filtering the m-modes."""
import numpy as np
import scipy.linalg as la
from caput import config
from draco.core import task, containers
class SVDSpectrumEstimator(task.SingleTask):
    """Calculate the SVD spectrum of a set of m-modes.

    Attributes
    ----------
    niter : int
        Number of iterations of EM to perform.
    """

    niter = config.Property(proptype=int, default=5)

    def process(self, mmodes):
        """Calculate the spectrum.

        Parameters
        ----------
        mmodes : containers.MModes
            MModes to find the spectrum of.

        Returns
        -------
        spectrum : containers.SVDSpectrum
        """
        mmodes.redistribute("m")

        vis = mmodes.vis[:]
        weight = mmodes.weight[:]
        nfreq = vis.shape[2]

        # Number of singular values per m is bounded by both matrix dims.
        nmode = min(vis.shape[1] * vis.shape[3], nfreq)
        spec = containers.SVDSpectrum(singularvalue=nmode, axes_from=mmodes)
        spec.spectrum[:] = 0.0

        for mi, m in vis.enumerate(axis=0):
            self.log.debug("Calculating SVD spectrum of m=%i", m)

            # Flatten each m into a (freq, everything-else) matrix, and
            # build the missing-data mask from zero weights.
            vis_m = vis[mi].view(np.ndarray).transpose((1, 0, 2)).reshape(nfreq, -1)
            weight_m = (
                weight[mi].view(np.ndarray).transpose((1, 0, 2)).reshape(nfreq, -1)
            )
            missing = weight_m == 0.0

            _, sig, _ = svd_em(vis_m, missing, niter=self.niter)
            spec.spectrum[m] = sig

        return spec
class SVDFilter(task.SingleTask):
    """SVD filter the m-modes to remove the most correlated components.

    Attributes
    ----------
    niter : int
        Number of iterations of EM to perform.
    local_threshold : float
        Cut out modes with singular value higher than `local_threshold` times the
        largest mode on each m.
    global_threshold : float
        Remove modes with singular value higher than `global_threshold` times the
        largest mode on any m
    """

    niter = config.Property(proptype=int, default=5)
    global_threshold = config.Property(proptype=float, default=1e-3)
    local_threshold = config.Property(proptype=float, default=1e-2)

    @staticmethod
    def _flatten(arr, mi, nfreq):
        """Flatten entry *mi* of an (m, pol, freq, baseline) dataset into a
        (freq, pol * baseline) matrix for the SVD."""
        return arr[mi].view(np.ndarray).transpose((1, 0, 2)).reshape(nfreq, -1)

    def process(self, mmodes):
        """Filter MModes using an SVD.

        Parameters
        ----------
        mmodes : container.MModes

        Returns
        -------
        mmodes : container.MModes
        """
        from mpi4py import MPI

        mmodes.redistribute("m")

        vis = mmodes.vis[:]
        weight = mmodes.weight[:]
        nfreq = vis.shape[2]

        sv_max = 0.0

        # TODO: this should be changed such that it does all the computation in
        # a single SVD pass.
        # Do a quick first pass calculation of all the singular values to get
        # the max on this rank.
        for mi, m in vis.enumerate(axis=0):
            vis_m = self._flatten(vis, mi, nfreq)
            mask_m = self._flatten(weight, mi, nfreq) == 0.0

            u, sig, vh = svd_em(vis_m, mask_m, niter=self.niter)
            sv_max = max(sig[0], sv_max)

        # Reduce to get the global max.
        global_max = mmodes.comm.allreduce(sv_max, op=MPI.MAX)
        self.log.debug("Global maximum singular value=%.2g", global_max)
        import sys
        sys.stdout.flush()

        # Loop over all m's and remove modes above the combined cut
        for mi, m in vis.enumerate(axis=0):
            vis_m = self._flatten(vis, mi, nfreq)
            mask_m = self._flatten(weight, mi, nfreq) == 0.0

            u, sig, vh = svd_em(vis_m, mask_m, niter=self.niter)

            # Zero out the *largest* singular values exceeding either the
            # global or local threshold (these are the most correlated modes).
            global_cut = (sig > self.global_threshold * global_max).sum()
            local_cut = (sig > self.local_threshold * sig[0]).sum()
            cut = max(global_cut, local_cut)
            sig[:cut] = 0.0

            # Recombine the matrix.
            vis_m = np.dot(u, sig[:, np.newaxis] * vh)

            # Reshape and write back into the mmodes container.
            # Bug fixed: the reshape hard-coded 2 polarisations; use the
            # actual size of axis 1 so other pol counts round-trip correctly.
            vis[mi] = vis_m.reshape(nfreq, vis.shape[1], -1).transpose((1, 0, 2))

        return mmodes
def svd_em(A, mask, niter=5, rank=5, full_matrices=False):
    """Perform an SVD with missing entries using Expectation-Maximisation.

    This assumes that the matrix is well approximated by only a few modes in
    order fill the missing entries. This is probably not a proper EM scheme, but
    is not far off.

    Parameters
    ----------
    A : np.ndarray
        Matrix to SVD. Not modified; the iteration works on a copy.
    mask : np.ndarray
        Boolean array of masked values. Missing values are `True`.
    niter : int, optional
        Number of iterations to perform (values < 1 behave like 1).
    rank : int, optional
        Set the rank of the approximation used to fill the missing values.
    full_matrices : bool, optional
        Return the full span of eigenvectors and values (see `scipy.linalg.svd`
        for a fuller description).

    Returns
    -------
    u, sig, vh : np.ndarray
        The singular values and vectors.

    Raises
    ------
    ValueError
        If *every* entry is masked — there is no observed data to seed the
        fill (the old code silently produced NaNs in this case).
    """
    mask = np.asarray(mask, dtype=bool)

    # Nothing missing: the EM loop would recompute the identical SVD
    # `niter` times, so compute it once and return.
    if not mask.any():
        return la.svd(A, full_matrices=full_matrices, overwrite_a=False)

    if mask.all():
        raise ValueError("svd_em: all entries are masked; nothing to fit.")

    # Do an initial fill of the missing entries with the median of the
    # observed values.
    A = A.copy()
    A[mask] = np.median(A[~mask])

    # Perform cycles of calculating the SVD with the current guess for the
    # missing values, then forming a new estimate of the missing values using a
    # low rank approximation. At least one pass runs so the return values
    # are always defined (niter <= 0 used to raise NameError).
    for _ in range(max(int(niter), 1)):
        u, sig, vh = la.svd(A, full_matrices=full_matrices, overwrite_a=False)
        low_rank_A = np.dot(u[:, :rank] * sig[:rank], vh[:rank])
        A[mask] = low_rank_A[mask]

    return u, sig, vh
| {
"repo_name": "radiocosmology/draco",
"path": "draco/analysis/svdfilter.py",
"copies": "1",
"size": "6005",
"license": "mit",
"hash": 7429625756180858000,
"line_mean": 28.0096618357,
"line_max": 99,
"alpha_frac": 0.559367194,
"autogenerated": false,
"ratio": 3.803039898670044,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9860454753732708,
"avg_score": 0.00039046778746711975,
"num_lines": 207
} |
"""A set of test cases for the wemake isort profile.
Snippets are taken directly from the wemake-python-styleguide project here:
https://github.com/wemake-services/wemake-python-styleguide
"""
from functools import partial
from ..utils import isort_test
# Shorthand: run isort_test pinned to the "wemake" profile, with the
# styleguide package itself treated as first-party.
wemake_isort_test = partial(
    isort_test, profile="wemake", known_first_party=["wemake_python_styleguide"]
)
def test_wemake_snippet_one():
    """Run the wemake profile over the plugin-module import block taken from
    wemake-python-styleguide."""
    wemake_isort_test(
        """
import ast
import tokenize
import traceback
from typing import ClassVar, Iterator, Sequence, Type
from flake8.options.manager import OptionManager
from typing_extensions import final
from wemake_python_styleguide import constants, types
from wemake_python_styleguide import version as pkg_version
from wemake_python_styleguide.options.config import Configuration
from wemake_python_styleguide.options.validation import validate_options
from wemake_python_styleguide.presets.types import file_tokens as tokens_preset
from wemake_python_styleguide.presets.types import filename as filename_preset
from wemake_python_styleguide.presets.types import tree as tree_preset
from wemake_python_styleguide.transformations.ast_tree import transform
from wemake_python_styleguide.violations import system
from wemake_python_styleguide.visitors import base
VisitorClass = Type[base.BaseVisitor]
"""
    )
def test_wemake_snippet_two():
    """Run the wemake profile over the formatter-module import block taken
    from wemake-python-styleguide."""
    wemake_isort_test(
        """
from collections import defaultdict
from typing import ClassVar, DefaultDict, List
from flake8.formatting.base import BaseFormatter
from flake8.statistics import Statistics
from flake8.style_guide import Violation
from pygments import highlight
from pygments.formatters import TerminalFormatter
from pygments.lexers import PythonLexer
from typing_extensions import Final
from wemake_python_styleguide.version import pkg_version
#: That url is generated and hosted by Sphinx.
DOCS_URL_TEMPLATE: Final = (
    'https://wemake-python-stylegui.de/en/{0}/pages/usage/violations/'
)
"""
    )
def test_wemake_snippet_three():
    """Run the wemake profile over a snippet with parenthesised multi-name
    imports, taken from wemake-python-styleguide."""
    wemake_isort_test(
        """
import ast
from pep8ext_naming import NamingChecker
from typing_extensions import final
from wemake_python_styleguide.transformations.ast.bugfixes import (
    fix_async_offset,
    fix_line_number,
)
from wemake_python_styleguide.transformations.ast.enhancements import (
    set_if_chain,
    set_node_context,
)
@final
class _ClassVisitor(ast.NodeVisitor): ...
"""
    )
| {
"repo_name": "PyCQA/isort",
"path": "tests/unit/profiles/test_wemake.py",
"copies": "1",
"size": "2438",
"license": "mit",
"hash": 1588545796122001200,
"line_mean": 27.0229885057,
"line_max": 80,
"alpha_frac": 0.7863002461,
"autogenerated": false,
"ratio": 3.6827794561933533,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.49690797022933536,
"avg_score": null,
"num_lines": null
} |
"""A set of timestampable model mixins"""
from __future__ import unicode_literals
from django.conf import settings
from django.db import models
from django.utils import timezone
from django.utils.translation import ugettext_lazy as _
class CreatedAtMixin(models.Model):
    """Add ``created_at`` field to model."""
    # Stamped once by Django at first insert (auto_now_add).
    created_at = models.DateTimeField(auto_now_add=True,
                                      null=False, blank=True,
                                      verbose_name=_('Creation date'))
    class Meta(object):
        abstract = True
class CreatedByMixin(models.Model):
    """Add ``created_by`` field to model."""
    # User who created the row; blank=True so forms need not supply it.
    created_by = models.ForeignKey(
        settings.AUTH_USER_MODEL,
        db_column='created_by',
        on_delete=models.CASCADE,
        null=False, blank=True,
        related_name='%(app_label)s_%(class)s_created',
        verbose_name=_('Created by'))
    class Meta(object):
        abstract = True
class UpdatedAtMixin(models.Model):
    """Add ``updated_at`` field to model."""
    # Refreshed by Django on every save (auto_now).
    updated_at = models.DateTimeField(auto_now=True, null=True, blank=True,
                                      verbose_name=_('Date of last update'))
    class Meta(object):
        abstract = True
class UpdatedByMixin(models.Model):
    """Add ``updated_by`` field to model."""
    # User who last modified the row; set via save_by().
    updated_by = models.ForeignKey(
        settings.AUTH_USER_MODEL,
        db_column='updated_by',
        on_delete=models.CASCADE,
        null=True, blank=True,
        related_name='%(app_label)s_%(class)s_updated',
        verbose_name=_('Updated by'))
    class Meta(object):
        abstract = True
    def save_by(self, user, force_insert=False, force_update=False, using=None,
                update_fields=None):
        """Record *user* as the last editor, then save the instance."""
        self.updated_by = user
        return self.save(force_insert, force_update, using, update_fields)
class DeletedAtMixin(models.Model):
    """Add ``deleted_at`` field to model."""
    # Soft-delete timestamp; NULL means the row is live.
    deleted_at = models.DateTimeField(null=True, blank=True,
                                      verbose_name=_('Removal date'))
    class Meta(object):
        abstract = True
    def delete(self, *args, **kwargs):
        """Soft-delete: stamp ``deleted_at`` and save instead of removing the row."""
        self.deleted_at = timezone.now()
        self.save()
class DeletedByMixin(models.Model):
    """Add ``deleted_by`` field to model."""
    # User who soft-deleted the row; set via delete_by().
    deleted_by = models.ForeignKey(
        settings.AUTH_USER_MODEL,
        db_column='deleted_by',
        on_delete=models.CASCADE,
        null=True, blank=True,
        related_name='%(app_label)s_%(class)s_deleted',
        verbose_name=_('Deleted by'))
    class Meta(object):
        abstract = True
    def delete(self, *args, **kwargs):
        # No-op placeholder: when combined with DeletedAtMixin (see
        # DeletedMixin) the MRO resolves to the saving soft-delete there.
        pass
    def delete_by(self, user, using=None):
        """Record *user* as the deleter, then delegate to ``delete``.

        NOTE(review): on its own this mixin's ``delete`` is a no-op, so
        ``deleted_by`` is never persisted unless another mixin supplies a
        saving ``delete`` — confirm this is intended.
        """
        self.deleted_by = user
        self.delete(using)
class CreatedMixin(CreatedAtMixin, CreatedByMixin):
    """Add ``created_at`` and ``created_by`` fields to model."""
    # Pure combination mixin: fields come from the two bases.
    class Meta(object):
        abstract = True
class UpdatedMixin(UpdatedAtMixin, UpdatedByMixin):
    """Add ``updated_at`` and ``updated_by`` fields to model."""
    # Pure combination mixin: fields and save_by() come from the bases.
    class Meta(object):
        abstract = True
class DeletedMixin(DeletedAtMixin, DeletedByMixin):
    """Add ``deleted_at`` and ``deleted_by`` fields to model."""
    # MRO picks DeletedAtMixin.delete (the saving soft-delete) over
    # DeletedByMixin's no-op, so delete_by() persists both fields here.
    class Meta(object):
        abstract = True
| {
"repo_name": "tomi77/python-t77-django",
"path": "django_extra_tools/db/models/timestampable.py",
"copies": "1",
"size": "3266",
"license": "mit",
"hash": -2969526162069495000,
"line_mean": 28.4234234234,
"line_max": 79,
"alpha_frac": 0.6071647275,
"autogenerated": false,
"ratio": 3.9732360097323602,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.508040073723236,
"avg_score": null,
"num_lines": null
} |
""" A set of tokens and convienence functions for input/output files.
author: Brian Schrader
since: 2015-12-28
"""
from __future__ import print_function
from collections import namedtuple
import glob, re
# Filename template for generated outputs: mp.<alias>.output<ext>.
file_pattern = 'mp.{}.output{}'
# Template for naming a command's numbered output.
alias_pattern = '{command}-{output_number}'
class PathToken(object):
    """A named filesystem path used by the pipeline."""

    def __init__(self, alias, path):
        self.alias = alias
        self.path = path

    def __repr__(self):
        return '<Path {}: {}>'.format(self.alias, self.path)

    def __eq__(self, other):
        # Two path tokens match when either identifier coincides; anything
        # without the token attributes is simply unequal.
        try:
            same_alias = self.alias == other.alias
        except AttributeError:
            return False
        return same_alias or self.path == other.path

    def eval(self):
        """Return the concrete path this token stands for."""
        return self.path
class CommentToken(object):
    """A literal comment carried through the pipeline unchanged."""

    def __init__(self, parts):
        self.parts = parts

    def __repr__(self):
        text = ''.join(self.parts)
        return '<Comment: {}>'.format(text)

    def __eq__(self, other):
        # Comments compare by their joined text, not by how it was split.
        return ''.join(self.parts) == ''.join(other.parts)

    def eval(self):
        """Render the comment as a single newline-terminated line."""
        return '{}\n'.format(''.join(self.parts))
class FileToken(object):
    """ An abc for input/output data classes. Provides various common
    methods.
    Warning: This class should not be used directly.
    """

    def __init__(self, alias, filename='', cwd=''):
        self.alias = alias
        self.filename = filename
        # Normalise the working directory to always end with a slash.
        if cwd and not cwd.endswith('/'):
            cwd += '/'
        self.cwd = cwd

    def __eq__(self, other):
        # Tokens match on either identifier; non-tokens are unequal.
        try:
            return (self.alias == other.alias or
                    self.filename == other.filename)
        except AttributeError:
            return False

    def __hash__(self):
        # Hash on the alias alone, mirroring the alias side of equality.
        return hash(self.alias)

    @property
    def path(self):
        """Full path: working-directory prefix plus filename."""
        return '{}{}'.format(self.cwd, self.filename)
class Input(FileToken):
    """ A model of a single input to a given command. Input tokens can be
    evaluated to obtain their actual filename(s).
    """

    def __init__(self, alias, filename='', cwd='', and_or=''):
        super(Input, self).__init__(alias, filename, cwd)
        self.and_or = and_or

    def __repr__(self):
        try:
            evaluated = self.eval()
        except Exception:
            evaluated = '?'
        suffix = ' _{}_'.format(self.and_or) if self.and_or else ''
        return '<Input: {}->[{}]{}>'.format(self.alias, evaluated, suffix)

    def fuzzy_match(self, other):
        """ Given another token, see if either the major alias identifier
        matches the other alias, or if magic matches the alias.
        """
        try:
            magic = self.alias == other.magic
        except AttributeError:
            magic = False
        fuzzy = False
        if '.' in self.alias:
            fuzzy = self.alias.split('.')[0] == other.alias
        return magic or fuzzy

    def eval(self):
        """ Evaluates the given input and returns a string containing the
        actual filenames represented. If the input token represents multiple
        independent files, then eval will return a list of all the input files
        needed, otherwise it returns the filenames in a string.
        """
        if self.and_or == 'or':
            return [Input(self.alias, fname, self.cwd, 'and')
                    for fname in self.files]
        return ' '.join(self.files)

    @property
    def command_alias(self):
        """ Returns the command alias for a given input. In most cases this
        is just the input's alias but if the input is one of many, then
        `command_alias` returns just the beginning of the alias cooresponding to
        the command's alias.
        """
        # NOTE(review): the membership test looks for '.' but the split uses
        # '-'; preserved as-is — confirm which separator is intended.
        if '.' in self.alias:
            return self.alias.split('-')[0]
        return None

    @property
    def is_magic(self):
        # Magic inputs are the ones whose eval() fans out into a list.
        try:
            return isinstance(self.eval(), list)
        except ValueError:
            return False

    @property
    def is_glob(self):
        return '*' in self.filename

    @property
    def magic_path(self):
        """Glob pattern matching this alias's generated output files."""
        pattern = file_pattern.format(self.alias, '*')
        return '{}{}'.format(self.cwd, pattern)

    @property
    def files(self):
        """ Returns a list of all the files that match the given
        input token.
        """
        # Try the explicit path first, then the generated-output pattern,
        # then the bare alias.
        matches = glob.glob(self.path)
        if not matches and self.is_glob:
            matches = glob.glob(self.magic_path)
        if not matches:
            matches = glob.glob(self.alias)
        if not matches:
            raise ValueError('No files match. %s' % self)
        return matches

    @staticmethod
    def from_string(string, _or=''):
        """ Parse a given string and turn it into an input token. """
        return Input(string, and_or='or' if _or else '')
class Output(FileToken):
    """ A model of a single output to a given command. Output tokens can be
    evaluated to obtain their actual filename(s).
    """

    def __init__(self, alias, filename='', cwd='', magic=''):
        super(Output, self).__init__(alias, filename, cwd)
        self.ext = ''
        self.magic = ''
        self._clean(magic)

    def __repr__(self):
        magic_part = (' ' + self.magic) if self.magic else ''
        return '<Output: {}->[{}]{} {}>'.format(
            self.alias, self.eval(), magic_part, self.ext)

    def __eq__(self, other):
        """ Overrides the token eq to allow for magic : alias comparison for
        magic inputs. Defaults to the super() eq otherwise.
        """
        try:
            if self.magic == other.alias:
                return True
            return super(Output, self).__eq__(other)
        except AttributeError:
            return False

    def eval(self):
        """ Returns a filename to be used for script output. """
        if self.magic:
            return self.magic
        if self.filename:
            return self.path
        # No explicit name: fall back to the generated-output template.
        return file_pattern.format(self.alias, self.ext)

    def as_input(self):
        """ Returns an input token for the given output. """
        return Input(self.alias, self.eval())

    def _clean(self, magic):
        """ Given a magic string, remove the output tag designator. """
        lowered = magic.lower()
        if lowered == 'o':
            self.magic = ''
        elif lowered[:2] == 'o:':
            self.magic = magic[2:]
        elif lowered[:2] == 'o.':
            self.ext = magic[1:]

    @staticmethod
    def from_string(string):
        """ Parse a given string and turn it into an output token. """
        return Output('', magic=string)
| {
"repo_name": "TorkamaniLab/metapipe",
"path": "metapipe/models/tokens.py",
"copies": "2",
"size": "6534",
"license": "mit",
"hash": -756490810058993900,
"line_mean": 27.9115044248,
"line_max": 80,
"alpha_frac": 0.5531068258,
"autogenerated": false,
"ratio": 4.183098591549296,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5736205417349296,
"avg_score": null,
"num_lines": null
} |
"""A set of tools for use in integration tests."""
import os
from hashlib import sha1
import numpy as np
import tables
def hasher(x):
    """Return the SHA-1 digest of *x* as an integer.

    Accepts either ``str`` (encoded as UTF-8) or ``bytes``. The original
    implementation passed text straight to ``sha1`` and therefore raised
    ``TypeError`` on ``str`` input under Python 3 (the sibling helper
    module already encodes).
    """
    if isinstance(x, str):
        x = x.encode()
    return int(sha1(x).hexdigest(), 16)
def idx(h):
    """Split integer *h* into five base-2**32 digits, most significant first."""
    digits = []
    for _ in range(5):
        h, remainder = divmod(h, 2**32)
        digits.append(remainder)
    return tuple(reversed(digits))
# Map a string to a 5-element uint32 array derived from its SHA-1 digest.
sha1array = lambda x: np.array(idx(hasher(x)), np.uint32)
def table_exist(db, tables):
    """Return True when every name in *tables* is present under ``db.root``."""
    return all(t in db.root for t in tables)
def find_ids(data, data_table, id_table):
    """Finds ids of the specified data located in the specified data_table,
    and extracts the corresponding id from the specified id_table.
    """
    matches = []
    for pos, entry in enumerate(data_table):
        entry_is_array = isinstance(entry, np.ndarray)
        if entry_is_array and isinstance(data, np.ndarray):
            # Array vs array: element-wise comparison, all must match.
            hit = (entry == data).all()
        elif entry_is_array:
            # Array vs scalar/string: compare against its SHA-1 array form.
            hit = (entry == sha1array(data)).all()
        else:
            hit = entry == data
        if hit:
            matches.append(id_table[pos])
    return matches
def exit_times(agent_id, exit_table):
    """Finds exit times of the specified agent from the exit table.

    Parameters
    ----------
    agent_id : int
        Agent whose exits to collect.
    exit_table : mapping
        Table with parallel "AgentId" and "ExitTime" columns.

    Returns
    -------
    list
        Exit times for *agent_id*, in table order.
    """
    # zip the parallel columns instead of keeping a manual index counter
    # (the old local list also shadowed the function's own name).
    return [time for aid, time in
            zip(exit_table["AgentId"], exit_table["ExitTime"])
            if aid == agent_id]
def create_sim_input(ref_input, k_factor_in, k_factor_out):
    """Creates xml input file from a reference xml input file.

    Changes k_factor_in and k_factor_out in a simulation input
    files for KFacility.

    Args:
        ref_input: A reference xml input file with k_factors.
        k_factor_in: A new k_factor for requests.
        k_factor_out: A new conversion factor for offers.

    Returns:
        A path to the created file. It is created in the same
        directory as the reference input file.
    """
    # Output path encodes both factors, next to the reference file.
    fw_path = ref_input.split(".xml")[0] + "_" + str(k_factor_in) + \
              "_" + str(k_factor_out) + ".xml"
    # Context managers close both handles even if a line fails mid-copy
    # (the old code closed them manually and leaked on exceptions).
    with open(ref_input, "r") as fr, open(fw_path, "w") as fw:
        for line in fr:
            # Rewrite the two k-factor elements, preserving indentation.
            if line.count("k_factor_in"):
                line = line.split("<")[0] + "<k_factor_in>" + \
                    str(k_factor_in) + "</k_factor_in>\n"
            elif line.count("k_factor_out"):
                line = line.split("<")[0] + "<k_factor_out>" + \
                    str(k_factor_out) + "</k_factor_out>\n"
            fw.write(line)
    return fw_path
| {
"repo_name": "cyclus/cycaless",
"path": "tests/helper.py",
"copies": "1",
"size": "2575",
"license": "bsd-3-clause",
"hash": 4242726598770658300,
"line_mean": 28.9418604651,
"line_max": 76,
"alpha_frac": 0.5763106796,
"autogenerated": false,
"ratio": 3.374836173001311,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4451146852601311,
"avg_score": null,
"num_lines": null
} |
"""A set of tools for use in integration tests."""
import os
import platform
import sqlite3
from hashlib import sha1
import numpy as np
import tables
def hasher(x):
    """Hash a string to an integer via its SHA-1 hex digest."""
    hexdigest = sha1(x.encode()).hexdigest()
    return int(hexdigest, 16)
def idx(h):
    """Decompose *h* into five base-2**32 digits, most significant first."""
    parts = []
    for _ in range(5):
        h, digit = divmod(h, 2**32)
        parts.insert(0, digit)
    return tuple(parts)
# Map a string to a 5-element uint32 array derived from its SHA-1 digest.
sha1array = lambda x: np.array(idx(hasher(x)), np.uint32)
# Scratch output databases produced by the integration-test runs.
h5out = "output_temp.h5"
sqliteout = "output_temp.sqlite"
def clean_outs():
    """Remove any leftover output databases from previous runs."""
    for path in (h5out, sqliteout):
        if os.path.exists(path):
            os.remove(path)
def which_outfile():
    """Return the output database path for this platform: the HDF5 file on
    Linux, the SQLite file on every other platform (e.g. Mac).
    """
    return h5out if platform.system() == 'Linux' else sqliteout
def tables_exist(outfile, table_names):
    """Check whether the output database contains all of *table_names*.

    Works against either the HDF5 file (pytables) or the SQLite file,
    depending on which backend produced *outfile*. Handles are now closed
    via try/finally, so they no longer leak when a query raises.
    """
    if outfile == h5out:
        f = tables.open_file(outfile, mode="r")
        try:
            return all(t in f.root for t in table_names)
        finally:
            f.close()
    else:
        # SQLite stores table names without the HDF5-style leading slash.
        names = [t.replace('/', '') for t in table_names]
        conn = sqlite3.connect(outfile)
        try:
            conn.row_factory = sqlite3.Row
            cur = conn.cursor()
            return all(
                bool(cur.execute('SELECT * From sqlite_master WHERE name = ? ',
                                 (t, )).fetchone())
                for t in names)
        finally:
            conn.close()
def find_ids(data, data_table, id_table):
    """Finds ids of the specified data located in the specified data_table,
    and extracts the corresponding id from the specified id_table.
    """
    found = []
    for row_id, entry in zip(id_table, data_table):
        if isinstance(entry, np.ndarray):
            # Compare arrays element-wise; non-array needles are matched
            # against their SHA-1 array form.
            needle = data if isinstance(data, np.ndarray) else sha1array(data)
            matched = (entry == needle).all()
        else:
            matched = entry == data
        if matched:
            found.append(row_id)
    return found
def to_ary(a, k):
    """Return column *k* of table *a*, regardless of backend.

    SQLite rows are gathered into a numpy array; the HDF5 recarray already
    supports direct column indexing.
    """
    if which_outfile() == sqliteout:
        return np.array([row[k] for row in a])
    return a[k]
def exit_times(agent_id, exit_table):
    """Finds exit times of the specified agent from the exit table.

    Parameters
    ----------
    agent_id : int
        Agent whose exits to collect.
    exit_table : mapping
        Table with parallel "AgentId" and "ExitTime" columns.

    Returns
    -------
    list
        Exit times for *agent_id*, in table order.
    """
    # zip the parallel columns instead of keeping a manual index counter
    # (the old local list also shadowed the function's own name).
    return [time for aid, time in
            zip(exit_table["AgentId"], exit_table["ExitTime"])
            if aid == agent_id]
def agent_time_series(names):
    """Return per-name timeseries of the number of live agents in a Cyclus
    simulation (the docstring previously described a list return and an
    ``outfile`` argument; the function returns a dict and picks the
    backend itself via ``which_outfile()``).

    Parameters
    ----------
    names : list
        the list of agent names

    Returns
    -------
    dict
        Maps each name to a list of per-timestep live-agent counts.
    """
    if which_outfile() == h5out :
        # HDF5 backend: read duration and the entry/exit tables via pytables.
        f = tables.open_file(h5out, mode = "r")
        nsteps = f.root.Info.cols.Duration[:][0]
        entries = {name: [0] * nsteps for name in names}
        exits = {name: [0] * nsteps for name in names}
        # Get specific tables and columns
        agent_entry = f.get_node("/AgentEntry")[:]
        agent_exit = f.get_node("/AgentExit")[:] if \
            hasattr(f.root, 'AgentExit') else None
        f.close()
    else :
        # SQLite backend: same data via SQL; AgentExit may not exist.
        conn = sqlite3.connect(sqliteout)
        conn.row_factory = sqlite3.Row
        cur = conn.cursor()
        exc = cur.execute
        nsteps = exc('SELECT MIN(Duration) FROM Info').fetchall()[0][0]
        entries = {name: [0] * nsteps for name in names}
        exits = {name: [0] * nsteps for name in names}
        # Get specific tables and columns
        agent_entry = exc('SELECT * FROM AgentEntry').fetchall()
        agent_exit = exc('SELECT * FROM AgentExit').fetchall() \
            if len(exc(
                ("SELECT * FROM sqlite_master WHERE "
                 "type='table' AND name='AgentExit'")).fetchall()) > 0 \
            else None
        conn.close()
    # Find agent id
    agent_ids = to_ary(agent_entry, "AgentId")
    agent_type = to_ary(agent_entry, "Prototype")
    agent_ids = {name: find_ids(name, agent_type, agent_ids) for name in names}
    # entries per timestep
    for name, ids in agent_ids.items():
        for id in ids:
            idx = np.where(to_ary(agent_entry,'AgentId') == id)[0]
            entries[name][agent_entry[idx]['EnterTime']] += 1
    # cumulative entries
    entries = {k: [sum(v[:i+1]) for i in range(len(v))] \
               for k, v in entries.items()}
    if agent_exit is None:
        # No exits recorded: the cumulative entry counts are the answer.
        return entries
    # exits per timestep
    for name, ids in agent_ids.items():
        for id in ids:
            idxs = np.where(to_ary(agent_exit,'AgentId') == id)[0]
            if len(idxs) > 0:
                exits[name][agent_exit[idxs[0]]['ExitTime']] += 1
    # cumulative exits
    exits = {k: [sum(v[:i+1]) for i in range(len(v))] \
             for k, v in exits.items()}
    # return difference
    ret = {}
    for name in names:
        i = entries[name]
        # shift by one to account for agents that enter/exit in the same
        # timestep
        o = [0] + exits[name][:-1]
        ret[name] = [i - o for i, o in zip(i, o)]
    return ret
def create_sim_input(ref_input, k_factor_in, k_factor_out):
    """Creates xml input file from a reference xml input file.

    Changes k_factor_in and k_factor_out in a simulation input
    files for KFacility.

    Args:
        ref_input: A reference xml input file with k_factors.
        k_factor_in: A new k_factor for requests.
        k_factor_out: A new conversion factor for offers.

    Returns:
        A path to the created file. It is created in the same
        directory as the reference input file.
    """
    # Output path encodes both factors, next to the reference file.
    fw_path = ref_input.split(".xml")[0] + "_" + str(k_factor_in) + \
              "_" + str(k_factor_out) + ".xml"
    # Context managers close both handles even if a line fails mid-copy
    # (the old code closed them manually and leaked on exceptions).
    with open(ref_input, "r") as fr, open(fw_path, "w") as fw:
        for line in fr:
            # Rewrite the two k-factor elements, preserving indentation.
            if line.count("k_factor_in"):
                line = line.split("<")[0] + "<k_factor_in>" + \
                    str(k_factor_in) + "</k_factor_in>\n"
            elif line.count("k_factor_out"):
                line = line.split("<")[0] + "<k_factor_out>" + \
                    str(k_factor_out) + "</k_factor_out>\n"
            fw.write(line)
    return fw_path
| {
"repo_name": "mbmcgarry/cyclus",
"path": "tests/helper.py",
"copies": "4",
"size": "6389",
"license": "bsd-3-clause",
"hash": -1504546109224630000,
"line_mean": 29.8647342995,
"line_max": 79,
"alpha_frac": 0.5620597903,
"autogenerated": false,
"ratio": 3.522050716648291,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6084110506948291,
"avg_score": null,
"num_lines": null
} |
"""A set of tools for use in integration tests."""
import os
import tempfile
import subprocess
import sys
from hashlib import sha1
import numpy as np
import tables
from nose.tools import assert_equal
# String-like types usable in isinstance checks on either Python major
# version: bytes/str on Python 3, str/unicode on Python 2.
if sys.version_info[0] >= 3:
    str_types = (bytes, str)
else:
    str_types = (str, unicode)
def hasher(x):
    """Return the SHA-1 digest of the string *x* as a big integer."""
    digest = sha1(x.encode()).hexdigest()
    return int(digest, 16)
def idx(h):
    """Split integer *h* into five 32-bit words, most significant first."""
    words = []
    for _ in range(5):
        h, low = divmod(h, 2**32)
        words.append(low)
    words.reverse()
    return tuple(words)
def sha1array(x):
    """Hash string *x* to a length-5 uint32 numpy array (via idx/hasher)."""
    return np.array(idx(hasher(x)), np.uint32)
def table_exist(db, tables):
    """Checks if hdf5 database contains the specified tables.

    Returns True only when every name in *tables* is present under db.root.
    """
    for t in tables:
        if t not in db.root:
            return False
    return True
def find_ids(data, data_table, id_table):
    """Finds ids of the specified data located in the specified data_table,
    and extracts the corresponding id from the specified id_table.

    Array entries are compared element-wise; if the table holds arrays but
    *data* is not one, *data* is hashed with sha1array before comparing.
    """
    matched = []
    for pos, entry in enumerate(data_table):
        entry_is_array = isinstance(entry, np.ndarray)
        if entry_is_array and isinstance(data, np.ndarray):
            hit = (entry == data).all()
        elif entry_is_array:
            hit = (entry == sha1array(data)).all()
        else:
            hit = entry == data
        if hit:
            matched.append(id_table[pos])
    return matched
def exit_times(agent_id, exit_table):
    """Finds exit times of the specified agent from the exit table.

    Rows where "AgentId" equals *agent_id* contribute their "ExitTime".
    """
    times = []
    for pos, candidate in enumerate(exit_table["AgentId"]):
        if candidate == agent_id:
            times.append(exit_table["ExitTime"][pos])
    return times
def run_cyclus(cyclus, cwd, in_path, out_path):
    """Runs cyclus with various inputs and creates output databases
    """
    # One-element list because nose does not send() to test generators;
    # check_cmd stores the return code into it.
    holdsrtn = [1]
    command = [cyclus, "-o", out_path, "--input-file", in_path]
    check_cmd(command, cwd, holdsrtn)
def check_cmd(args, cwd, holdsrtn):
    """Runs a command in a subprocess and verifies that it executed properly.

    The return code is stored into holdsrtn[0] (a one-element list, for the
    nose test-generator pattern) and also asserted to be 0.
    """
    # A list of args is joined into one shell string (run with shell=True).
    if not isinstance(args, str_types):
        args = " ".join(args)
    print("TESTING: running command in {0}:\n\n{1}\n".format(cwd, args))
    # Point $_ at the cyclus binary, mimicking a login shell's convention.
    env = dict(os.environ)
    env['_'] = subprocess.check_output(['which', 'cyclus'], cwd=cwd).strip()
    # Capture stdout+stderr to a temp file; dump it only on failure.
    with tempfile.NamedTemporaryFile() as f:
        rtn = subprocess.call(args, shell=True, cwd=cwd, stdout=f, stderr=f, env=env)
        if rtn != 0:
            f.seek(0)
            print("STDOUT + STDERR:\n\n" + f.read().decode())
    holdsrtn[0] = rtn
    assert_equal(rtn, 0)
| {
"repo_name": "rwcarlsen/cycamore",
"path": "tests/helper.py",
"copies": "9",
"size": "2628",
"license": "bsd-3-clause",
"hash": -8249522726482616000,
"line_mean": 31.0487804878,
"line_max": 85,
"alpha_frac": 0.6145357686,
"autogenerated": false,
"ratio": 3.360613810741688,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.004016217666005398,
"num_lines": 82
} |
"""A set of types as required by the DB API 2.0.
All these types implement str() in a way that makes them correct and
safe for inclusion in commands passed to the backend.
This module uses datetime from the stdlib, which means we need Python 2.3+
"""
# Fallback money formatting (currency symbol, thousands separator, decimal
# point) used when the system locale cannot be determined.
LOCALE_MONEY_DEFAULTS = ('$',',','.')
# System imports
import datetime
import string
import re
# Sibling imports
from errors import *
# Single quote isn't "printable" for us.. we need to muck with it
_printable = string.printable.replace("'","").replace("\\","")
class NULL:
    """SQL NULL marker; the wrapped value is accepted but ignored."""

    def __init__(self, v):
        # Rendering is constant, so the value is irrelevant.
        pass

    def __str__(self):
        return "NULL"
# Matches backend escape sequences: a backslash followed by either three
# octal digits or another backslash (used by BINARY.fromDatabase).
_nchar_exp = re.compile(r'\\([0-9]{3}|\\)', re.MULTILINE)
class BINARY(str):
    """Byte string quoted for the backend.

    Characters not in _printable (which excludes single quote and
    backslash) are emitted as backslash-octal escapes. Python 2 code:
    iterates the str as single-byte characters.
    """
    def __str__(self):
        return "'" + "".join( [
            # printable characters pass through unchanged; all others
            # become a \\ooo octal escape
            (char in _printable and
                char or
                "\\\\%03o" % ord(char))
            for char in str.__str__(self)]) + "'"
    @staticmethod
    def fromDatabase(s):
        """Decode backend octal escapes back into raw characters."""
        def replace(mo):
            # group(1) is either three octal digits or a backslash.
            try:
                return chr(int(mo.group(1),8))
            except:
                return '\\'
        return re.sub(_nchar_exp, replace, s)
class STRING(str):
    """String literal escaped and quoted for inclusion in a backend command."""

    def __str__(self):
        raw = str.__str__(self)
        # Backslashes become the octal escape \134; single quotes are
        # doubled, per the backend's quoting rules.
        escaped = raw.replace("\\", "\\" + "134")
        return "'" + escaped.replace("'", "''") + "'"
class UNICODE(STRING):
    """Unicode string encoded to UTF-8 and escaped like STRING (Python 2).

    NOTE(review): __init__ calls STRING.__init__ without self, and str
    ignores __init__ arguments anyway (the value is fixed in __new__),
    so the encode here appears to have no effect — confirm intent.
    """
    def __init__(self, v):
        STRING.__init__(v.encode('utf-8'))
    @staticmethod
    def fromDatabase(s):
        # NOTE(review): STRING defines no fromDatabase in this file;
        # presumably provided elsewhere — verify.
        return STRING.fromDatabase(s).decode('utf-8')
class NUMBER:
    """Numeric wrapper rendered unquoted for the backend.

    Accepts int, float, or Decimal. Python 2 code (old raise syntax,
    __long__). Note: Decimal is imported further down in this module,
    after this class body, which works because the name is only looked
    up when __init__ runs.
    """
    def __init__(self,v):
        # Exact type check: subclasses of int/float/Decimal are rejected.
        if type(v) in (int, float, Decimal):
            self.v = v
            return
        raise DataError, ("Cannot convert '%s' to number (int,float)" % v)
    def __str__(self):
        return str(self.v)
    def __repr__(self):
        return repr(self.v)
    def __int__(self):
        return int(self.v)
    def __long__(self):
        return long(self.v)
    def __float__(self):
        return float(self.v)
    def __complex__(self):
        return complex(self.v)
# Row identifier type: behaves exactly like NUMBER.
class ROWID(NUMBER): pass
class DATE(datetime.date):
    """datetime.date subclass rendered as a quoted ISO-8601 literal."""
    def __init__(self, *args):
        datetime.date.__init__(self, *args)
    @staticmethod
    def toDatabase(s):
        """Format *s* as 'YYYY-MM-DD' including the surrounding quotes."""
        return "'%04d-%02d-%02d'" % (s.year,s.month,s.day)
    def __str__(self):
        return DATE.toDatabase(self)
    @staticmethod
    def fromDatabase(s):
        """Parse an ISO-8601 'YYYY-MM-DD' string into a datetime.date."""
        try:
            parts = map(int,s.split("-"))
            return datetime.date(*parts)
        except:
            raise DataError, ("Cannot convert string '%s' to datetime.date; only ISO-8601 output supported" % s)
class TIME(datetime.time):
    """datetime.time subclass rendered as a quoted ISO-8601 literal."""
    def __init__(self, *args):
        datetime.time.__init__(self, *args)
    @staticmethod
    def toDatabase(s):
        """Format *s* as 'HH:MM:SS.ffffff' including the quotes."""
        return "'%02d:%02d:%02d.%06d'" % (s.hour, s.minute, s.second, s.microsecond)
    def __str__(self):
        return TIME.toDatabase(self)
    @staticmethod
    def fromDatabase(ins):
        """Parse an ISO-8601 time string (timezone suffix is discarded)."""
        if ins.count("-"):
            ins = ins.split("-")[0] #discarding timestamp for right tnow
        try:
            h,m,sparts = ins.split(":")
            h,m = map(int,(h,m))
            if sparts.count("."):
                ssegs = sparts.split(".")
                s,ms = map(int,ssegs)
                if ms:
                    # Scale the fractional digits up to microseconds,
                    # e.g. '.05' -> 50000.
                    l = len(ssegs[1])
                    ms *= 10 ** (6 - l)
            else:
                s = int(sparts)
                ms = 0
        except:
            raise DataError, ("Cannot convert string '%s' to datetime.time; only ISO-8601 output supported" % ins)
        return datetime.time(h,m,s,ms)
class DATETIME(datetime.datetime):
    """datetime.datetime subclass rendered as a quoted 'date time' literal."""

    def __init__(self,*args):
        datetime.datetime.__init__(self,*args)

    @staticmethod
    def toDatabase(s):
        # Reuse the DATE and TIME formatters, stripping their quotes.
        date_part = DATE.toDatabase(s)[1:-1]
        time_part = TIME.toDatabase(s)[1:-1]
        return "'%s %s'" % (date_part, time_part)

    def __str__(self):
        return DATETIME.toDatabase(self)

    @staticmethod
    def fromDatabase(s):
        """Parse a 'YYYY-MM-DD HH:MM:SS.ffffff' string via DATE and TIME."""
        date_str, time_str = s.split(" ")
        d = DATE.fromDatabase(date_str)
        t = TIME.fromDatabase(time_str)
        return datetime.datetime(d.year, d.month, d.day,
                                 t.hour, t.minute, t.second, t.microsecond)
class BOOL:
    """Boolean wrapper rendered as the quoted literal 'T' or 'F'."""
    def __init__(self, b):
        # Truthiness of b decides the rendering; the value itself is kept.
        self.__b = b
    def __str__(self):
        if self.__b:
            return "'T'"
        else:
            return "'F'"
    @staticmethod
    def fromDatabase(s):
        """Parse a backend boolean: any string starting with T/t or F/f."""
        if s.upper().startswith("T"):
            return True
        if s.upper().startswith("F"):
            return False
        raise DataError, ("Cannot convert '%s' to boolean" % s)
from decimal import Decimal
import locale
# Pick up the system locale's money formatting; if no default locale can
# be determined, fall back to the constants defined at module top.
_loc = locale.getdefaultlocale()[0]
if _loc:
    locale.setlocale(locale.LC_ALL, _loc)
    _thou_sep = locale.localeconv()['mon_thousands_sep']
    _cur_symbol = locale.localeconv()['currency_symbol']
    _dec_point = locale.localeconv()['mon_decimal_point']
else:
    _cur_symbol, _thou_sep, _dec_point = LOCALE_MONEY_DEFAULTS
class MONEY:
    """Money value (Decimal only) rendered as a quoted currency literal."""
    def __init__(self, v):
        # Exact-type check: only Decimal is accepted, to avoid float
        # rounding in monetary values.
        if not type(v) == Decimal:
            raise DataError, ("Cannot convert '%s' to money; use Decimal" % type(v))
        self.v = v
    def __str__(self):
        return "'%s%s'" % (_cur_symbol, self.v)
    @staticmethod
    def fromDatabase(s):
        """Strip locale formatting from *s* and return a Decimal."""
        s = s.replace(_thou_sep, '').replace(_cur_symbol, '')
        # Normalize the locale's decimal point to '.' for Decimal().
        if _dec_point != '.':
            s = s.replace(_dec_point, '.')
        return Decimal(s)
| {
"repo_name": "jamwt/pgasync",
"path": "pgasync/pgtypes.py",
"copies": "1",
"size": "4583",
"license": "bsd-3-clause",
"hash": -6577440372823882000,
"line_mean": 21.356097561,
"line_max": 105,
"alpha_frac": 0.6264455597,
"autogenerated": false,
"ratio": 2.806491120636865,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.3932936680336865,
"avg_score": null,
"num_lines": null
} |
'''A set of utilities for defining and working with namsel-ocr config files
'''
import json
import codecs
import os
import glob
import numpy as np
from utils import create_unique_id, local_file
from collections import Counter
# Directory where generated .conf files live; created on import if missing.
CONF_DIR = './confs'
if not os.path.exists(CONF_DIR):
    os.mkdir(CONF_DIR)
def _open(fl, mode='r'):
return codecs.open(fl, mode, encoding='utf-8')
# Baseline settings for a namsel-ocr run; Config starts from this dict and
# overlays values loaded from a .conf file and/or keyword arguments.
default_config = {
    'page_type': 'book',
    'line_break_method': 'line_cut',
    'recognizer': 'hmm', # or probout
    'break_width': 2.0,
    'segmenter': 'stochastic', # or experimental
    'combine_hangoff': .6,
    'low_ink': False,
    'line_cluster_pos': 'top', # or center
    'viterbi_postprocessing': False, # determine if main is running using viterbi post processing
    'postprocess': False, # Run viterbi (or possibly some other) post processing
    'stop_line_cut': False,
    'detect_o': False,
    'clear_hr': False,
    'line_cut_inflation': 4, # The number of iterations when dilating text in line cut. Increase this value when need to blob things together
}
def update_default():
    '''Write default_config to CONF_DIR/default.conf as JSON.

    Uses a context manager so the file handle is flushed and closed
    (the original passed an unclosed handle straight to json.dump).
    '''
    with _open(os.path.join(CONF_DIR, 'default.conf'), 'w') as out:
        json.dump(default_config, out, indent=1)
def create_misc_confs():
    '''Generate and persist a Config for every combination of common settings.'''
    from sklearn.grid_search import ParameterGrid
    search_space = {
        'break_width': [1.5, 2.0, 3.6, 5.0],
        'recognizer': ['probout', 'hmm'],
        'combine_hangoff': [.4, .6, .8],
        'postprocess': [True, False],
        'segmenter': ['experimental', 'stochastic'],
        'line_cluster_pos': ['top', 'center'],
    }
    # Each parameter combination is saved to disk via save_conf=True.
    for combo in ParameterGrid(search_space):
        Config(save_conf=True, **combo)
class Config(object):
    '''OCR run configuration: defaults overlaid with a JSON file and kwargs.'''
    def __init__(self, path=None, save_conf=False, **kwargs):
        # NOTE(review): this aliases the module-level default_config dict,
        # so mutations below leak into the shared defaults — confirm intended.
        self.conf = default_config
        self.path = path
        if path:
            # Over-write defaults
            self._load_json_set_conf(path)
        # Set any manually specified config settings
        for k in kwargs:
            self.conf[k] = kwargs[k]
        if kwargs and save_conf:
            self._save_conf()
        # Set conf params as attributes to conf obj
        for k in self.conf:
            if k not in self.__dict__:
                setattr(self, k, self.conf[k])
    def _load_json_set_conf(self, path):
        '''Overlay settings from the JSON file at *path* onto self.conf.'''
        try:
            conf = json.load(_open(path))
            for k in conf:
                self.conf[k] = conf[k]
        except IOError:
            print 'Error in loading json file at %s. Using default config' % path
            self.conf = default_config
    def _save_conf(self):
        '''Save a conf if it doesn't already exist'''
        confs = glob.glob(os.path.join(CONF_DIR, '*.conf'))
        for conf in confs:
            conf = json.load(_open(conf))
            if conf == self.conf:
                # Identical conf already stored; nothing to do.
                return
        else:
            # for/else: runs when the loop finished without finding a match
            # (also when there were no conf files at all).
            json.dump(self.conf, _open(os.path.join(CONF_DIR, create_unique_id()+'.conf'), 'w'), indent=1)
| {
"repo_name": "zmr/namsel",
"path": "config_manager.py",
"copies": "1",
"size": "3038",
"license": "mit",
"hash": 6732557589942570000,
"line_mean": 29.0792079208,
"line_max": 141,
"alpha_frac": 0.5681369322,
"autogenerated": false,
"ratio": 3.6470588235294117,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9562495156997508,
"avg_score": 0.03054011974638083,
"num_lines": 101
} |
'''A set of utilities for financial data analysis'''
import csv
from time import time
# Keeping track of a bundle of output files for, e.g., ITCH data
class ManyWriters:
'''Keep track of a set of files and formatters around a given base'''
writers = {}
open_files = []
def __init__(self, basename):
self.basename = basename
def __enter__(self):
return self
def create_writer(self, rec_type):
'''Create a new writer, and store it in the writers dict
rec_type : str
Will be combined with self.basename to determine filename
'''
outname = self.basename + '_' + rec_type + '.csv'
# csv.writer docs specify newline=''
outfile = open(outname, 'w', newline='')
self.open_files.append(outfile)
return csv.writer(outfile)
def get_writer(self, rec_type):
'''Get a writer for the specified rec_type, creating if needed
rec_type : str
Will be combined with self.basename to determine filename
'''
if rec_type not in self.writers:
self.writers[rec_type] = self.create_writer(rec_type)
return self.writers[rec_type]
def close_files(self):
for f in self.open_files:
f.close()
def __exit__(self, exc_type, exc_value, traceback):
self.close_files()
# Benchmarking
def timeit(method):
    '''Return a function that behaves the same, except it prints timing stats.

    Lightly modified from Andreas Jung. Unlicensed, but simple enough it should
    not be a license issue:
    https://www.andreas-jung.com/contents/a-python-decorator-for-measuring-the-execution-time-of-methods
    '''
    def timed(*args, **kw):
        started = time()
        outcome = method(*args, **kw)
        elapsed = time() - started
        print('{} {!r}, {!r}: {:.3} sec'.format(
            method.__name__, args, kw, elapsed))
        return outcome
    return timed
| {
"repo_name": "dlab-berkeley/python-taq",
"path": "marketflow/utility.py",
"copies": "3",
"size": "1965",
"license": "bsd-2-clause",
"hash": 2235148573273910800,
"line_mean": 26.2916666667,
"line_max": 108,
"alpha_frac": 0.6055979644,
"autogenerated": false,
"ratio": 3.914342629482072,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 72
} |
''' a set of utilities to create matrix representations (Laplacians) of graphs
'''
import numpy as np
def complete_gl(n):
    """
    return the Laplacian of a complete graph (all nodes are connected to all
    edges)

    Parameters
    ----------
    n : int
        number of nodes in the graph

    Examples
    --------
    >>> from robotarium.graph import complete_gl
    >>> complete_gl(4)
    array([[ 3., -1., -1., -1.],
           [-1.,  3., -1., -1.],
           [-1., -1.,  3., -1.],
           [-1., -1., -1.,  3.]])
    """
    # All off-diagonal entries are -1; every node has degree n - 1.
    laplacian = -np.ones((n, n))
    np.fill_diagonal(laplacian, n - 1)
    return laplacian
def cycle_gl(n):
    """
    return the Laplacian of a cycle graph (The order is assumed to be
    1->2->3->...->n)

    Parameters
    ----------
    n : int
        number of nodes in the graph

    Examples
    --------
    >>> from robotarium.graph import cycle_gl
    >>> cycle_gl(4)
    array([[ 2., -1.,  0., -1.],
           [-1.,  2., -1.,  0.],
           [ 0., -1.,  2., -1.],
           [-1.,  0., -1.,  2.]])
    """
    # Path structure: degree 2 on the diagonal, -1 on both off-diagonals.
    off_diag = np.ones(n - 1)
    laplacian = 2 * np.eye(n) - np.diag(off_diag, 1) - np.diag(off_diag, -1)
    # Close the cycle by connecting the last node back to the first.
    laplacian[0, n - 1] = -1
    laplacian[n - 1, 0] = -1
    return laplacian
def line_gl(n):
    """
    return the Laplacian of a line graph

    Parameters
    ----------
    n : int
        number of nodes in the graph

    Examples
    --------
    >>> from robotarium.graph import line_gl
    >>> line_gl(4)
    array([[ 1., -1.,  0.,  0.],
           [-1.,  2., -1.,  0.],
           [ 0., -1.,  2., -1.],
           [ 0.,  0., -1.,  1.]])
    """
    off_diag = np.ones(n - 1)
    laplacian = 2 * np.eye(n) - np.diag(off_diag, 1) - np.diag(off_diag, -1)
    # The two endpoints of the line only have a single neighbor.
    laplacian[0, 0] = 1
    laplacian[-1, -1] = 1
    return laplacian
def random_connected_gl(v, e):
    """
    Outputs a randomly generated, undirected, connected graph.
    Laplacian with v - 1 + e edges

    Parameters
    ----------
    v : int
        number of nodes
    e : int
        number of extra edges added on top of the random spanning tree

    Examples
    --------
    """
    laplacian = np.zeros((v, v))
    # First build a random spanning tree: each new node i attaches to a
    # uniformly chosen earlier node, guaranteeing connectivity.
    for i in range(1, v):
        edge = np.random.randint(i)
        # Update adjacency relations.
        laplacian[i, edge] = -1
        laplacian[edge, i] = -1
        # Update node degrees
        laplacian[i, i] += 1
        laplacian[edge, edge] += 1
    # This works because all nodes have at least 1 degree. Choose from only
    # upper diagonal portion.
    # NOTE(review): the mask compares against == 1, i.e. positions holding a
    # degree of exactly 1 on the upper triangle — confirm this selects the
    # intended candidate edges.
    temp = np.where(np.triu(laplacian).reshape(v*v) == 1)
    pot_edges = temp[0]
    sz = laplacian.shape
    # num_edges = min(e, len(pot_edges))
    num_edges = np.where(e <= len(pot_edges), e, len(pot_edges))
    if num_edges <= 0:
        # NOTE(review): returns None (not a Laplacian) when no extra edges
        # can be added — callers must handle this.
        return
    # Indices of randomly chosen extra edges.
    temp = np.random.permutation(len(pot_edges))
    edge_indices = temp[0:num_edges]
    i, j = ind_to_sub(sz, pot_edges[edge_indices])
    # Update adjacency relation
    laplacian[i, j] = -1
    laplacian[j, i] = -1
    # Update degree relation
    # NOTE(review): fancy-index += does not accumulate duplicate indices
    # in numpy — verify degrees stay consistent when i/j repeat.
    laplacian[i, i] += 1
    laplacian[j, j] += 1
    return laplacian
def random_gl(v, e):
    """
    Outputs a randomly generated, undirected, connected graph Laplacian with
    'n' nodes.

    Parameters
    ----------
    v : int
        number of nodes
    e : int
        requested number of edges (capped at the number of candidates)
    """
    laplacian = np.tril(np.ones((v, v)))
    # This works because I can't select diagonals
    # (zeros of the strict upper triangle are the candidate edge slots).
    temp = np.where(np.triu(laplacian).reshape(v*v) == 0)
    pot_edges = temp[0]
    sz = laplacian.shape
    # Rest to zeros
    laplacian = np.zeros((v, v))
    # Equivalent to min(e, len(pot_edges)); note this yields a 0-d array.
    num_edges = np.where(e <= len(pot_edges), e, len(pot_edges))
    # Indices of randomly chosen extra edges.
    temp = np.random.permutation(len(pot_edges))
    edge_indices = temp[0:num_edges]
    i, j = ind_to_sub(sz, pot_edges[edge_indices])
    # Update adjacency relation
    laplacian[i, j] = -1
    laplacian[j, i] = -1
    # Update degree relation
    # NOTE(review): fancy-index += does not accumulate duplicate indices —
    # verify degrees when i/j repeat.
    laplacian[i, i] += 1
    laplacian[j, j] += 1
    return laplacian
def ind_to_sub(siz, ind):
    """
    Subscripts from linear index.

    A python formulation of MATLAB's ind2sub():
    https://www.mathworks.com/help/matlab/ref/ind2sub.html
    adapted from:
    https://stackoverflow.com/questions/28995146/matlab-ind2sub-equivalent-in-python
    The subtraction by one in 'rows' keeps indices 0-based (MATLAB is
    1-based).

    Parameters
    ----------
    siz : int tuple
        size of the matrix the linear indices refer to.
    ind : np.ndarray
        linear indices to convert; out-of-range entries are set to -1
        IN PLACE (this function mutates its argument).

    Returns
    -------
    rows : np.ndarray
        row subscripts for each linear index.
    columns : np.ndarray
        column subscripts for each linear index.
    """
    n_rows, n_cols = siz[0], siz[1]
    # Flag any linear index outside [0, n_rows*n_cols) with -1
    # (note: mutates *ind* in place, matching the original contract).
    invalid = (ind < 0) | (ind >= n_rows * n_cols)
    ind[invalid] = -1
    rows = np.asarray(np.ceil(ind.astype('int') / n_rows) - 1, dtype=int)
    columns = ind % n_cols
    return rows, columns
| {
"repo_name": "robotarium/robotarium-python-simulator",
"path": "robotarium/graph.py",
"copies": "1",
"size": "5247",
"license": "mit",
"hash": -7607100939141003000,
"line_mean": 22.85,
"line_max": 84,
"alpha_frac": 0.5561273108,
"autogenerated": false,
"ratio": 3.38953488372093,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9440511481036883,
"avg_score": 0.0010301426968093634,
"num_lines": 220
} |
""" A set of utilities to manage pySpark SparkContext object
Assumes you have pyspark (and py4j) on the PYTHONPATH and SPARK_HOME is defined
"""
from future.utils import iteritems
from future.moves.urllib.request import urlopen
from functools import wraps
import collections
import logging
import time
import shutil
import sys
import os
import json
import thunder as td
import numpy as np
from multiprocessing import Process, Queue
from pyspark import SparkContext, SparkConf, RDD
def executor_ips(sc):
    """ gets the unique ip addresses of the executors of the current application
    This uses the REST API of the status web UI on the driver (http://spark.apache.org/docs/latest/monitoring.html)
    :param sc: Spark context
    :return: set of ip addresses
    """
    # applicationId exists on newer pyspark; fall back to the conf value.
    try:
        app_id = sc.applicationId
    except AttributeError:
        app_id = sc.getConf().get('spark.app.id')
    # for getting the url (see: https://github.com/apache/spark/pull/15000)
    try:
        base_url = sc.uiWebUrl
    except AttributeError:
        base_url = sc._jsc.sc().uiWebUrl().get()
    url = base_url + '/api/v1/applications/' + app_id + '/executors'
    # Python 2/3 difference: on py3 the response must be decoded first.
    try:
        data = json.load(urlopen(url))
    except TypeError:
        response = urlopen(url)
        str_response = response.read().decode('utf-8')
        data = json.loads(str_response)
    # 'hostPort' is 'ip:port'; keep only the ip part.
    ips = set(map(lambda x: x[u'hostPort'].split(':')[0], data))
    return ips
def change(sc=None, app_name='customSpark', master=None, wait='ips', min_cores=None, min_ips=None, timeout=30,
           refresh_rate=0.5, fail_on_timeout=False, **kwargs):
    """ Returns a new Spark Context (sc) object with added properties set
    :param sc: current SparkContext if None will create a new one
    :param app_name: name of new spark app
    :param master: url to master, if None will get from current sc
    :param wait: when to return after asking for a new sc (or max of timeout seconds):
        'ips': wait for all the previous ips that were connected to return (needs sc to not be None)
        'cores': wait for min_cores
        None: return immediately
    :param min_cores: when wait is 'cores' will wait until defaultParallelism is back to at least this value.
        if None will be set to defaultParallelism.
    :param min_ips: when wait is 'ips' will wait until number of unique executor ips is back to at least this value.
        if None will be set to the what the original sc had.
    :param timeout: max time in seconds to wait for new sc if wait is 'ips' or 'cores'
    :param fail_on_timeout: whether to fail if timeout has reached
    :param refresh_rate: how long to wait in seconds between each check of defaultParallelism
    :param kwargs: added properties to set. In the form of key value pairs (replaces '.' with '_' in key)
        examples: spark_task_cores='1', spark_python_worker_memory='8g'
        see: http://spark.apache.org/docs/latest/configuration.html
    :return: a new SparkContext
    """
    # checking input
    if master is None and sc is None:
        raise ValueError('Both master and sc are None')
    if master is None:
        master = sc.getConf().get(u'spark.master')
    # Resolve wait targets before tearing down the current context.
    if wait == 'ips':
        if sc is None:
            if min_ips is None:
                min_ips = 1
        elif min_ips is None:
            min_ips = len(executor_ips(sc))
    elif wait == 'cores':
        if min_cores is None:
            if sc is None:
                logging.getLogger('pySparkUtils').info('Both sc and min_cores are None: setting target_cores to 2')
                min_cores = 2
            else:
                min_cores = sc.defaultParallelism
                logging.getLogger('pySparkUtils').info('min_cores is None: setting target_cores to: %d' % min_cores)
    elif wait is not None:
        raise ValueError("wait should be: ['ips','cores',None] got: %s" % wait)
    if sc is not None:
        logging.getLogger('pySparkUtils').info('Stopping original sc with %d cores and %d executors' %
                                               (sc.defaultParallelism, len(executor_ips(sc))))
        sc.stop()
    # building a new configuration with added arguments
    conf = SparkConf().setMaster(master).setAppName(app_name)
    for key in kwargs.keys():
        # kwargs use '_' where Spark config keys use '.'
        name = key.replace('_', '.', 100)
        value = kwargs[key]
        conf = conf.set(name, value)
        logging.getLogger('pySparkUtils').info('Setting %s to: %s' % (name, value))
    # starting the new context and waiting for defaultParallelism to get back to original value
    sc = SparkContext(conf=conf)
    if wait == 'cores':
        # Poll until enough cores are registered or the timeout elapses.
        total_time = 0
        while sc.defaultParallelism < min_cores and total_time < timeout:
            time.sleep(refresh_rate)
            total_time += refresh_rate
        if fail_on_timeout and total_time >= timeout:
            sc.stop()
            raise RuntimeError('Time out reached when changing sc')
    elif wait == 'ips':
        # Poll until enough distinct executor ips are back or timeout.
        total_time = 0
        while len(executor_ips(sc)) < min_ips and total_time < timeout:
            time.sleep(refresh_rate)
            total_time += refresh_rate
        if fail_on_timeout and total_time >= timeout:
            sc.stop()
            raise RuntimeError('Time out reached when changing sc')
    logging.getLogger('pySparkUtils').info('Returning new sc with %d cores and %d executors' %
                                           (sc.defaultParallelism, len(executor_ips(sc))))
    return sc
def fallback(func):
    """ Decorator function for functions that handle spark context.
    If a function changes sc we might lose it if an error occurs in the function.
    In the event of an error this decorator will log the error but return sc.
    :param func: function to decorate
    :return: decorated function
    """
    @wraps(func)
    def wrapper(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except Exception as e:
            logging.getLogger('pySparkUtils').error('Decorator handled exception %s' % e, exc_info=True)
            _, _, tb = sys.exc_info()
            # Walk down to the innermost frame of the traceback.
            while tb.tb_next:
                tb = tb.tb_next
            frame = tb.tb_frame
            # Look for a live SparkContext among the frame locals.
            for name, value in iteritems(frame.f_locals):
                if isinstance(value, SparkContext) and value._jsc is not None:
                    return frame.f_locals[name]
            logging.getLogger('pySparkUtils').error('Could not find SparkContext', exc_info=True)
            return None
    return wrapper
def thunder_decorator(func):
    """ Decorator for functions so they could get as input a thunder.Images / thunder.Series object,
    while they are expecting an rdd. Also will return the data from rdd to the appropriate type
    Assumes only one input object of type Images/Series, and up to one output object of type RDD
    :param func: function to decorate
    :return: decorated function
    """
    @wraps(func)
    def dec(*args, **kwargs):
        # find Images / Series object in args
        result = None
        args = list(args)
        image_args = list(map(lambda x: isinstance(x, td.images.Images), args))
        series_args = list(map(lambda x: isinstance(x, td.series.Series), args))
        rdd_args = list(map(lambda x: isinstance(x, RDD), args))
        # find Images / Series object in kwargs
        image_kwargs = []
        series_kwargs = []
        rdd_kwargs = []
        for key, value in iteritems(kwargs):
            if isinstance(value, td.images.Images):
                image_kwargs.append(key)
            if isinstance(value, td.series.Series):
                series_kwargs.append(key)
            if isinstance(value, RDD):
                rdd_kwargs.append(key)
        # make sure there is only one
        count = sum(image_args) + sum(series_args) + sum(rdd_args) + len(image_kwargs) + len(series_kwargs) + \
            len(rdd_kwargs)
        if count == 0:
            raise ValueError('Wrong data type, expected [RDD, Images, Series] got None')
        if count > 1:
            raise ValueError('Expecting on input argument of type Series / Images, got: %d' % count)
        # bypass for RDD
        if sum(rdd_args) or len(rdd_kwargs):
            return func(*args, **kwargs)
        # image_flag records whether the input was Images (True) or
        # Series (False), so the output rdd can be converted back.
        image_flag = None
        # convert to rdd and send
        if sum(image_args) > 0:
            image_flag = True
            index = np.where(image_args)[0][0]
            args[index] = args[index].tordd()
            result = func(*args, **kwargs)
        if sum(series_args) > 0:
            image_flag = False
            index = np.where(series_args)[0][0]
            args[index] = args[index].tordd()
            result = func(*args, **kwargs)
        if len(image_kwargs) > 0:
            image_flag = True
            kwargs[image_kwargs[0]] = kwargs[image_kwargs[0]].tordd()
            result = func(*args, **kwargs)
        if len(series_kwargs) > 0:
            image_flag = False
            kwargs[series_kwargs[0]] = kwargs[series_kwargs[0]].tordd()
            result = func(*args, **kwargs)
        if image_flag is None:
            raise RuntimeError('Target function did not run')
        # handle output
        # Normalize to a tuple so single and multiple returns share a path.
        if not isinstance(result, tuple):
            result = (result,)
        result_len = len(result)
        rdd_index = np.where(list(map(lambda x: isinstance(x, RDD), result)))[0]
        # no RDD as output
        if len(rdd_index) == 0:
            logging.getLogger('pySparkUtils').debug('No RDDs found in output')
            if result_len == 1:
                return result[0]
            else:
                return result
        if len(rdd_index) > 1:
            raise ValueError('Expecting one RDD as output got: %d' % len(rdd_index))
        result = list(result)
        rdd_index = rdd_index[0]
        # handle type of output
        # Convert the single output rdd back to the input's thunder type.
        if image_flag:
            result[rdd_index] = td.images.fromrdd(result[rdd_index])
        else:
            result[rdd_index] = td.series.fromrdd(result[rdd_index])
        if result_len == 1:
            return result[0]
        else:
            return result
    return dec
@thunder_decorator
def balanced_repartition(data, partitions):
    """ balanced_repartition(data, partitions)
    Reparations an RDD making sure data is evenly distributed across partitions
    for Spark version < 2.1 (see: https://issues.apache.org/jira/browse/SPARK-17817)
    or < 2.3 when #partitions is power of 2 (see: https://issues.apache.org/jira/browse/SPARK-21782)
    :param data: RDD
    :param partitions: number of partition to use
    :return: repartitioned data
    """
    def repartition(data_inner, partitions_inner):
        # repartition by zipping an index to the data, repartition by % on it and removing it
        # (zipWithIndex gives each record a sequential index, so index %
        # partitions spreads records evenly round-robin)
        data_inner = data_inner.zipWithIndex().map(lambda x: (x[1], x[0]))
        data_inner = data_inner.partitionBy(partitions_inner, lambda x: x % partitions_inner)
        return data_inner.map(lambda x: x[1])
    if isinstance(data, RDD):
        return repartition(data, partitions)
    else:
        raise ValueError('Wrong data type, expected [RDD, Images, Series] got: %s' % type(data))
@thunder_decorator
def regroup(rdd, groups=10, check_first=False):
    """ Regroup an rdd using a new key added that is 0 ... number of groups - 1
    :param rdd: input rdd as a (k,v) pairs
    :param groups: number of groups to concatenate to
    :param check_first: check if first value is a key value pair.
    :return: a new rdd in the form of (groupNum, list of (k, v) in that group) pairs
    Example:
    >>> data = sc.parallelize(zip(range(4), range(4)))
    >>> data.collect()
    [(0, 0), (1, 1), (2, 2), (3, 3)]
    >>> data2 = regroup(data, 2)
    >>> data2.collect()
    [(0, [(0, 0), (2, 2)]), (1, [(1, 1), (3, 3)])]
    """
    if check_first:
        first = rdd.first()
        # Bug fix: collections.Iterable was removed in Python 3.10; use the
        # collections.abc alias when present (falls back for Python 2).
        _iterable_abc = getattr(collections, 'abc', collections).Iterable
        if isinstance(first, (list, tuple, _iterable_abc)):
            if len(first) != 2:
                raise ValueError('first item was not not length 2: %d' % len(first))
        else:
            raise ValueError('first item was wrong type: %s' % type(first))
    # Key each record by (original key % groups) so records with the same
    # residue land in the same group, then collect each group to a list.
    rdd = rdd.map(lambda kv: (kv[0] % groups, (kv[0], kv[1])), preservesPartitioning=True)
    return rdd.groupByKey().mapValues(list)
@thunder_decorator
def save_rdd_as_pickle(rdd, path, batch_size=10, overwrite=False):
    """ Saves an rdd by grouping all the records of each partition as one pickle file
    :param rdd: rdd to save
    :param path: where to save
    :param batch_size: batch size to pass to spark saveAsPickleFile
    :param overwrite: if directory exist whether to overwrite
    """
    if os.path.isdir(path):
        if overwrite:
            logging.getLogger('pySparkUtils').info('Deleting files from: %s' % path)
            shutil.rmtree(path)
            logging.getLogger('pySparkUtils').info('Done deleting files from: %s' % path)
        else:
            logging.getLogger('pySparkUtils').error('Directory %s already exists '
                                                    'and overwrite is false' % path)
            raise IOError('Directory %s already exists and overwrite is false'
                          % path)
    # glom() turns each partition into a single list record, so each
    # partition is written as one pickle.
    rdd.glom().saveAsPickleFile(path, batchSize=batch_size)
    logging.getLogger('pySparkUtils').info('Saved rdd as pickle to: %s' % path)
def load_rdd_from_pickle(sc, path, min_partitions=None, return_type='images'):
    """ Loads an rdd that was saved as one pickle file per partition
    :param sc: Spark Context
    :param path: directory to load from
    :param min_partitions: minimum number of partitions. If None will be sc.defaultParallelism
    :param return_type: what to return:
        'rdd' - RDD
        'images' - Thunder Images object
        'series' - Thunder Series object
    :return: based on return type.
    """
    if min_partitions is None:
        min_partitions = sc.defaultParallelism
    rdd = sc.pickleFile(path, minPartitions=min_partitions)
    # Undo the glom() done at save time: each record is a list of records.
    rdd = rdd.flatMap(lambda x: x)
    if return_type == 'images':
        result = td.images.fromrdd(rdd).repartition(min_partitions)
    elif return_type == 'series':
        result = td.series.fromrdd(rdd).repartition(min_partitions)
    elif return_type == 'rdd':
        result = rdd.repartition(min_partitions)
    else:
        raise ValueError('return_type not supported: %s' % return_type)
    logging.getLogger('pySparkUtils').info('Loaded rdd from: %s as type: %s'
                                           % (path, return_type))
    return result
| {
"repo_name": "boazmohar/pySparkUtils",
"path": "pySparkUtils/utils.py",
"copies": "1",
"size": "14880",
"license": "mit",
"hash": 7299863086716235000,
"line_mean": 39.6806722689,
"line_max": 116,
"alpha_frac": 0.5997983871,
"autogenerated": false,
"ratio": 4.015110631408526,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0013377017441193394,
"num_lines": 357
} |
"""A set of utilities to use for the Redis database.
Licensed under the 3-clause BSD License:
Copyright (c) 2010, Neeraj Kumar (neerajkumar.org)
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the author nor the
names of its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL NEERAJ KUMAR BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
from __future__ import with_statement
import os, sys, time
from nkutils import *
import redis
from pprint import pprint, pformat
PPRINT_WIDTH = getConsoleSize()[0]
# SMALL UTILS
def typedget(cast, db, key, field=None, default=None):
    """Returns the value of the given key (and field if a hash) cast via *cast*.

    If the lookup or the cast raises TypeError (e.g. the key is missing
    and the raw value is None), returns the given default (None by default).
    """
    try:
        if field:
            return cast(db.hget(key, field))
        return cast(db.get(key))
    except TypeError:
        return default
def intget(*args, **kw):
    """Returns the value of the given key (and field if a hash) as an int.

    Thin wrapper over typedget; lookup/cast failures yield the default.
    """
    return typedget(int, *args, **kw)
def floatget(*args, **kw):
    """Returns the value of the given key (and field if a hash) as a float.

    Thin wrapper over typedget; lookup/cast failures yield the default.
    """
    return typedget(float, *args, **kw)
def keyspec(db, keyspec):
    """Expands the given keyspec into a list of keys.

    Accepted forms:
        string        -- treated as a wildcard pattern, expanded via db.keys()
        (fmt, vals)   -- keys are generated as fmt % val for each val
        anything else -- assumed to already be a list of keys, returned as-is
    """
    if isinstance(keyspec, basestring):
        # wildcard pattern
        return db.keys(keyspec)
    if len(keyspec) == 2 and not isinstance(keyspec[1], basestring):
        # (format, values) pair
        fmt, vals = keyspec
        return [fmt % v for v in vals]
    # everything else - just return as-is
    return keyspec
def gethashdict(db, key, fields):
    """Returns a dict mapping each field to its value in the hash at the given key.

    fields: a sequence of hash field names, fetched in one hmget call.
    """
    # the original called dict(fields, db.hmget(...)), which is always a
    # TypeError (dict() takes at most one positional argument) -- zip instead
    return dict(zip(fields, db.hmget(key, fields)))
def iterhash(db, keys, fields, castfuncs=None):
    """Iterates through a set of hashvalues for a given set of keys.

    The `keys` are passed to keyspec() to get actual list of keys.
    The fields can be one of:
        single string: returns single string as output, per key
        list of strings: returns list of strings per key
        dict of strings->castfunc: returns dict of casted vals per key
    For the string/list forms, castfuncs optionally casts the results:
        a single callable is applied to every value; a sequence of callables
        (one per field) is applied positionally.
    Returns (keys, retvals), where the latter is as above.
    """
    keys = keyspec(db, keys)
    if isinstance(fields, basestring): # get single value
        vals = pipefunc(db, keys, 'hget', fields)
        if castfuncs:
            vals = map(castfuncs, vals)
    elif isinstance(fields, dict): # dict of outputs
        # sort fields for deterministic order, cast each value, re-dict
        fields, funcs = zip(*sorted(fields.iteritems()))
        vals = pipefunc(db, keys, 'hmget', fields)
        vals = [[func(v) for func, v in zip(funcs, row)] for row in vals]
        vals = [dict(zip(fields, row)) for row in vals]
    else: # list of fields
        vals = pipefunc(db, keys, 'hmget', fields)
        if castfuncs:
            if callable(castfuncs): # single casting function
                vals = [map(castfuncs, row) for row in vals]
            else: # multiple casting functions
                assert len(castfuncs) == len(fields)
                # pair each value with its corresponding cast function
                # (the original referenced an undefined name `func` here,
                # which raised NameError whenever this branch ran)
                vals = [[cast(v) for cast, v in zip(castfuncs, row)] for row in vals]
    return (keys, vals)
def idFromHash(db, key, hashname, counter):
    """Looks up (or lazily assigns) the id for 'key' in the hash 'hashname'.

    On a miss, increments 'counter' to mint a fresh id and records it in the
    hash. The id is returned as stored (NOT cast into an int).
    """
    existing = db.hget(hashname, key)
    if existing is not None:
        return existing
    fresh = db.incr(counter)
    db.hset(hashname, key, fresh)
    return fresh
def makeqname(prefix=''):
    """Creates a qname() function bound to the given prefix."""
    def qname(*args):
        """Joins the prefix (if any) and args into a colon-separated name."""
        parts = ([prefix] if prefix else []) + list(args)
        return getListAsStr(parts, ':')
    return qname
def workername():
    """Generates a fresh, unique worker name (a uuid1 string)."""
    from uuid import uuid1
    return str(uuid1())
def pipefunc(db, keys, funcname, *args, **kw):
    """Applies db.<funcname>(key, *args, **kw) to every key via one pipeline.

    Equivalent to [db.funcname(k, *args, **kw) for k in keys], but batched
    into a single round-trip.
    """
    pipe = db.pipeline()
    method = getattr(pipe, funcname)
    for key in keys:
        method(key, *args, **kw)
    return pipe.execute()
def kpipefunc(db, keys, funcname, *args, **kw):
    """Like pipefunc(), but returns (key, result) pairs."""
    keylist = list(keys)
    results = pipefunc(db, keylist, funcname, *args, **kw)
    return zip(keylist, results)
def pipefuncs(db, keys, funcs):
    """Applies several pipeline functions to every key, in one round-trip.

    Each func must take exactly (key, pipeline) as args.
    This is more annoying than pipefunc(), but sometimes more useful.
    Equivalent to [(funcs[0](k, db), funcs[1](k, db), ...) for k in keys].
    """
    pipe = db.pipeline()
    for key in keys:
        for func in funcs:
            func(key, pipe)
    flat = pipe.execute()
    # regroup the flat result list into one tuple of len(funcs) per key
    return list(grouper(len(funcs), flat))
def pipedec(db):
    """Decorator for pipelining multiple redis gets/sets in live code.
    WARNING: THIS IS VERY FRAGILE -- USE WITH EXTREME CAUTION!!!
    Redis pipelines are great when only setting stuff, or when getting
    the same value for lots of keys, but if you're doing a mix of things,
    then it becomes annoying to separate out pipelines() for gets.
    Often, you end up writing essentially the same code twice -- the first time
    making all the pipeline calls, then calling execute(), then a second time
    setting all the values you want to.
    This decorator aims to simplify this process for common cases, by essentially
    automating the above process.
    Assumptions: your function MUST adhere to the following guidelines:
        - Your code must be idempotent to being run twice. This includes:
            * deterministic ordering (don't want to assign wrong values to wrong items!)
            * allowing values to be set multiple times (first time will be the pipeline object)
            * no list appending (since the first time will be the pipeline object)
            * no dependencies between local variables that depend on redis-assigned values
        - NO redis-dependent branches. The first time through there will be no values -- only pipelines.
    Here is an example.
        @pipedec(db)
        def simple(p):
            v = p.hget('key', 'field')
            ret = p.hmset('k2', 'f', 42)
            a = p.smembers('skey')
            return ret
        # this becomes the following
        p = db.pipeline()
        v = p.hget('key', 'field')
        ret = p.hmset('k2', 'f', 42)
        a = p.smembers('skey')
        # return value ignored
        vals = p.execute()
        v = vals.pop(0)
        ret = vals.pop(0)
        a = vals.pop(0)
        return ret
    """
    #TODO see if this function is actually useful, given all these restrictions
    # every redis command name we intercept on the second (replay) pass
    redisfuncs = 'append blpop brpop brpoplpush config_get config_set dbsize decr delete echo exists expire expireat flushall flushdb get getbit getset hdel hexists hget hgetall hincrby hkeys hlen hmget hmset hset hsetnx hvals incr info keys lastsave lindex linsert llen lpop lpush lpushx lrange lrem lset ltrim mget move mset msetnx object persist ping publish randomkey rename renamenx rpop rpoplpush rpush rpushx sadd save scard sdiff sdiffstore set setbit setex setnx setrange sinter sinterstore sismember smembers smove sort spop srandmember srem strlen substr sunion sunionstore ttl type zadd zcard zcount zincrby zinterstore zrange zrangebyscore zrank zrem zremrangebyrank zremrangebyscore zrevrange zrevrangebyscore zrevrank zscore zunionstore'.split()
    def ret(fn):
        def newfn(*args, **kw):
            # pass 1: run fn against a real pipeline to queue up all commands
            p = db.pipeline()
            times = [time.time()]
            fn(p, *args, **kw)
            print 'finished first call'
            times.append(time.time())
            vals = p.execute()
            print 'finished execute'
            times.append(time.time())
            class ValueWrapper(object):
                """A wrapper over some values that simply returns them in order, no matter the method called."""
                def __init__(self, vals):
                    self.vals = vals
                    # every redis command becomes "pop the next result"
                    for f in redisfuncs:
                        setattr(self, f, self.popnext)
                def popnext(self, *args, **kw):
                    """Returns the next value"""
                    print 'In popnext with args %s, kw %s' % (args, kw)
                    return self.vals.pop(0)
            # pass 2: rerun fn, feeding it the executed results in order
            ret = fn(ValueWrapper(vals), *args, **kw)
            times.append(time.time())
            print 'Got times %s' % (getTimeDiffs(times))
            return ret
        newfn.__name__ = fn.__name__ + ' (pipeline decorated)'
        return newfn
    return ret
# INTERACTIVE/LARGER UTILS
def dbinfo(db, *patterns, **kw):
    """Gets types and lengths of keys matching given patterns.
    If any pattern is a list, then assumes it's a list of keys.
    Else, assumes it's a string and expands out using db.keys().
    If no patterns are given, then processes all keys.
    The lengths are defined by redis type as:
        string: strlen
        hash: hlen
        set: scard
        zset: zcard
        list: llen
    You can optionally give these kw params:
        'outf': a stream (default: sys.stdout) to pretty print things on.
        'detailed': if true (default: 0), then print the full items as well.
    Returns a dict mapping keys to (type, length) if not detailed,
    or (type, length, values) if detailed=1.
    """
    from pprint import pformat
    outf = kw.get('outf', sys.stdout)
    detailed = kw.get('detailed', 0)
    # expand patterns: strings go through db.keys(), lists are taken as-is
    if patterns:
        keys = []
        for p in patterns:
            if isinstance(p, basestring):
                keys.extend(db.keys(p))
            else:
                keys.extend(p)
        keys.sort()
    else:
        keys = sorted(db.keys())
    # one pipelined round-trip for types, then one for values/lengths
    types = pipefunc(db, keys, 'type')
    p = db.pipeline()
    if detailed:
        # get detailed values (per-type fetch functions queued on the pipeline)
        vfuncs = dict(string=p.get, hash=p.hgetall, zset=lambda k: p.zrange(k, 0, -1, withscores=1), set=p.smembers, list=lambda k: p.lrange(k, 0, -1))
        [vfuncs[t](k) for k, t in zip(keys, types)]
        vals = p.execute()
        # in this case, the simple values are just the lengths of each thing
        lengths = [len(v) for v in vals]
    else:
        # lengths only
        lfuncs = dict(string=p.strlen, hash=p.hlen, zset=p.zcard, set=p.scard, list=p.llen)
        [lfuncs[t](k) for k, t in zip(keys, types)]
        lengths = p.execute()
    # now print things out
    ret = {}
    typestrs = dict(string='STR ', hash='HASH', zset='ZSET', set='SET ', list='LIST')
    for k, t in zip(keys, types):
        l = lengths.pop(0)
        v = vals.pop(0) if detailed else None  # vals only exists when detailed
        if outf:
            print >>outf, '%s %7d %s' % (typestrs[t], l, k)
            if detailed:
                s = blockindent(pformat(v), indent=' '*14, initial=' '*14)
                print >>outf, s
        ret[k] = (t, l, v) if detailed else (t, l)
    return ret
def dblist(db, *patterns, **kw):
    """Shorthand for dbinfo() with detailed output enabled."""
    kw.update(detailed=1)
    return dbinfo(db, *patterns, **kw)
def keysAtLevel(db, level, dlm=':'):
    """Returns all keys with exactly `level` components when split by `dlm`."""
    level = int(level)
    return [key for key in db.keys() if len(key.split(dlm)) == level]
def keysize(db, k):
    """Returns the estimate size, in bytes, of a key and its value.

    The size of the key is simply its length.
    For values, it depends on the datatype:
        For strings, it's the length of the string
        For hashes, it's the sum of the lengths of the fields and their values
        For lists and sets, it's the sum of the lengths of each item
        For sorted sets, it's the 8*num (for the scores) + length of each member
    Returns 0 if the key is not found
    """
    if not k: return 0
    t = db.type(k)
    # redis reports missing keys with type 'none' (a truthy string), so the
    # bare `if not t` check never fired for them; honor the documented 0
    if not t or t == 'none': return 0
    ret = len(k)
    if t == 'string':
        ret += db.strlen(k)
    elif t == 'hash':
        d = db.hgetall(k)
        for f, v in d.iteritems():
            ret += len(f) + len(v)
    elif t == 'list':
        for el in db.lrange(k, 0, -1):
            ret += len(el)
    elif t == 'set':
        for el in db.smembers(k):
            ret += len(el)
    elif t == 'zset':
        # zrange without withscores returns members only; add 8 bytes per score
        for val in db.zrange(k, 0, -1):
            ret += 8 + len(val)
    return ret
def keymem(db, keys):
"""Returns detailed sizing information for the given keys.
The return object is a dictionary mapping keys to memory dicts.
Each memory dict contains:
- keys: list of key names. This is needed for aggregation, but in this
method, it simply contains a single element: the original key name.
- types: list of types, corresponding to the list in 'keys'
- length: the number of keys. Here, it's just 1.
- keylen: sum of the lengths of the keynames.
- num: length of sub-elements, if applicable. By type, this is:
string: 1
hash: hlen
list: llen
set: scard
zset: zcard
- skeylen: sum of the lengths of the subkeys. By type:
string: 0
hash: len(sum(hkeys))
list: 0
set: 0
zset: len(sum(zrange))
- svallen: sum of the lengths of the subvals. By type:
string: strlen
hash: len(sum(hvals))
list: len(sum(lrange))
set: len(sum(smembers))
zset: 8*zcard
- subtotal: skeylen + svallen
- total: subtotal + keylen
"""
# get types
p = db.pipeline()
[p.type(k) for k in keys]
types = p.execute()
# create return dicts
default = dict(length=1, num=0, keylen=0, skeylen=0, svallen=0, subtotal=0, total=0)
ret = [dict(keys=[k], types=[t], **default) for k, t in zip(keys, types)]
ret = dict(zip(keys, ret))
# partition keys by types
parts, junk = partitionByFunc(zip(keys, types), lambda p: p[1])
for type, ktpairs in parts.items():
cur, junk = zip(*ktpairs)
#print type, len(cur), cur[:5]
pl = lambda funcname, *args: pipefunc(db, cur, funcname, *args)
pll = lambda funcname, *args: [len(''.join(s)) for s in pl(funcname, *args)]
# Build up lists of each variable type
keylens = [len(k) for k in cur]
if type == 'string':
nums = [1] * len(cur)
skeylens = [0] * len(cur)
svallens = pl('strlen')
elif type == 'hash':
nums = pl('hlen')
skeylens = pll('hkeys')
svallens = pll('hvals')
elif type == 'list':
nums = pl('llen')
skeylens = [0] * len(cur)
svallens = pll('lrange', 0, -1)
elif type == 'set':
nums = pl('scard')
skeylens = [0] * len(cur)
svallens = pll('smembers')
elif type == 'zset':
nums = pl('zcard')
skeylens = pll('zrange', 0, -1)
svallens = pl('zcard')
else:
print 'Type is %s!' % (type)
nums = [0] * len(cur)
skeylens = [0] * len(cur)
svallens = [0] * len(cur)
# set all the values
for el in zip(cur, keylens, nums, skeylens, svallens):
k, rest = el[0], el[1:]
#print ' ', k, rest
for field, val in zip('keylen num skeylen svallen'.split(), rest):
ret[k][field] += val
ret[k]['subtotal'] = ret[k]['skeylen'] + ret[k]['svallen']
ret[k]['total'] = ret[k]['subtotal'] + ret[k]['keylen']
#pprint.pprint(ret)
return ret
def aggrmem(mems):
    """Aggregates memory dicts produced by keymem() into a single summary.

    Accepts either the dict returned by keymem() (its values are used) or a
    plain list of memory dicts. The result has the same fields as keymem()
    entries, with 'keys'/'types' concatenated and all counters summed.
    """
    try:
        mems = mems.values()
    except Exception: pass
    total = dict(keys=[], types=[], length=0, keylen=0, num=0, skeylen=0, svallen=0, subtotal=0, total=0)
    numeric_fields = 'length keylen num skeylen svallen subtotal total'.split()
    for mem in mems:
        total['keys'].extend(mem['keys'])
        total['types'].extend(mem['types'])
        for field in numeric_fields:
            total[field] += mem[field]
    return total
def groupkeys(keys, patterns):
    """Groups the given set of keys using the given glob patterns.

    Each key is assigned to the first pattern (in order) that fnmatch()es it;
    keys matching no pattern are collected under the None key.
    Returns a plain dict of {pattern: [matching keys]}.
    """
    from collections import defaultdict
    from fnmatch import fnmatch
    ret = defaultdict(list)
    for k in keys:
        for p in patterns:
            if fnmatch(k, p):
                ret[p].append(k)
                break
        else:
            # no pattern matched
            ret[None].append(k)
    # NOTE: dict(**ret) (as the original did) raises TypeError whenever the
    # None key is present, since keyword names must be strings -- convert
    # positionally instead
    return dict(ret)
def groupedmem(db, keys, patterns):
    """Returns aggregated memory info for the given keys, grouped by pattern."""
    groups = groupkeys(keys, patterns)
    ret = {}
    for pat in patterns + [None]:
        members = groups.get(pat, None)
        if not members:
            continue
        ret[pat] = aggrmem(keymem(db, members))
    return ret
# REDIS-BASED MEMOIZATION
def makeredismemoize(prefix, host='127.0.0.1', port=6379, dbnum=0, timeout=60*60, flushold=0):
    """Function to create redis-based cache decorator.

    Make sure it's a functional method (i.e., no side effects).
    The first parameter is a prefix to use. Key names are found by adding the
    function name and then the args and kwargs.
    The next few parameters are database connection parameters.
    The timeout parameter sets the TTL of all cache entries, in secs (default 1 hour).
    If flushold is true, then it will flush all old keys beginning with the prefix.
    """
    import cPickle as pickle
    db = redis.Redis(host=host, port=port, db=dbnum)
    x = db.keys('a') # we need to make a call to make sure the database is actually accessible
    if flushold:
        todel = db.keys('%s-*' % prefix)
        # redis-py's delete() takes each key as a separate argument; the
        # original passed the raw list, which does not delete the intended
        # keys -- unpack it, and skip the call entirely when empty
        if todel:
            db.delete(*todel)
        print >>sys.stderr, 'Deleting all old keys starting with %s' % (prefix,)
    def actualret(fn):
        curprefix = '%s-%s-' % (prefix, fn.__name__)
        def newfn(*args, **kw):
            # cache key: repr of positional args + sorted kwargs
            keybase = repr((tuple(args), tuple(sorted(kw.items()))))
            key = curprefix+keybase
            # first see if we have the value in redis
            ret = db.get(key)
            if ret: return pickle.loads(ret)
            print >>sys.stderr, 'Created redis memoize key %s\n%s' % (keybase, key)
            # we didn't have it, so compute it
            ret = fn(*args, **kw)
            db.set(key, pickle.dumps(ret, -1))
            db.expire(key, timeout)
            return ret
        newfn.__name__ = fn.__name__ + ' (REDIS MEMOIZED for %ss)' % (timeout,)
        return newfn
    return actualret
# REDIS-BASED QUEUES
class RedisQueueService(object):
    """A queue service which uses redis as the backing store.
    Every item queued MUST be of the form (id, item), where the id is unique."""
    def __init__(self, host='localhost', port=6379, db=0, password=None, timeout=1, retries=-1, socket_timeout=None, encoder='pickle'):
        """Initializes the queue service with the given parameters,
        most of which are directly passed to the Redis() constructor.
        The timeout parameter is used to determine the delay between retrying broken connections.
        The retries parameter can be used to limit the number of retries on the initial connection attempt.
            If retries < 0, then it retries forever.
            If retries > 0, then it retries that many times.
        The encoder sets the type of encoding to use. Options are:
            'pickle' [default]: pickle
            'json': json encoding
        """
        self.host = host
        self.port = port
        self.db = db
        self.password = password
        self.timeout = timeout
        self.socket_timeout = socket_timeout
        self.encoder = encoder
        assert self.encoder in 'pickle json'.split()
        self.resetredis(retries)
    def __str__(self):
        """Returns description of self"""
        return 'RedisQueueService with host %s, port %s, db %s, password=%s...' % (self.host, self.port, self.db, self.password[:3] if self.password else None)
    def resetredis(self, retries=-1):
        """Sets or resets the redis connection.
        Keeps retrying given number of times (<0 means infinitely), using self.timeout as a delay"""
        while retries != 0:
            self.redis = redis.Redis(host=self.host, port=self.port, db=self.db, password=self.password, charset='ascii', socket_timeout=self.socket_timeout)
            # make sure the connection is working
            try:
                x = self.redis.dbsize()
                break
            except redis.exceptions.ConnectionError:
                retries -= 1
                time.sleep(self.timeout)
    def encode(self, obj):
        """Encodes the given object using our chosen encoder ('pickle' or 'json')."""
        if self.encoder == 'pickle':
            import cPickle as pickle
            return pickle.dumps(obj, -1)
        elif self.encoder == 'json':
            try:
                import simplejson as json
            except ImportError:
                import json
            return json.dumps(obj)
    def decode(self, s):
        """Decodes an object from the given string using our chosen encoder."""
        if self.encoder == 'pickle':
            import cPickle as pickle
            return pickle.loads(s)
        elif self.encoder == 'json':
            try:
                import simplejson as json
            except ImportError:
                import json
            return json.loads(s)
    def getqueue(self, zsname, incr=-1, max=1, retries=-1):
        """Gets a queue to use based on a zset.
        Chooses the item with the maximum score if max=1 (default), else the minimum score.
        If incr is != 0 (default -1), then increments the priority of the chosen job with it.
        Returns (item, score), with the old score (prior to incrementing).
        If there were no items, or an error, returns a false value."""
        while retries != 0:
            try:
                ret = self.redis.zrange(zsname, 0, 0, desc=max, withscores=1)
                if not ret: return ret
                item, score = ret[0]
                if incr:
                    self.redis.zincrby(zsname, item, incr)
                return (item, score)
            except redis.exceptions.ConnectionError:
                self.resetredis(retries)
                retries -= 1
    def get(self, qname, callback=None, workname=None, popwork=1, timeout=-1, retries=-1, retorig=0):
        """Gets an item from the given qname in a robust way.
        This involves:
            - getting the object from the queue, optionally putting it on a working queue atomically
            - decoding it to get an (id, item) pair
            - optionally calling a callback with (id, item, qname, rqs)
            - optionally removing it from working queue if it was added there (if popwork=1, which is the default)
            - returning an (id, item) tuple
        By default, this is a non-blocking call (timeout < 0), meaning that it returns None
        if there was no object ready.
        You can make it blocking by setting timeout >= 0:
            timeout=0: blocks forever
            timeout>0: blocks for given number of seconds (cast into an int), then returns result or None on error.
        Blocking catches connection errors and retries again.
        The number of retries can be given, and is infinite (< 0) by default
        If retorig is true, then returns ((id, item), originalobj). This is useful for manually removing items from the work queue later.
        """
        obj = None
        while retries != 0:
            try:
                if timeout < 0:
                    # non-blocking
                    if workname:
                        obj = self.redis.rpoplpush(qname, workname)
                    else:
                        obj = self.redis.rpop(qname)
                    break
                else:
                    # blocking
                    if workname:
                        obj = self.redis.brpoplpush(qname, workname, int(timeout))
                    else:
                        obj = self.redis.brpop(qname, int(timeout))
                    break
            except redis.exceptions.ConnectionError:
                self.resetredis(retries)
                retries -= 1
        # at this point, if there was no object, return None
        if not obj: return obj
        # there was an object, so decode it
        id, item = self.decode(obj)
        if callback:
            callback(id, item, qname, self)
        # remove it from working queue if we added it there and we want to remove it
        if workname and popwork:
            x = self.redis.lrem(workname, obj, num=1)
        if retorig: return ((id, item), obj)
        return (id, item)
    def getmany(self, qname, func, num=-1, **kw):
        """Gets and processes many items from the given input queue.
        The num determines how many items to get:
            < 0: used as a total time limit for amount of time spent getting from queue
            = 0: until the queue is empty
            > 0: maximum number of items to get from the queue
        """
        # first get a bunch of items
        todo = []
        origs = []
        t1 = time.time()
        while 1:
            # try to get an item
            obj = self.get(qname, popwork=0, retorig=1, **kw)
            if not obj: break # no more items
            obj, orig = obj
            todo.append(obj)
            origs.append(orig)
            if num > 0: # max number of items
                # stop once we've collected exactly num items (the original
                # used '>', an off-by-one that collected num+1)
                if len(todo) >= num: break
            elif num < 0: # maximum amount of time
                if time.time() - t1 > abs(num): break
        # now process
        ret = func(todo)
        # remove from workingoutq
        if 'workname' in kw:
            for obj in origs:
                x = self.redis.lrem(kw['workname'], obj, num=1)
        # return results
        return ret
    def put(self, id, item, qname, callback=None, retries=-1):
        """Puts an item on the given qname in a robust way.
        This involves:
            - encoding the (id, item) into an obj
            - optionally calling a callback with (id, item, qname, rqs)
            - putting the obj on the given output queue
        The number of retries can be given, and is infinite (< 0) by default
        """
        obj = self.encode((id, item))
        while retries != 0:
            try:
                x = self.redis.lpush(qname, obj)
                if callback:
                    callback(id, item, qname, self)
                break
            except redis.exceptions.ConnectionError:
                self.resetredis(retries)
                retries -= 1
    def putmany(self, ids, items, qname, callback=None, retries=-1):
        """Puts many items on the given qname in a robust way.
        This involves:
            - encoding each (id, item) into an obj
            - optionally calling a callback with (id, item, qname, rqs)
            - putting each obj on the given output queue
        The number of retries can be given, and is infinite (< 0) by default
        """
        objs = [self.encode((id, item)) for id, item in zip(ids, items)]
        p = self.redis.pipeline()
        for obj, id, item in zip(objs, ids, items):
            while retries != 0:
                try:
                    x = p.lpush(qname, obj)
                    if callback:
                        callback(id, item, qname, self)
                    break
                except redis.exceptions.ConnectionError:
                    self.resetredis(retries)
                    retries -= 1
        p.execute()
    def setstatusmsg(self, hashname, hashkey, msg, pipeline=None):
        """Sets the status message.
        Right now, it calls redis.hset(hashname, hashkey, '%s_%s' % (msg, timestamp)) unless
        pipeline is set, in which case it calls that on the pipeline.
        """
        p = pipeline if pipeline else self.redis.pipeline()
        p.hset(hashname, hashkey, '%s_%s' % (msg, str(time.time())))
        if not pipeline:
            p.execute()
from Queue import Full, Empty
class RQSQueue(object):
    """An abstraction over RedisQueueService queues, with the python Queue.Queue interface.
    This is not just a simple wrapper over Redis' list datatype, but a more sophisticated
    interface which includes our conventions for working with different jobs.
    """
    def __init__(self, inq=None, outq=None, incr=5, get_callback=None, put_callback=None, rqs=None, **rqs_kw):
        """Initializes this rqs queue object with the given rqs keywords.
        You can set the inq and/or the outq with the basenames for the input and output queues.
        These are assumed to end in :inq and :outq, respectively.
        They should be implemented as zsets in redis, and contain jobids.
        The appropriate job queue is gotten by appending :<jobid> to the qname.
        These job queues should be redis lists.
        The incr determines how many items to get from a single job queue before
        moving on and checking the next queue.
        The get_callback and put_callback functions are sent to the underlying rqs.get() and rqs.put() calls.
        """
        if rqs:
            self.rqs = rqs
        else:
            self.rqs = RedisQueueService(**rqs_kw)
        self.retries = rqs_kw.get('retries', -1)
        self.inq, self.outq = inq, outq
        if self.inq: assert self.inq.endswith(':inq')
        if self.outq: assert self.outq.endswith(':outq')
        self.incr = incr
        self.get_callback = get_callback
        self.put_callback = put_callback
        # per-instance cursor state: how many items we've taken from the
        # current job, and which job we're currently draining
        self.curincr = 0
        self.jobid = None
    def __str__(self):
        """Returns description of self"""
        return 'RQSQueue with inq %s, outq %s and rqs %s' % (self.inq, self.outq, str(self.rqs))
    def qsize(self):
        """Returns approximate queue size"""
        return 5 #FIXME this is just a placeholder
    def get(self, blocking=True, timeout=0):
        """Gets an item from the input queue.
        The item will be a ((realid, jobid), item) pair.
        To handle blocking and timeout, we do the following:
            If not blocking, then cycle through all jobs once, with negative timeout on each.
            If blocking with finite timeout (int, > 0), then cycle through jobs with the given timeouts on EACH job.
        Return as soon as you get an item.
        """
        tried = set()
        nloops = 0
        while 1:
            nloops += 1
            if self.curincr == 0 or self.jobid is None:
                # get a new job to work on
                cur = self.rqs.getqueue(self.inq, incr=-1, max=1, retries=self.retries)
                if not cur: raise Empty # nothing todo right now
                # if we're here, then we have a jobid
                self.jobid = cur[0]
            if self.jobid in tried: raise Empty # we already tried to get an item from this job, and failed
            # try to get an item from the currently selected job
            q = ':'.join((self.inq, self.jobid))
            if not blocking or timeout < 0: # don't block on this individual queue
                curtimeout = -1
            if blocking and timeout >= 0:
                curtimeout = timeout
            obj = self.rqs.get(q, callback=self.get_callback, workname=q.replace(':inq', ':inworkq'), timeout=curtimeout)
            if not obj:
                # empty/error, so immediately try to get a new jobid
                tried.add(self.jobid)
                self.curincr = 0
                continue
            # we got an item, so wrap the id into (id, jobid)
            id, item = obj
            # increment counters; wrap around after self.incr items from this job
            self.curincr += 1
            if self.curincr > self.incr:
                self.curincr = 0
            # return the object
            return ((id, self.jobid), item)
    def put(self, item, blocking=True, timeout=0):
        """Puts an item on the queue.
        The item must be a ((realid, jobid), actualitem) pair.
        The actual item must be a tuple and will have its first element replaced by the realid.
        We ignore the blocking and timeout parameters.
        """
        id, item = item
        # unpack the id into the original "realid" and the jobid, which we tacked onto it
        realid, jobid = id
        item = (realid,) + tuple(item[1:])
        q = ':'.join((self.outq, str(jobid)))
        self.rqs.put(realid, item, q, callback=self.put_callback, retries=self.retries)
    def putmany(self, items, blocking=True, timeout=0):
        """Puts many items on the queue.
        Each item must be a ((realid, jobid), actualitem) pair.
        The actual item must be a tuple and will have its first element replaced by the realid.
        We ignore the blocking and timeout parameters.
        """
        toput = {}
        # build up list of things toput, separated by qname
        for item in items:
            id, item = item
            # unpack the id into the original "realid" and the jobid, which we tacked onto it
            realid, jobid = id
            item = (realid,) + tuple(item[1:])
            q = ':'.join((self.outq, jobid))
            toput.setdefault(q, []).append((realid, item))
        # put all items by qname
        # (iterate (key, value) pairs; the original iterated the dict itself,
        # which yields bare keys and fails on tuple-unpacking)
        for q, fullitems in toput.items():
            realids, items = zip(*fullitems)
            self.rqs.putmany(realids, items, q, callback=self.put_callback, retries=self.retries)
    def get_nowait(self):
        """Non-blocking get"""
        return self.get(blocking=0)
    def put_nowait(self, item):
        """Non-blocking put"""
        return self.put(item, blocking=0)
# Glob patterns for keys in the main application database, used with
# groupkeys()/groupedmem() to bucket keys for memory accounting.
MAINDB_PATTERNS = [
    'active_jobs',
    'active_jobs:*',
    'archived',
    'archived:*',
    'deletedjobs',
    'errors:*:*',
    'face_counter',
    'faces:*',
    'fintimes:*',
    'image_counter',
    'images:*:*',
    'images:*',
    'jobs',
    'jobs:*:facemap',
    'jobs:*:faces',
    'jobs:*:images',
    'jobs:*:results',
    'jobs:*:status',
    'jobs:*:tasks',
    'jobs:*',
    'resets:*:*',
    'resettimes:*',
    'results:json:*',
    'temp_sub_*:*',
    'todo_*:*',
    'users:*:archived',
    'users:*:jobs',
    'users:*',
    'user_counter',
    'usermap',
    'users',
]
# Glob patterns for keys in the RedisQueueService database, used for
# grouping keys when inspecting queue memory usage.
RQS_PATTERNS = [
    'faceservice:afs:clspriorities',
    'faceservice:afs:clssizes',
    'faceservice:afs:jobpriorities',
    'faceservice:afs:jobsizes',
    'faceservice:afs:perc_small',
    'perc_small',
    'rqs:*:inq',
    'rqs:*:inq:*',
    'rqs:*:inworkq:*',
    'rqs:*:outq:*',
    'rqs:*:outworkq:*',
    'rqs:*:status:*',
    'rqs:*:tempstatus:*',
    'rqs:*:userinq:*',
    'rqs:*:workers',
    'rqs:*:workers:*',
]
def groupingmain(db):
    """Program for figuring out how to group a database's keys by pattern.

    Prints, for each pattern in RQS_PATTERNS, how many keys it matched,
    plus a final count for unmatched keys; then exits the process.
    """
    patterns = RQS_PATTERNS
    if 0:
        # dump every key in the db, one per line, then quit
        keys = db.keys()
        for k in keys:
            print k
        sys.exit()
    else:
        # offline mode: read a previously-dumped key list from the 'rqskeys' file
        keys = [l.strip() for l in open('rqskeys')]
    print '%d keys' % (len(keys))
    groups = groupkeys(keys, patterns)
    for pat in patterns:
        print '%8d\t%s' % (len(groups[pat]), pat)
    print '%8d\tNone' % (len(groups.get(None, [])))
    sys.exit()
def testmain(db):
    """Tests various functions.

    NOTE(review): most sections sit behind `if 0:`; flip on as needed.
    This module does `from pprint import pprint`, so the original
    `pprint.pprint(...)` calls were AttributeErrors on the function object;
    they now call pprint() directly.
    """
    #keys = [l.strip() for i, l in enumerate(open('rqskeys')) if i < 10000000]
    keys = sorted(db.keys())
    if 0:
        # keymem
        ret = keymem(db, keys)
        pprint(ret)
        # aggrmem, dict
        aggr = aggrmem(ret)
        pprint(aggr)
        # aggrmem, list
        aggr = aggrmem(ret.values())
        pprint(aggr)
        # dbinfo
        info = dbinfo(db, *keys)
        pprint(info)
        # dblist
        vals = dblist(db, *keys)
        pprint(vals)
    # groupedmem
    mem = groupedmem(db, keys, MAINDB_PATTERNS)
    pprint(mem)
if __name__ == '__main__':
    import json
    # default connection parameters, possibly overridden below
    host = 'localhost'
    port = 6379
    db = 0
    password = None
    # NOTE(review): len(sys.argv) is always >= 1 (argv[0] is the script name),
    # so this usage message can never print -- presumably '< 2' was intended;
    # left as-is to preserve behavior.
    if len(sys.argv) < 1:
        print 'Usage: python %s [<host=%s> [<port=%s> [<db=%s> [<password=%s>]]]]' % (sys.argv[0], host, port, db, password)
        sys.exit()
    try:
        try:
            # first see if the first arg is a json file with parameters
            j = json.load(open(sys.argv[1]))
            locals().update(j)
        except Exception:
            # otherwise, assume the parameters are positional command-line args
            host = sys.argv[1]
            port = int(sys.argv[2])
            db = int(sys.argv[3])
            password = sys.argv[4]
    except Exception: pass
    r = redis.Redis(host=host, port=port, db=db, password=password)
    # init done, now run what you want
    #testmain(r); sys.exit()
    def i(*pat):
        # shorthand: print type/length info for keys matching the patterns
        x = dbinfo(r, *pat)
    def l(*pat):
        # shorthand: print detailed listing for keys matching the patterns
        x = dblist(r, *pat)
    lev = lambda num: keysAtLevel(r, num)
    def I(*pats):
        # print server info(), optionally filtered to entries whose name
        # contains any of the given substrings
        ret = r.info()
        def matches(s):
            for pat in pats:
                if pat in s: return 1
            return 0
        if pats:
            validkeys = [k for k in ret if matches(k)]
            ret = dict((k, ret[k]) for k in validkeys)
        pprint(ret)
    reali, reall, reallev, realI = i, l, lev, I # as backups
    # drop into an interactive repl (from nkutils) with these helpers bound
    re = repl(locals=locals())
    re.run()
| {
"repo_name": "neeraj-kumar/nkpylib",
"path": "nkredisutils.py",
"copies": "1",
"size": "40970",
"license": "bsd-3-clause",
"hash": -8097370372725752000,
"line_mean": 38.8540856031,
"line_max": 760,
"alpha_frac": 0.5836953869,
"autogenerated": false,
"ratio": 3.8062058714232627,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.48899012583232626,
"avg_score": null,
"num_lines": null
} |
"""A set of utilities to use for web apps made using web.py.
Licensed under the 3-clause BSD License:
Copyright (c) 2010, Neeraj Kumar (neerajkumar.org)
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the author nor the
names of its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL NEERAJ KUMAR BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import os, sys, time
import web
try:
import simplejson as json
except ImportError:
import json
from nkutils import *
# alias for web's storage
stor = web.storage
# turn off debugging output by default
web.config.debug = False
# create the globals dictionary for use in rendering templates
# (a handful of builtins are exposed by name so .tmpl templates can call them)
g = globals()
for func in [zip, sorted, len, range, enumerate, getattr]:
    g[func.__name__] = func
# default globals passed to template render functions (e.g. renderDict)
RENDER_GLOBALS = g
def notfound(s='Error: 404 Page Not Found'):
    """Return a custom 404 response carrying the given message body."""
    response = web.notfound(s)
    return response
# INITIALIZATION FUNCTIONS
#app = web.application(urls, globals())
#application = app.wsgifunc()
# sets the app's 404 to the notfound function
#app.notfound = notfound
def setupdb(**kw):
    """Sets up the database and adds some default args based on dbn.
    Returns the db"""
    assert 'dbn' in kw
    dbn = kw['dbn']
    # fill in sqlite-specific defaults without clobbering caller-supplied values
    if dbn == 'sqlite':
        for key, val in dict(isolation_level=None, timeout=50).items():
            kw.setdefault(key, val)
    db = web.database(**kw)
    # set some other params
    if dbn == 'sqlite':
        db.query('pragma read_uncommitted=true') # will NOT be ACID on reads, but that's fine...
    return db
def initpytz():
    """Initialize pytz if available.
    Right now this is more an example than anything else"""
    import pytz
    # NOTE(review): `datetime` is not imported in this module directly; it is
    # presumably pulled in via `from nkutils import *` -- TODO confirm.
    dt = datetime.datetime(1980,1,1, tzinfo=pytz.utc)
def applicationWrapperExample():
    """An example of how to wrap an app"""
    # NOTE(review): example-only code -- `app` and `APIBASEDIR` are assumed to
    # exist in the importing module's namespace; this raises NameError if run as-is.
    _application = app.wsgifunc()
    def application(environ, start_response):
        # copy the deployment-specific base dir out of the WSGI environ
        global APIBASEDIR
        APIBASEDIR = environ.get('APIBASEDIR', '')
        #web.debug('Setting APIBASEDIR to %s, from env %s' % (APIBASEDIR, environ))
        return _application(environ, start_response)
# SMALL UTILITIES
class NKStor(web.Storage):
    """web.py Storage variant: lookups of missing keys/attributes return ''."""
    def __getattr__(self, k):
        """Like Storage attribute access, but '' when the attribute is absent."""
        try:
            val = super(NKStor, self).__getattr__(k)
        except AttributeError:
            val = ''
        return val
    def __getitem__(self, k):
        """Like Storage item access, but '' when the key is absent."""
        try:
            val = super(NKStor, self).__getitem__(k)
        except KeyError:
            val = ''
        return val
def mystorify(d):
    """Converts a python dictionary to a web.storage, recursively"""
    if not isinstance(d, dict):
        return d
    out = stor(d)
    for key in out:
        val = out[key]
        if isinstance(val, dict):
            out[key] = mystorify(val)
        elif isinstance(val, (list, tuple)):
            out[key] = [mystorify(item) for item in val]
    return out
class WebJSONEncoder(json.JSONEncoder):
    """JSON encoder that also understands web.Storage and datetime/date values."""
    def default(self, obj):
        """Serialize Storage as a plain dict and dates/datetimes as strings."""
        from datetime import datetime, date
        if isinstance(obj, web.Storage):
            return dict(obj)
        if isinstance(obj, datetime):
            # drop timezone from datetime if it has it
            #FIXME is this what we want?!!
            return str(obj.replace(tzinfo=None))
        if isinstance(obj, date):
            return str(obj)
        return json.JSONEncoder.default(self, obj)
def jsonp(s, callback=''):
    """Wraps the given string with the callback function name given.
    If callback is not given, then just returns s"""
    if not callback:
        return s
    return '%s(%s);' % (callback, s)
def runinthread(target, args=(), kwargs={}, daemon=1, procs=[]):
    """Runs `target(*args, **kwargs)` in a (by default daemonized) thread.

    The started Thread object is appended to `procs`, which intentionally
    defaults to a shared module-level list, giving callers a registry of every
    thread started through this helper. Returns nothing.
    """
    from threading import Thread
    t = Thread(target=target, args=args, kwargs=kwargs)
    # the `daemon` attribute replaces the deprecated setDaemon() call;
    # must be set before start()
    t.daemon = bool(daemon)
    t.start()
    procs.append(t)
# INPUT/OUTPUT UTILITIES
def html(d, elements, title=None, css=[], js=[], gatag=None, rel='', tmplfname='generic.tmpl', globalvars=None, **kw):
    """Renders the given page with given js, css, etc.
    If gatag is given, then it's used as a google analytics id.
    If rel is given, it's used as the basis for local style sheets and javascripts.
    Otherwise, local scripts are at /static/
    Remote scripts are always fine.
    You can also specify globalvars as needed. zip and enumerate are always included.
    """
    # storage handed to the template: everything the page shell needs
    # (css/js defaults are mutable but only read here, never mutated)
    out = stor(css=css, js=js, elements=elements, gatag=gatag, rel=rel)
    import web.template as template
    if not globalvars:
        globalvars = {}
    globalvars.update(dict(zip=zip, enumerate=enumerate))
    render = template.frender(tmplfname, globals=globalvars)
    if not title:
        # no explicit title given: fall back to a title-cased d.title
        title = d.title.title()
    out.title = title
    # copy any extra keyword args straight into the template storage
    # NOTE(review): iteritems() is Python 2 only
    for k, v in kw.iteritems():
        out[k] = v
    return render(out)
def renderDict(d, tmpl):
    """Renders a given dictionary to a string using the given template fname."""
    import web.template as template
    render = template.frender(tmpl, globals=RENDER_GLOBALS)
    return str(render(d))
def renderDictToFile(d, tmpl, outname=None, reldir=__file__, **kw):
    """Renders a dictionary using the given template.
    This assumes the tmpl fname ends in .tmpl.
    If no outname is given, then simply replaces .tmpl with .html
    Existing outputs are renamed to '.'+outname temporarily, and then deleted.
    The final output is passed through the html() function, with the given kw.
    Computes the 'rel' parameter using the path given in 'reldir'.
    The reldir defaults to the location of this file.
    Returns the outname.
    """
    # NOTE(review): Python 2 only (print statement below)
    s = renderDict(d, tmpl)
    dir, fname = os.path.split(tmpl)
    fname = fname.rsplit('.tmpl')[0]
    if not outname:
        outname = os.path.join(dir, fname + '.html')
    print 'Rendering from %s to %s' % (os.path.join(dir, fname+'.tmpl'), outname)
    # make parent dirs (ignore 'already exists')
    try:
        os.makedirs(os.path.dirname(outname))
    except OSError: pass
    # make sure any existing output is first renamed so render doesn't use it
    # NOTE(review): '.'+outname prefixes the whole path, not the basename, so
    # for outname 'a/b.html' this tries 'a/b.html' -> '.a/b.html' and silently
    # fails unless '.a/' exists -- TODO confirm intended
    try:
        os.rename(outname, '.'+outname)
    except OSError: pass
    # compute the relative path to the static dir
    cur = os.path.abspath(os.path.dirname(reldir))
    outdir = os.path.abspath(os.path.dirname(outname))
    prefix = os.path.commonprefix([cur, outdir])
    # number of directory levels outname sits below the common prefix
    levels = len(outdir.replace(prefix, '', 1).split('/')) - 1
    if levels == 0:
        rel = '.'
    else:
        rel = '/'.join('..' for i in range(levels))
    #print cur, outdir, prefix, levels, rel
    # wrap the rendered fragment in the full page shell
    s = str(html(d, [s], rel=rel, **kw))
    f = open(outname, 'wb')
    f.write(s)
    f.close()
    # remove old output
    try:
        os.remove('.'+outname)
    except OSError: pass
    return outname
def rethandler(data, input, txtfmt=None, htmldat=None, htmlfunc=None, jsoncontent=1, **kw):
    """Returns results using the given dictionary of data.
    The input is used to read the format ('input.fmt') and callback
    for jsonp ('input.callback'). The default format is 'json'.
    If the fmt is text, then txtfmt is either a format string
    or a function that takes the data and returns a string.
    If the format is html, then you have 3 options:
        1. Set txtfmt to None and htmldat to be a pair of (title, renderfunc).
           This is passed to html(), along with **kw.
        2. Set txtfmt to None, htmlfunc to a custom html function, and htmldat
           to be its args. Then htmlfunc(data, *htmldat, **kw) will be called.
        3. Set txtfmt just as you would for 'txt' format and set htmldat to None.
           This is just like formatting text output, but the content-type is set to html
    If jsoncontent is true (the default), then sends a content-type of application/json
    """
    formats = 'json txt html'.split()
    fmt = input.get('fmt', 'json')
    if fmt == 'json':
        # json output, optionally wrapped for jsonp via input['callback']
        if jsoncontent:
            web.header('Content-Type', 'application/json; charset=utf-8')
        else:
            web.header('Content-Type', 'text/plain; charset=utf-8')
        s = json.dumps(data, cls=WebJSONEncoder, indent=2, sort_keys=1)
        s = jsonp(s, input.get('callback', ''))
        #web.debug('About to return json output: %s' % s)
        return s
    elif txtfmt is not None and fmt in 'txt html'.split():
        # text-formatting path (also used for html when txtfmt is set: option 3)
        web.header('Content-Type', 'text/%s; charset=utf-8' % ('plain' if fmt == 'txt' else 'html'))
        # txtfmt may be a %-format string or a callable taking the data
        # NOTE(review): basestring is Python 2 only
        if isinstance(txtfmt, basestring):
            return txtfmt % (data)
        else:
            return txtfmt(data)
    if fmt == 'html' and htmldat is not None:
        if htmlfunc:
            # option 2: fully custom html function
            return htmlfunc(data, *htmldat, **kw)
        else:
            # option 1: (title, renderfunc) pair passed through html()
            title, renderfunc = htmldat
            return html(stor(), elements=[renderfunc(data)], title=title, **kw)
    raise web.notfound('Illegal format (%s). Options are: %s' % (fmt, ', '.join(formats)))
def textrethandler(s, fmt='txt'):
    """A convenience function for using a ret handler for simple text or html.
    Set fmt to 'txt' or 'html'."""
    data = {'s': s}
    inp = {'fmt': fmt}
    return rethandler(data, inp, txtfmt='%(s)s')
def imrethandlerfname(fname, params={}, cachedir='static/cache/', cachenamefunc=stringize, cacherep='static/', postfunc=None):
    """Returns a local path for the given image, after various manipulations.
    Also checks the params for various image manipulation options:
        'cache': if 0, then no caching is done.
                 if 1, then the image is cached in the given cachedir.
                 Caching is done by replacing cacherep (default='static/') in
                 the fname to the cachedir given. The default cachedir is
                 'static/cache/'.
                 Then, the other options passed to this function are used to
                 generate a cache fname using the cachenamefunc given
                 (defaulting to stringize).
        'aspect': landscape: rotates the image if needed to make it landscape.
                  portrait: rotates the image if needed to make it portrait.
        'rot': Rotates the image counter-clockwise by this many degrees.
               This must be a multiple of 90.
        'w': Sets the width to this many pixels, maintaining aspect ratio.
             Will not exceed original size.
        'h': Sets the height to this many pixels, maintaining aspect ratio.
             Will not exceed original size.
        'crop': Given as 'x0,y0,x1,y1', this crops the image (at the original size).
                If the crop rectangle given extends outside of the image, those
                areas are filled with black.
    Returns the local path of the processed image file (not the raw bytes --
    see imrethandler() for that).
    If there's any problem processing the image, a web.badrequest() is raised.
    """
    from PIL import Image
    import tempfile
    from nkimageutils import croprect
    ext = fname.rsplit('.')[-1].lower()
    # caching is on unless explicitly disabled, or we lack a cache name/dir
    caching = 0 if 'cache' in params and int(params['cache']) == 0 else 1
    if not cachenamefunc or not cachedir:
        caching = 0
    # generate the cachename and return the cached image, if it exists
    opts = [(k, params[k]) for k in 'aspect crop rot w h'.split() if k in params]
    if not cachedir.endswith('/'):
        cachedir += '/'
    outputfname = fname.replace(cacherep, cachedir) + '__'+stringize(opts)+'.'+ext
    if caching and os.path.exists(outputfname): return outputfname
    # if not, actually go through and apply all the transformations
    t1 = time.time()
    # first make sure we can open this image and get its size
    try:
        im = Image.open(fname)
        w, h = im.size
    except IOError: raise web.badrequest()
    t2 = time.time()
    # crop the image according to the crop parameters (given as x0,y0,x1,y1)
    if 'crop' in params:
        try:
            rect = map(float, params['crop'].strip().split(','))
            rect = [int(c+0.5) for c in rect]
            assert len(rect) == 4
        except Exception: raise web.badrequest()
        im = croprect(im, rect, bg=(0,0,0))
        w, h = im.size
    # rotate the image (right-angles only)
    if 'rot' in params:
        r = int(params['rot'])
        while r < 0:
            r += 360
        if r % 90 > 0: raise web.badrequest()
        # NOTE(review): rot=0 (or 360) passes the check above but KeyErrors
        # here -- confirm callers never send it
        meth = {90: Image.ROTATE_90, 180: Image.ROTATE_180, 270: Image.ROTATE_270}[r]
        im = im.transpose(meth)
    # set aspect ratio explicitly to either 'landscape' or 'portrait'
    if 'aspect' in params:
        a = params['aspect'].strip().lower()
        if a == 'landscape':
            if im.size[0] < im.size[1]:
                im = im.transpose(Image.ROTATE_90)
        elif a == 'portrait':
            if im.size[0] > im.size[1]:
                im = im.transpose(Image.ROTATE_90)
    t3 = time.time()
    # resize image down to requested size (thumbnail never upscales)
    thumbsize = list(im.size)
    if 'w' in params:
        thumbsize[0] = int(params['w'])
    if 'h' in params:
        thumbsize[1] = int(params['h'])
    im.thumbnail(thumbsize, Image.ANTIALIAS)
    t4 = time.time()
    # convert to color if it's a palette-based image
    if im.mode == 'P':
        im = im.convert('RGB')
    # optional caller-supplied post-processing hook
    if postfunc:
        im = postfunc(im, fname)
    t5 = time.time()
    if caching: # using cache filename
        tempname = outputfname
        try:
            os.makedirs(os.path.dirname(outputfname))
        except OSError: pass
    else: # using temp filename
        f, tempname = tempfile.mkstemp(suffix='.'+ext)
        os.close(f)
    im.save(tempname)
    web.debug('Returning image at %s (cached to %s) with params %s, for final size %s (%0.3f secs to open, %0.3f secs to rotate, %0.3f secs to resize, %0.3f secs to postfunc)' % (fname, tempname, params, im.size, t2-t1, t3-t2, t4-t3, t5-t4))
    return tempname
def imrethandler(fname, params={}, cachedir='static/cache/', cachenamefunc=stringize, cacherep='static/', postfunc=None):
    """Returns the given image, setting the content type appropriately.
    Simply a wrapper on imrethandlerfname().
    Returns the raw data from the processed image, after setting web.header().
    If there's any problem processing the image, a web.badrequest() is raised.
    """
    outpath = imrethandlerfname(fname=fname, params=params, cachedir=cachedir, cachenamefunc=cachenamefunc, cacherep=cacherep, postfunc=postfunc)
    ext = outpath.rsplit('.')[-1].lower()
    web.header('Content-Type', 'image/%s' % (ext))
    #TODO add etags?
    return open(outpath, 'rb').read()
def icongenerator(params={}, cachedir='static/cache/'):
    """Creates an icon with given parameters:
        'cache': if 0, then no caching is done.
                 if 1, then the image is cached in the given cachedir.
                 The options passed to this function are used to generate a cache fname.
        'w': Sets the width to this many pixels.
        'h': Sets the height to this many pixels.
        'fill': Sets the fill color
        'outline': Sets the outline color
        'shape': Sets the shape to draw:
            'rect': rectangle/square
            'oval': oval/circle
            'uptri': triangle pointing up
            'downtri': triangle pointing down
            'lefttri': triangle pointing left
            'righttri': triangle pointing right
            'uppie': pie slice pointing up (tip at center)
            'downpie': pie slice pointing down (tip at center)
            'leftpie': pie slice pointing left (tip at center)
            'rightpie': pie slice pointing right (tip at center)
        'rot': Rotate the figure after generation by given number of degrees (counter-clockwise)
    Returns the raw data from the processed image, after setting web.header().
    If there's any problem processing the image, a web.badrequest() is raised.
    """
    from PIL import Image, ImageDraw
    import tempfile
    web.header('Content-Type', 'image/png')
    caching = 0 if 'cache' in params and int(params['cache']) == 0 else 1
    if not cachedir:
        caching = 0
    # generate the cachename and return the cached image, if it exists
    opts = [(k, params[k]) for k in 'w h fill outline shape rot'.split() if k in params]
    if not cachedir.endswith('/'):
        cachedir += '/'
    outputfname = os.path.join(cachedir, 'icon-'+stringize(opts)+'.png')
    if caching and os.path.exists(outputfname): return open(outputfname, 'rb').read()
    # if not, actually go through and generate the image
    w, h = [int(params.setdefault(f, 32)) for f in 'wh']
    # near-transparent background (alpha=1)
    im = Image.new('RGBA', (w,h), (0,0,0,1))
    draw = ImageDraw.Draw(im)
    shape = params.setdefault('shape', 'oval')
    options = {}
    for k in 'fill outline'.split():
        if k in params:
            options[k] = params[k]
    bbox = (0,0,w-1,h-1)
    if shape in 'rect rectangle square'.split():
        draw.rectangle(bbox, **options)
    elif shape in 'oval ellipse circ circle'.split():
        draw.ellipse(bbox, **options)
    elif shape.endswith('pie'):
        direction = shape[:-3]
        start, end = dict(right=(135,225), up=(45,135), down=(225,315), left=(315,45))[direction]
        draw.pieslice(bbox, start, end, **options)
    elif shape.endswith('tri'):
        # draw an up-pointing triangle, then transpose it to the requested direction
        draw.polygon((w//2,0,w-1,h-1,0,h-1), **options)
        direction = shape[:-3]
        angle = dict(right=Image.ROTATE_270, up=Image.FLIP_LEFT_RIGHT, down=Image.ROTATE_180, left=Image.ROTATE_90)[direction]
        im = im.transpose(angle)
    rot = int(params.get('rot', 0))
    if rot:
        # expand=1: the rotated canvas grows to fit the whole figure
        im = im.rotate(rot, Image.BICUBIC, 1)
    # BUG FIX: Image.resize returns a new image and is not in-place; the
    # original discarded the result, so 'w'/'h' were never re-applied after
    # an expanding rotation
    im = im.resize((w,h), Image.ANTIALIAS)
    if caching: # using cache filename
        tempname = outputfname
        try:
            os.makedirs(os.path.dirname(outputfname))
        except OSError: pass
    else: # using temp filename
        f, tempname = tempfile.mkstemp(suffix='.png')
        os.close(f)
    im.save(tempname)
    web.debug('Returning icon of size %s (cached to %s) with params %s' % (im.size, tempname, params))
    return open(tempname, 'rb').read()
def watermarkpostfunc(im, fname, watermark, minsize=(0,0), loc=(-1,-1), opacity=1.0, **kw):
    """A postfunc to use for imrethandler which adds a watermark.
    Options:
        watermark: one of the following:
            a string - rendered using createTextWatermark() and **kw.
            an image - must be same size as im - simply composited on.
        minsize: a 2-ple with minimum width and height requirements to create watermark.
        loc: The (x,y) location to put the watermark. If a float, puts it at the given percentage.
             If a positive int, puts it at the given offset to the top-left.
             If a negative int, puts it at the given offset to the bottom-right.
        opacity: determines how opaque to make the watermark (1.0 = fully opaque).
    The output is converted to RGB.
    Use genericBind to bind the 3rd arg onwards.
    """
    from nkimageutils import createTextWatermark, watermarkImage
    # check size: too-small images are returned unmarked
    if im.size[0] < minsize[0] or im.size[1] < minsize[1]: return im
    # resolve each loc coordinate to an absolute pixel offset
    outloc = []
    for cur, lim in zip(loc, im.size):
        # deal with negative values first
        if cur < 0: # relative to bottom-right
            # flip it around
            # NOTE(review): these flips yield values beyond the image extent
            # (e.g. the default -1 becomes lim+1, and -0.25 becomes 1.25);
            # `lim + cur` / `1.0 + cur` may have been intended -- left as-is,
            # TODO confirm against callers before changing
            if isinstance(cur, float):
                cur = 1.0 - cur
            else:
                cur = lim - cur
        # now deal with percentages
        if isinstance(cur, float): # percentage
            cur = int(lim * cur)
        outloc.append(cur)
    loc = tuple(outloc)
    # figure out type of watermark: strings are rendered into a mark image
    if isinstance(watermark, basestring):
        watermark = createTextWatermark(watermark, im.size, loc, **kw)
    # BUG FIX: the original passed the undefined name `mark` here (NameError)
    im = watermarkImage(im, watermark, opacity=opacity).convert('RGB')
    return im
def savefile(input, fname, uploadvar='myfile', urlvar='url'):
    """Saves a file uploaded or from a url to the given fname.
    If uploadvar is given (default 'myfile') and exists, that file is saved as
    an upload. Note that the form must set enctype="multipart/form-data".
    If urlvar is given (default 'url') and exists, that file is downloaded.
    Uses urllib.urlretrieve, so set a custom url opener before-hand if needed.
    Returns the fname the file was saved to.
    If there's an error, raises web.notfound() with message.
    """
    # ensure the target directory exists (ignore 'already exists')
    try:
        os.makedirs(os.path.dirname(fname))
    except OSError: pass
    if uploadvar in input and input[uploadvar]:
        #TODO store the original fname somewhere
        try:
            f = open(fname, 'wb')
            f.write(input[uploadvar])
            f.close()
        except IOError, e:
            raise web.notfound('Error getting fileupload from var %s - %s' % (uploadvar, e))
    elif urlvar in input and input[urlvar]:
        # NOTE(review): `urllib` is not imported in this module directly;
        # presumably provided by `from nkutils import *` -- TODO confirm
        try:
            fname, headers = urllib.urlretrieve(input[urlvar], fname)
        except IOError, e:
            raise web.notfound('Error getting url %s - %s' % (input[urlvar], e))
    else:
        raise web.notfound('Error: no url specified in %s and no file uploaded in %s' % (uploadvar, urlvar))
    return fname
def get_content_type(filename, default='application/octet-stream'):
    """Guesses the content type from a filename, or uses the default"""
    import mimetypes
    guessed, _encoding = mimetypes.guess_type(filename)
    return guessed or default
def encode_multipart_formdata(fields, files):
    """Encodes a multipart form request that contains normal fields as well as files to upload.
    fields is a sequence of (name, value) elements for regular form fields.
    files is a sequence of (name, filename, value) elements for data to be uploaded as files
    Return (content_type, body) ready for httplib.HTTP instance
    """
    BOUNDARY = '----------ThIs_Is_tHe_bouNdaRY_$'
    CRLF = '\r\n'
    parts = []
    # plain form fields: header, blank line, value
    for key, value in fields:
        parts.extend(['--' + BOUNDARY,
                      'Content-Disposition: form-data; name="%s"' % key,
                      '',
                      value])
    # file fields additionally carry a filename and a guessed content type
    for key, filename, value in files:
        parts.extend(['--' + BOUNDARY,
                      'Content-Disposition: form-data; name="%s"; filename="%s"' % (key, filename),
                      'Content-Type: %s' % get_content_type(filename),
                      '',
                      value])
    parts.append('--' + BOUNDARY + '--')
    parts.append('')
    body = CRLF.join(parts)
    content_type = 'multipart/form-data; boundary=%s' % BOUNDARY
    return content_type, body
def postFilesToURL(url, fields, files):
    """Posts the given fields and files to an http address as multipart/form-data.
    fields is a sequence of (name, value) elements for regular form fields.
    files is a sequence of (name, filename, value) elements for data to be uploaded as files
    Return the server's response as an httplib.HTTPResponse object.
    Note that this doesn't catch any exceptions. You should catch them yourself.
    """
    # NOTE(review): urlparse/httplib are the Python 2 module names
    import urlparse, httplib
    # parse url into host and path
    urlparts = urlparse.urlsplit(url)
    host, path = urlparts[1], urlparts[2]
    # encode request
    content_type, body = encode_multipart_formdata(fields, files)
    # make the http request, with the appropriate headers
    h = httplib.HTTPConnection(host)
    h.putrequest('POST', path)
    h.putheader('content-type', content_type)
    h.putheader('content-length', str(len(body)))
    h.endheaders()
    # send the request and return the response.
    h.send(body)
    resp = h.getresponse()
    return resp
# SECURITY/AUTH
def makesalt(saltlen=128):
    """Makes a random salt.
    DEPRECATED: use the bcrypt-based hashpass() instead!
    """
    import random, string
    # Python 2 print-to-stderr syntax; string.letters is also Python 2 only
    print >>sys.stderr, 'Warning: makesalt() is deprecated. Use hashpass(), which uses bcrypt, directly'
    # random alphanumeric chars; NOTE(review): `random` is not
    # cryptographically secure -- tolerated only because this is deprecated
    rchar = lambda: random.choice(string.letters+string.digits)
    salt = ''.join([rchar() for i in range(saltlen)])
    return salt
def oldhashpass(s, niters=1001):
    """Makes a hashed pass from the given string, stretching by the given number of iterations.
    DEPRECATED: use the bcrypt-based hashpass() instead!"""
    import hashlib
    # Python 2 print-to-stderr syntax
    print >>sys.stderr, 'Warning: oldhashpass() is deprecated. Use hashpass(), which uses bcrypt, directly'
    # repeated sha256 hex digests for naive key stretching
    hash = s
    for i in range(niters):
        hash = hashlib.sha256(hash).hexdigest()
    return hash
def hashpass(s, hashpw=None, workfactor=10):
    """Hash a password string with bcrypt, or verify one against a stored hash.

    First-time hashing: call with just the password; a fresh salt is generated
    with the given workfactor (exponential cost; 10 is a reasonable default).
    Verification: call hashpass(candidate, storedhash) and compare the result
    to storedhash -- bcrypt embeds the salt in its output, so no separate salt
    storage is needed (do NOT use the deprecated makesalt()). The format is
    portable across bcrypt implementations. The resulting hash is 60
    characters of letters, digits, and [$./].
    (See http://codahale.com/how-to-safely-store-a-password/ )
    """
    import bcrypt
    salt = hashpw if hashpw else bcrypt.gensalt(workfactor)
    return bcrypt.hashpw(s, salt)
# Root of the auth error hierarchy; catch this to handle any auth failure.
class AuthException(Exception): pass
# Raised when the user lookup (getuserfunc) returns nothing for the username.
class InvalidUserException(AuthException): pass
# Raised when the supplied password does not match the stored hash.
class InvalidPasswordException(AuthException): pass
def oldauth(username, passwd, getuserfunc):
    """Checks the given username and password and returns a dict with fields username, userid.
    DEPRECATED: use the bcrypt-based auth() instead!
    Relies on a 'getuserfunc(username)' function which should query the appropriate databases
    and return a dict with {username: username, passwd: hashpass, salt: salt, id: userid} or None on error.
    Note that this can raise exceptions, which are guaranteed to be subclasses of AuthException.
    """
    record = getuserfunc(username)
    if not record:
        raise InvalidUserException
    # re-hash the candidate password with the stored salt and compare
    if oldhashpass(passwd + record['salt']) != record['passwd']:
        raise InvalidPasswordException
    return dict(username=record['username'], userid=record['id'])
def auth(username, passwd, getuserfunc):
    """Checks the given username and password and returns a dict with fields username, userid.
    Relies on a 'getuserfunc(username)' function which should query the appropriate databases
    and return a dict with {username: username, passwd: hashedpass, id: userid} or None on error.
    Note that this can raise exceptions, which are guaranteed to be subclasses of AuthException.
    """
    record = getuserfunc(username)
    if not record:
        raise InvalidUserException
    stored = record['passwd']
    # bcrypt embeds the salt in the stored hash, so verification is a re-hash
    if hashpass(passwd, stored) != stored:
        raise InvalidPasswordException
    return dict(username=record['username'], userid=record['id'])
def testauth():
    """Tests auth-related functions"""
    # NOTE(review): Python 2 only (print statements, `except Exception, e`)
    # check the hashpass() base function
    p = 'dofij'
    pw = hashpass(p)
    print pw
    x = hashpass(p, pw)
    print x
    assert x == pw, 'The two passes should be identical'
    # check the auth() wrapper
    u = 'user 1'
    p = 'user password'
    hashpw = hashpass(p)
    # fake user-lookup function returning a fixed record
    userfunc = lambda uname: dict(username=uname, passwd=hashpw, id=1)
    x = auth(u, p, userfunc)
    print 'Should be Valid: ', x
    # wrong password must raise InvalidPasswordException
    try:
        x = auth(u, 'wrong password', userfunc)
        print 'Should never get here: ', x
    except Exception, e:
        print 'Should get InvalidPasswordException: got %s: %s' % (type(e), e)
    # unknown user (lookup returns None) must raise InvalidUserException
    try:
        x = auth(u, 'user password', lambda u: None)
        print 'Should never get here: ', x
    except Exception, e:
        print 'Should get InvalidUserException: got %s: %s' % (type(e), e)
def cleanparameters(parameters, torem='username passwd passwdconf Login Register'.split()):
    """Cleans parameters by removing sensitive information from it"""
    for key in torem:
        # drop the key if present; ignore it otherwise
        parameters.pop(key, None)
    return parameters
def simplefilter(lst, start=0, ss=1, num=50, **kw):
    """Return up to `num` elements of `lst`, starting at index `start` and
    taking every `ss`-th element from there.

    start/ss/num may arrive as strings (e.g. straight from web input); each is
    coerced to int once. Extra keyword args are accepted and ignored.
    """
    # BUG FIX: coerce once up front -- the original used the raw `start` in
    # `(i - start)`, which raised TypeError for string input
    start, ss, num = int(start), int(ss), int(num)
    ret = []
    for i, el in enumerate(lst[start:]):
        if len(ret) >= num:
            break
        # BUG FIX: i is already relative to `start` (enumerate runs over the
        # slice), so subsample on i directly instead of (i - start)
        if i % ss != 0:
            continue
        ret.append(el)
    return ret
# WEB CLASSES
class robots:
    """Handler for /robots.txt that disallows all crawlers."""
    def GET(self):
        """Serve a robots.txt body blocking every user-agent."""
        lines = ['User-agent: *', 'Disallow: /']
        return '\n'.join(lines)
# this is how the main method should look for most webapps:
if __name__ == '__main__':
    # NOTE(review): `app` is not defined in this module (see the commented-out
    # initialization block above); importing modules are expected to define it.
    app.run()
    #testauth()
| {
"repo_name": "neeraj-kumar/nkpylib",
"path": "nkwebutils.py",
"copies": "1",
"size": "30608",
"license": "bsd-3-clause",
"hash": 5244189327733948000,
"line_mean": 41.043956044,
"line_max": 241,
"alpha_frac": 0.6481965499,
"autogenerated": false,
"ratio": 3.8236102435977513,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.49718067934977517,
"avg_score": null,
"num_lines": null
} |
"""A set of utility functions
"""
from collections import OrderedDict
import csv
from warnings import warn
from .results import ResultDict
import pkgutil
import numpy as np
import scipy as sp
__all__ = ["scale_samples", "read_param_file",
"ResultDict", "avail_approaches"]
def avail_approaches(pkg):
    '''Create list of available modules.

    Arguments
    ---------
    pkg : module
        module to inspect

    Returns
    ---------
    method : list
        A list of available submodules
    '''
    # helper modules that are not analysis approaches
    skip = {'common_args', 'directions', 'sobol_sequence'}
    return [name for _importer, name, _ispkg in
            pkgutil.walk_packages(path=pkg.__path__)
            if name not in skip]
def scale_samples(params, bounds):
    '''Rescale samples in 0-to-1 range to arbitrary bounds

    Operates on `params` in place; nothing is returned.

    Arguments
    ---------
    bounds : list
        list of lists of dimensions `num_params`-by-2
    params : numpy.ndarray
        numpy array of dimensions `num_params`-by-:math:`N`,
        where :math:`N` is the number of samples
    '''
    bound_arr = np.array(bounds)
    lower = bound_arr[:, 0]
    upper = bound_arr[:, 1]
    # every upper bound must strictly exceed its lower bound
    if np.any(lower >= upper):
        raise ValueError("Bounds are not legal")
    # in-place equivalent of: params = params * (upper - lower) + lower
    np.multiply(params, upper - lower, out=params)
    np.add(params, lower, out=params)
def unscale_samples(params, bounds):
    """Rescale samples from arbitrary bounds back to [0,1] range

    Operates on `params` in place; nothing is returned.

    Arguments
    ---------
    bounds : list
        list of lists of dimensions num_params-by-2
    params : numpy.ndarray
        numpy array of dimensions num_params-by-N,
        where N is the number of samples
    """
    bound_arr = np.array(bounds)
    lower = bound_arr[:, 0]
    upper = bound_arr[:, 1]
    # every upper bound must strictly exceed its lower bound
    if np.any(lower >= upper):
        raise ValueError("Bounds are not legal")
    # in-place equivalent of: params = (params - lower) / (upper - lower)
    np.subtract(params, lower, out=params)
    np.divide(params, upper - lower, out=params)
def nonuniform_scale_samples(params, bounds, dists):
    """Rescale samples in 0-to-1 range to other distributions

    Returns a new array; `params` is not modified.

    Arguments
    ---------
    params : numpy.ndarray
        numpy array of dimensions num_params-by-N,
        where N is the number of samples (columns are parameters)
    bounds : list
        list of lists of dimensions num_params-by-2; per-distribution meaning
        of the two entries is described under `dists`
    dists : list
        list of distributions, one for each parameter
            unif: uniform with lower and upper bounds
            triang: triangular with width (scale) and location of peak
                    location of peak is in percentage of width
                    lower bound assumed to be zero
            norm: normal distribution with mean and standard deviation
            lognorm: lognormal with ln-space mean and standard deviation
    """
    # NOTE(review): this relies on `sp.stats` being importable via the
    # module-level `import scipy as sp`; scipy.stats must have been imported
    # somewhere for the attribute to resolve -- TODO confirm
    b = np.array(bounds)
    # initializing matrix for converted values
    conv_params = np.zeros_like(params)
    # loop over the parameters (one column per parameter)
    for i in range(conv_params.shape[1]):
        # setting first and second arguments for distributions
        b1 = b[i][0]
        b2 = b[i][1]
        if dists[i] == 'triang':
            # checking for correct parameters: scale > 0, peak fraction in (0,1)
            if b1 <= 0 or b2 <= 0 or b2 >= 1:
                raise ValueError('''Triangular distribution: Scale must be
                greater than zero; peak on interval [0,1]''')
            else:
                # ppf maps the uniform [0,1] sample through the inverse CDF
                conv_params[:, i] = sp.stats.triang.ppf(
                    params[:, i], c=b2, scale=b1, loc=0)
        elif dists[i] == 'unif':
            if b1 >= b2:
                raise ValueError('''Uniform distribution: lower bound
                must be less than upper bound''')
            else:
                conv_params[:, i] = params[:, i] * (b2 - b1) + b1
        elif dists[i] == 'norm':
            if b2 <= 0:
                raise ValueError('''Normal distribution: stdev must be > 0''')
            else:
                conv_params[:, i] = sp.stats.norm.ppf(
                    params[:, i], loc=b1, scale=b2)
        # lognormal distribution (ln-space, not base-10)
        # paramters are ln-space mean and standard deviation
        elif dists[i] == 'lognorm':
            # checking for valid parameters
            if b2 <= 0:
                raise ValueError(
                    '''Lognormal distribution: stdev must be > 0''')
            else:
                # exponentiate a normal ppf to get the lognormal sample
                conv_params[:, i] = np.exp(
                    sp.stats.norm.ppf(params[:, i], loc=b1, scale=b2))
        else:
            valid_dists = ['unif', 'triang', 'norm', 'lognorm']
            raise ValueError('Distributions: choose one of %s' %
                             ", ".join(valid_dists))
    return conv_params
def read_param_file(filename, delimiter=None):
    """Unpacks a parameter file into a dictionary

    Reads a parameter file of format::

        Param1,0,1,Group1,dist1
        Param2,0,1,Group2,dist2
        Param3,0,1,Group3,dist3

    (Group and Dist columns are optional; lines whose name starts with '#'
    are treated as comments and skipped.)

    Returns a dictionary containing:
        - names - the names of the parameters
        - bounds - a list of lists of lower and upper bounds
        - num_vars - a scalar indicating the number of variables
          (the length of names)
        - groups - a list of group names (strings) for each variable
        - dists - a list of distributions for the problem,
          None if not specified or all uniform

    Arguments
    ---------
    filename : str
        The path to the parameter file
    delimiter : str, default=None
        The delimiter used in the file to distinguish between columns

    Raises
    ------
    ValueError
        If only a single group is defined across all parameters.
    """
    names = []
    bounds = []
    groups = []
    dists = []
    num_vars = 0
    fieldnames = ['name', 'lower_bound', 'upper_bound', 'group', 'dist']
    # BUG FIX: the old 'rU' mode was removed in Python 3.11; newline='' is
    # the documented way to hand a file to the csv module
    with open(filename, 'r', newline='') as csvfile:
        # sniff the dialect from a sample so any delimiter works
        dialect = csv.Sniffer().sniff(csvfile.read(1024), delimiters=delimiter)
        csvfile.seek(0)
        reader = csv.DictReader(
            csvfile, fieldnames=fieldnames, dialect=dialect)
        for row in reader:
            if row['name'].strip().startswith('#'):
                continue
            num_vars += 1
            names.append(row['name'])
            bounds.append(
                [float(row['lower_bound']), float(row['upper_bound'])])
            # If the fourth column is absent or 'NA', use the parameter name
            # as its own group.
            # BUG FIX: the original used `row['group'] is 'NA'`, an identity
            # comparison that is not guaranteed to match an equal string
            if row['group'] is None or row['group'] == 'NA':
                groups.append(row['name'])
            else:
                groups.append(row['group'])
            # If the fifth column does not contain a distribution, use uniform
            if row['dist'] is None:
                dists.append('unif')
            else:
                dists.append(row['dist'])
    if groups == names:
        # every parameter in its own group: treat as "no groups"
        groups = None
    elif len(set(groups)) == 1:
        raise ValueError('''Only one group defined, results will not be
        meaningful''')
    # setting dists to none if all are uniform
    # because non-uniform scaling is not needed
    if all(d == 'unif' for d in dists):
        dists = None
    return {'names': names, 'bounds': bounds, 'num_vars': num_vars,
            'groups': groups, 'dists': dists}
def compute_groups_matrix(groups):
    """Generate matrix which notes factor membership of groups.

    Computes a k-by-g matrix which notes factor membership of groups
    where:
        k is the number of variables (factors)
        g is the number of groups

    Also returns a g-length list of unique group_names whose positions
    correspond to the order of groups in the k-by-g matrix.

    Arguments
    ---------
    groups : list
        Group names corresponding to each variable

    Returns
    -------
    tuple or None
        group matrix assigning parameters to groups and a list of
        unique group names; None when `groups` is empty/falsy
    """
    if not groups:
        return None

    num_vars = len(groups)
    # OrderedDict.fromkeys de-duplicates while preserving first-seen order.
    unique_group_names = list(OrderedDict.fromkeys(groups))
    number_of_groups = len(unique_group_names)

    # Map each group name to its column index in the output matrix.
    indices = dict([(x, i) for (i, x) in enumerate(unique_group_names)])

    # BUGFIX: `np.int` was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin `int` is the documented replacement (identical dtype).
    output = np.zeros((num_vars, number_of_groups), dtype=int)

    for parameter_row, group_membership in enumerate(groups):
        group_index = indices[group_membership]
        output[parameter_row, group_index] = 1

    return output, unique_group_names
def requires_gurobipy(_has_gurobi):
    """Decorator factory guarding functions that require gurobipy.

    Takes a boolean `_has_gurobi`.  When it is False the wrapped function
    is never executed: an ImportWarning is emitted and None is returned
    instead.  Note that all runtime errors should be avoided in the
    working code, using brute force options as preference.
    """
    def _outer_wrapper(wrapped_function):
        def _wrapper(*args, **kwargs):
            if not _has_gurobi:
                warn("Gurobi not available", ImportWarning)
                return None
            return wrapped_function(*args, **kwargs)
        return _wrapper
    return _outer_wrapper
| {
"repo_name": "willu47/SALib",
"path": "src/SALib/util/__init__.py",
"copies": "1",
"size": "9906",
"license": "mit",
"hash": -8948273230084584000,
"line_mean": 31.3725490196,
"line_max": 79,
"alpha_frac": 0.5802543913,
"autogenerated": false,
"ratio": 4.314459930313589,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 306
} |
"""A set of utility functions
"""
from collections import OrderedDict
import csv
from warnings import warn
from .results import ResultDict
import pkgutil
import numpy as np # type: ignore
import scipy as sp # type: ignore
from scipy import stats
from typing import List
__all__ = ["scale_samples", "read_param_file",
"ResultDict", "avail_approaches"]
def avail_approaches(pkg):
    """Create list of available modules.

    Arguments
    ---------
    pkg : module
        module to inspect

    Returns
    ---------
    method : list
        A list of available submodules
    """
    # Helper modules that are not analysis/sampling approaches.
    excluded = ('common_args', 'directions', 'sobol_sequence')
    return [name for _, name, _ in pkgutil.walk_packages(path=pkg.__path__)
            if name not in excluded]
def scale_samples(params: np.ndarray, bounds: List):
    """Rescale samples in the 0-to-1 range to arbitrary bounds, in place.

    Arguments
    ---------
    params : numpy.ndarray
        numpy array of dimensions `num_params`-by-:math:`N`,
        where :math:`N` is the number of samples
    bounds : list
        list of lists of dimensions `num_params`-by-2
    """
    bounds_array = np.array(bounds)
    lower = bounds_array[:, 0]
    upper = bounds_array[:, 1]

    # Reject any interval whose upper bound is not above its lower bound.
    if np.any(lower >= upper):
        raise ValueError("Bounds are not legal")

    # In-place equivalent of: params * (upper - lower) + lower, using the
    # ufunc `out=` argument so the caller's array is mutated directly.
    np.multiply(params, upper - lower, out=params)
    np.add(params, lower, out=params)
def unscale_samples(params, bounds):
    """Rescale samples from arbitrary bounds back to the [0, 1] range, in place.

    Arguments
    ---------
    bounds : list
        list of lists of dimensions num_params-by-2
    params : numpy.ndarray
        numpy array of dimensions num_params-by-N,
        where N is the number of samples
    """
    bounds_array = np.array(bounds)
    lower = bounds_array[:, 0]
    upper = bounds_array[:, 1]

    # Reject any interval whose upper bound is not above its lower bound.
    if np.any(lower >= upper):
        raise ValueError("Bounds are not legal")

    # In-place equivalent of: (params - lower) / (upper - lower).
    np.subtract(params, lower, out=params)
    np.divide(params, np.subtract(upper, lower), out=params)
def nonuniform_scale_samples(params, bounds, dists):
    """Rescale samples in 0-to-1 range to other distributions.

    Arguments
    ---------
    params : numpy.ndarray
        numpy array of dimensions num_params-by-N,
        where N is the number of samples
    bounds : list
        list of lists of dimensions num_params-by-2; the meaning of the
        two entries depends on the matching distribution in `dists`
    dists : list
        list of distributions, one for each parameter
            unif: uniform with lower and upper bounds
            triang: triangular with width (scale) and location of peak;
                    location of peak is in percentage of width,
                    lower bound assumed to be zero
            norm: normal distribution with mean and standard deviation
            lognorm: lognormal with ln-space mean and standard deviation

    Returns
    -------
    numpy.ndarray
        new array (same shape as `params`) with values mapped onto the
        requested distributions; `params` itself is not modified
    """
    b = np.array(bounds)
    # initializing matrix for converted values
    conv_params = np.empty_like(params)
    # loop over the parameters (one column of `params` per parameter)
    for i in range(conv_params.shape[1]):
        # setting first and second arguments for distributions
        b1 = b[i][0]
        b2 = b[i][1]
        if dists[i] == 'triang':
            # checking for correct parameters
            if b1 <= 0 or b2 <= 0 or b2 >= 1:
                raise ValueError('''Triangular distribution: Scale must be
                greater than zero; peak on interval [0,1]''')
            else:
                # ppf maps the 0-1 sample through the inverse CDF
                conv_params[:, i] = sp.stats.triang.ppf(
                    params[:, i], c=b2, scale=b1, loc=0)
        elif dists[i] == 'unif':
            if b1 >= b2:
                raise ValueError('''Uniform distribution: lower bound
                must be less than upper bound''')
            else:
                conv_params[:, i] = params[:, i] * (b2 - b1) + b1
        elif dists[i] == 'norm':
            if b2 <= 0:
                raise ValueError('''Normal distribution: stdev must be > 0''')
            else:
                conv_params[:, i] = sp.stats.norm.ppf(
                    params[:, i], loc=b1, scale=b2)
        # lognormal distribution (ln-space, not base-10)
        # parameters are ln-space mean and standard deviation
        elif dists[i] == 'lognorm':
            # checking for valid parameters
            if b2 <= 0:
                raise ValueError(
                    '''Lognormal distribution: stdev must be > 0''')
            else:
                conv_params[:, i] = np.exp(
                    sp.stats.norm.ppf(params[:, i], loc=b1, scale=b2))
        else:
            valid_dists = ['unif', 'triang', 'norm', 'lognorm']
            raise ValueError('Distributions: choose one of %s' %
                             ", ".join(valid_dists))
    return conv_params
def read_param_file(filename, delimiter=None):
    """Unpack a parameter file into a dictionary.

    Reads a parameter file of format::

        Param1,0,1,Group1,dist1
        Param2,0,1,Group2,dist2
        Param3,0,1,Group3,dist3

    (Group and Dist columns are optional)

    Returns a dictionary containing:
        - names - the names of the parameters
        - bounds - a list of lists of lower and upper bounds
        - num_vars - a scalar indicating the number of variables
                     (the length of names)
        - groups - a list of group names (strings) for each variable
        - dists - a list of distributions for the problem,
                  None if not specified or all uniform

    Arguments
    ---------
    filename : str
        The path to the parameter file
    delimiter : str, default=None
        The delimiter used in the file to distinguish between columns
    """
    names = []
    bounds = []
    groups = []
    dists = []
    fieldnames = ['name', 'lower_bound', 'upper_bound', 'group', 'dist']

    with open(filename, 'r') as csvfile:
        # Let the csv module work out the delimiter from a sample.
        dialect = csv.Sniffer().sniff(csvfile.read(1024), delimiters=delimiter)
        csvfile.seek(0)
        for row in csv.DictReader(csvfile, fieldnames=fieldnames,
                                  dialect=dialect):
            # Lines whose name starts with '#' are comments.
            if row['name'].strip().startswith('#'):
                continue
            names.append(row['name'])
            bounds.append(
                [float(row['lower_bound']), float(row['upper_bound'])])

            # Missing or 'NA' group column -> parameter is its own group.
            group = row['group']
            if group is None or group == 'NA':
                groups.append(row['name'])
            else:
                groups.append(group)

            # Missing distribution column -> uniform.
            dist = row['dist']
            dists.append('unif' if dist is None else dist)

    num_vars = len(names)

    if groups == names:
        groups = None
    elif len(set(groups)) == 1:
        raise ValueError('''Only one group defined, results will not be
        meaningful''')

    # Non-uniform scaling is unnecessary when every distribution is uniform.
    if all(d == 'unif' for d in dists):
        dists = None

    return {'names': names, 'bounds': bounds, 'num_vars': num_vars,
            'groups': groups, 'dists': dists}
def compute_groups_matrix(groups):
    """Generate matrix which notes factor membership of groups.

    Computes a k-by-g matrix which notes factor membership of groups
    where k is the number of variables (factors) and g is the number of
    groups.  Also returns a g-length list of unique group names whose
    positions correspond to the columns of the k-by-g matrix.

    Arguments
    ---------
    groups : list
        Group names corresponding to each variable

    Returns
    -------
    tuple or None
        group matrix assigning parameters to groups and a list of
        unique group names; None when `groups` is empty/falsy
    """
    if not groups:
        return None

    # Preserve first-seen order while removing duplicates.
    unique_group_names = list(OrderedDict.fromkeys(groups))
    column_of = {name: col for col, name in enumerate(unique_group_names)}

    output = np.zeros((len(groups), len(unique_group_names)), dtype=int)
    for row, name in enumerate(groups):
        output[row, column_of[name]] = 1

    return output, unique_group_names
def requires_gurobipy(_has_gurobi):
    """Decorator factory which takes a boolean `_has_gurobi`.

    Use it to decorate any functions which require gurobi.  When gurobi
    is unavailable the decorated function warns and returns None instead
    of running.  Note that all runtime errors should be avoided in the
    working code, using brute force options as preference.
    """
    def _outer_wrapper(wrapped_function):
        def _wrapper(*args, **kwargs):
            if _has_gurobi:
                return wrapped_function(*args, **kwargs)
            warn("Gurobi not available", ImportWarning)
            return None
        return _wrapper
    return _outer_wrapper
| {
"repo_name": "SALib/SALib",
"path": "src/SALib/util/__init__.py",
"copies": "1",
"size": "10001",
"license": "mit",
"hash": -1077771083079003900,
"line_mean": 31.3656957929,
"line_max": 79,
"alpha_frac": 0.5815418458,
"autogenerated": false,
"ratio": 4.310775862068965,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 309
} |
"""A set of utility functions that are used elsewhere in radiotool
"""
import sys
from subprocess import check_output
try:
import libxmp
LIBXMP = True
except ImportError:
LIBXMP = False
import numpy as N
def log_magnitude_spectrum(frames):
    """Compute the log magnitude spectrum of frames (clipped below at 1e-5)."""
    spectrum = N.abs(N.fft.rfft(frames))
    # Clip away zeros so the log never sees a non-positive value.
    return N.log(spectrum.clip(1e-5, N.inf))
def magnitude_spectrum(frames):
    """Compute the magnitude spectrum of frames."""
    return N.absolute(N.fft.rfft(frames))
def RMS_energy(frames):
    """Compute the RMS (root-mean-square) energy of frames."""
    flat = frames.flatten()
    return N.sqrt(N.mean(N.square(flat)))
def normalize_features(features):
    """Linearly rescale the features array so its values span [0, 1]."""
    lo = N.min(features)
    hi = N.max(features)
    return (features - lo) / (hi - lo)
def zero_crossing_last(frames):
    """Find the last zero crossing in frames.

    Returns the index where the last sign change between neighbouring
    samples occurs, or the final index when there is no crossing.
    """
    frames = N.array(frames)
    # A crossing is any index where the sign differs from its neighbour.
    crossings = N.where(N.diff(N.sign(frames)))
    # crossings = N.where(frames[:n] * frames[1:n + 1] < 0)
    if len(crossings[0]) == 0:
        # BUGFIX: Python-2-only `print` statement replaced with the
        # function form, which behaves identically here on Python 2 too.
        print("No zero crossing")
        return len(frames) - 1
    return crossings[0][-1]
def zero_crossing_first(frames):
    """Find the first zero crossing in frames.

    Returns the index just after the first sign change between
    neighbouring samples, or 0 when there is no crossing.
    """
    frames = N.array(frames)
    crossings = N.where(N.diff(N.sign(frames)))
    # crossings = N.where(frames[n - 1:-1] * frames[n:] < 0)
    if len(crossings[0]) == 0:
        # BUGFIX: Python-2-only `print` statement replaced with the
        # function form, which behaves identically here on Python 2 too.
        print("No zero crossing")
        return 0
    return crossings[0][0] + 1
# Crossfading helper methods
# borrowed from echonest remix
def log_factor(arr):
    """Perceptual (power-law) fade curve used by the crossfade helpers."""
    return N.power(arr, 0.6)
def limiter(arr):
    """Restrict the maximum and minimum values of arr.

    Samples whose magnitude exceeds ~0.9155 (30000/32767 of full scale)
    are compressed with an arctan curve; everything else passes through
    unchanged.  Returns a new array; `arr` is not modified.
    """
    # Constants expressed as fractions of 16-bit full scale.
    dyn_range = 32767.0 / 32767.0
    lim_thresh = 30000.0 / 32767.0
    lim_range = dyn_range - lim_thresh

    new_arr = arr.copy()

    # Compress the positive overshoot.
    over = N.where(arr > lim_thresh)[0]
    new_arr[over] = (new_arr[over] - lim_thresh) / lim_range
    new_arr[over] = (N.arctan(new_arr[over]) * 2.0 / N.pi) * \
        lim_range + lim_thresh

    # Compress the negative overshoot (mirror image of the above).
    under = N.where(arr < -lim_thresh)[0]
    new_arr[under] = -(new_arr[under] + lim_thresh) / lim_range
    new_arr[under] = -(
        N.arctan(new_arr[under]) * 2.0 / N.pi * lim_range + lim_thresh)

    return new_arr
def linear(arr1, arr2):
    """
    Create a linear blend of arr1 (fading out) and arr2 (fading in).

    Both inputs must have the same length along axis 0; 2-D input is
    blended per channel.
    """
    shape = N.shape(arr1)
    n = shape[0]
    # BUGFIX: the original used a bare `except:` to detect 1-D input,
    # which also swallows unrelated errors (even KeyboardInterrupt);
    # inspect the shape explicitly instead.
    channels = shape[1] if len(shape) > 1 else 1

    f_in = N.linspace(0, 1, num=n)
    f_out = N.linspace(1, 0, num=n)
    # f_in = N.arange(n) / float(n - 1)
    # f_out = N.arange(n - 1, -1, -1) / float(n)

    if channels > 1:
        # Replicate the fade ramps across channels.
        f_in = N.tile(f_in, (channels, 1)).T
        f_out = N.tile(f_out, (channels, 1)).T

    vals = f_out * arr1 + f_in * arr2
    return vals
def equal_power(arr1, arr2):
    """
    Create an equal power blend of arr1 (fading out) and arr2 (fading in).

    The fade ramps are warped with `log_factor` so perceived loudness
    stays roughly constant, and the sum is passed through `limiter` to
    avoid clipping.
    """
    shape = N.shape(arr1)
    n = shape[0]
    # BUGFIX: bare `except:` replaced with an explicit dimensionality
    # check — the try/except also hid unrelated errors.
    channels = shape[1] if len(shape) > 1 else 1

    f_in = N.arange(n) / float(n - 1)
    f_out = N.arange(n - 1, -1, -1) / float(n)

    if channels > 1:
        f_in = N.tile(f_in, (channels, 1)).T
        f_out = N.tile(f_out, (channels, 1)).T

    vals = log_factor(f_out) * arr1 + log_factor(f_in) * arr2
    return limiter(vals)
def segment_array(arr, length, overlap=.5):
    """
    Segment array into chunks of a specified length, with a specified
    proportion overlap.

    Operates on axis 0.

    :param integer length: Length of each segment
    :param float overlap: Proportion overlap of each frame
    """
    arr = N.array(arr)
    # BUGFIX: the hop size must be an integer — float slice indices raise
    # TypeError on Python 3 / modern NumPy (the original Python-2 code
    # relied on silent truncation).  `xrange` was likewise Python-2-only.
    offset = int(float(overlap) * length)
    total_segments = int((N.shape(arr)[0] - length) / offset) + 1
    # print "total segments", total_segments

    # Output keeps any trailing dimensions of the input.
    other_shape = N.shape(arr)[1:]
    out_shape = [total_segments, length]
    out_shape.extend(other_shape)

    out = N.empty(out_shape)
    for i in range(total_segments):
        out[i][:] = arr[i * offset:i * offset + length]
    return out
def wav_to_mp3(wavfn, delete_wav=False, lame_quality="V 2"):
    """Encode a wav file to mp3 using the external `lame` encoder.

    Copies XMP metadata from the wav to the mp3 when python-xmp (libxmp)
    is available.

    :param str wavfn: path of the source wav file
    :param bool delete_wav: remove the wav file after encoding
    :param str lame_quality: quality switch passed to lame (e.g. "V 2")
    """
    mp3fn = ".".join(wavfn.split('.')[:-1]) + '.mp3'
    # NOTE(review): building a shell string means `wavfn` must not carry
    # untrusted input; an argument list with shell=False would be safer,
    # but the quality-flag format makes that a behavior change.
    check_output('lame -{} "{}"'.format(lame_quality, wavfn), shell=True)

    if LIBXMP:
        xmpfile = libxmp.XMPFiles(file_path=wavfn)
        xmpfile2 = libxmp.XMPFiles(file_path=mp3fn, open_forupdate=True)
        xmp = xmpfile.get_xmp()
        try:
            if xmpfile2.can_put_xmp(xmp):
                xmpfile2.put_xmp(xmp)
            else:
                # BUGFIX: Python-2 `print` statements converted to calls.
                print("Could not convert xmp from wav to mp3")
        except Exception:
            # BUGFIX: was a bare `except:`; keep the best-effort behavior
            # but stop swallowing SystemExit/KeyboardInterrupt.
            print("File has no xmp data")
        xmpfile.close_file()
        xmpfile2.close_file()

    if delete_wav:
        check_output('rm "{}"'.format(wavfn), shell=True)
| {
"repo_name": "ucbvislab/radiotool",
"path": "radiotool/utils.py",
"copies": "1",
"size": "4738",
"license": "isc",
"hash": 7685430849038703000,
"line_mean": 24.2021276596,
"line_max": 77,
"alpha_frac": 0.5930772478,
"autogenerated": false,
"ratio": 2.9761306532663316,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4069207901066332,
"avg_score": null,
"num_lines": null
} |
"""A set of utility functions to evaluate expressions.
Sample Usage:
print(SgExpression.ExtractTokensFromExpression("name + issues_url"))
print(SgExpression.ExtractTokensFromExpressions(["name + issues_url", "issues_url - id"]))
print(SgExpression.IsAllTokensInAggregate(["avg(mycount + 5)", "max(5 + min(yo))"]))
print(SgExpression.IsAllTokensInAggregate(["avg(mycount + 5) + secondcount", "max(5 + min(yo))"]))
table = tb.SgTable()
table.SetFields([u"a", u"b", u"a_b", u"c"])
table.Append([1, 2, 3, u"A"])
table.Append([2, 4, 6, u"BB"])
table.Append([3, 6, 9, u"CCC"])
table.Append([4, 8, 12, u"ABC"])
print(SgExpression.EvaluateExpression(table, u"a * (b - a_b)"))
print(SgExpression.EvaluateExpression(table, u"MIN(a * (b - a_b))"))
print(SgExpression.EvaluateExpression(table, u"MAX(a * (b - a_b))"))
print(SgExpression.EvaluateExpression(table, u"-7 + a*(b-a_b)"))
print(SgExpression.EvaluateExpression(table, u"max(a*(b-a_b))"))
print("---")
print(SgExpression.EvaluateExpression(table, u"a * b - a_b + a_b % a"))
print(SgExpression.EvaluateExpression(table, u"MIN(a * b - a_b + a_b % a)"))
print(SgExpression.EvaluateExpression(table, u"MAX(a * b - a_b + a_b % a)"))
print(SgExpression.EvaluateExpression(table, u"a*b-a_b+a_b%a"))
print(SgExpression.EvaluateExpression(table, u"max(a*b-a_b+a_b%a)"))
print("---")
print(SgExpression.EvaluateExpression(table, u"3 + 2 = 5"))
print(SgExpression.EvaluateExpression(table, u"6 = 3 + 2"))
print(SgExpression.EvaluateExpression(table, u"a_b * a_b - b > 10"))
print(SgExpression.EvaluateExpression(table, u"\"aaa\" is \"aaa\""))
print(SgExpression.EvaluateExpression(table, u"c like \"%\""))
print(SgExpression.EvaluateExpression(table, u"c like \"%A\""))
print(SgExpression.EvaluateExpression(table, u"c like \"C_C\""))
print(SgExpression.EvaluateExpression(table, u"c like \"A\""))
print(SgExpression.EvaluateExpression(table, "\"%%%\" like \"\\%_\\%\""))
print(SgExpression.EvaluateExpression(table, "\"a%%\" like \"\\%_\\%\""))
print(SgExpression.EvaluateExpression(table, "\"%a%\" like \"\\%_\\%\""))
print(SgExpression.EvaluateExpression(table, u"c regexp \"B*\""))
print(SgExpression.EvaluateExpression(table, u"c regexp \"[B-C]*\""))
print(SgExpression.EvaluateExpression(table, u"\"BB\" in (\"A\", \"B\", c)"))
print("---")
print(SgExpression.EvaluateExpression(table, "not a + 2 >= b"))
print(SgExpression.EvaluateExpression(table, "not a + 2 >= b and a_b > 10"))
print(SgExpression.EvaluateExpression(table, "b < 5 || a_b > 10"))
print("---")
print(SgExpression.EvaluateExpression(table, "sum(a + b)"))
print(SgExpression.EvaluateExpression(table, "avg(a * a)"))
print(SgExpression.EvaluateExpression(table, u"CONCAT(\"a\", c, \"ccc\", -7 + 8)"))
"""
import re
import regex # need recursive pattern
import definition as df
import utilities as util
import table as tb
import math
import datetime
class SgExpression:
    """A set of utility functions to evaluate expressions."""

    # (?:something) means a non-capturing group.
    # Matches any word that isn't postfixed with a '(' (not a function name).
    # A non-alpha character is required as matching postfix to prevent cases
    # like 'www(' having a match 'ww'.
    _TOKEN_BODY = r"([a-zA-Z_]+)"
    _TOKEN_POST = r"(?:[^\(a-zA-Z_]|$)"
    _TOKEN_REGEX = _TOKEN_BODY + _TOKEN_POST
    # Double- and single-quoted string literals, with backslash escapes.
    _DBL_STR_REGEX = r"\"(?:[^\\\"]|\\.)*\""
    _SGL_STR_REGEX = r"\'(?:[^\\\']|\\.)*\'"
@classmethod
def ExtractTokensFromExpressions(cls, exprs):
    """Collect the distinct field tokens used across `exprs`.

    Returns [u"*"] as soon as any expression is the wildcard.  String
    literals are stripped first so quoted text never counts as a token;
    reserved words (df.ALL_TOKENS) are excluded.
    """
    found = set()
    for expr in exprs:
        if expr == u"*":
            return [u"*"]
        # Remove string literals (double- then single-quoted).
        stripped = re.sub(cls._SGL_STR_REGEX, r"",
                          re.sub(cls._DBL_STR_REGEX, r"", expr))
        found.update(tok for tok in re.findall(cls._TOKEN_REGEX, stripped)
                     if tok not in df.ALL_TOKENS)
    return list(found)
@classmethod
def IsAllTokensInAggregate(cls, exprs):
    """Return True iff every field token in every expression appears inside
    an aggregate-function call (e.g. avg(...), max(5 + min(x))).

    Works by repeatedly deleting aggregate calls — including nested
    parentheses, via the recursive `regex` pattern — and then checking
    whether any token survives.
    """
    # (?R) recursively matches balanced parentheses inside the call;
    # (?>...) is an atomic group (both need the third-party `regex` module).
    aggr_regex = r"((?:" + r"|".join(df.AGGREGATE_FUNCTIONS) + r")\((?:(?>[^\(\)]+|(?R))*)\))"
    for expr in exprs:
        expr_rem = re.sub(cls._DBL_STR_REGEX, r"", expr)
        expr_rem = re.sub(cls._SGL_STR_REGEX, r"", expr_rem)  # string literals removed
        while True:
            # Strip one layer of aggregate calls per pass until fixpoint.
            prev_len = len(expr_rem)
            expr_rem = regex.sub(aggr_regex, r"", expr_rem)  # one aggregate function removed
            if len(expr_rem) == prev_len:
                break
        if re.search(cls._TOKEN_REGEX, expr_rem):
            return False
    return True
@classmethod
def _IsFieldTokenCharacter(cls, ch):
return ch.isalpha() or ch == u"_"
@classmethod
def _IsOperatorCharacter(cls, ch):
return not ch.isspace()
@classmethod
def _IsNumericCharacter(cls, ch):
return ch.isdigit() or ch == u"."
@classmethod
def _GetPrecedence(cls, opr):
    """Operator precedence; the end marker (None/empty) binds weakest."""
    if not opr:
        return -100
    return df.PRECEDENCE[opr]
@classmethod
def _EvaluateOperatorBack(cls, opds, oprs):
    """Pop the top operator and apply it to the top operand(s) of every row.

    `opds` holds one operand stack per table row; `oprs` is the shared
    operator stack.  Binary operators consume the top two operands of
    each row's stack and push the result in their place.
    """
    opr = oprs[-1]
    oprs.pop()
    rows = len(opds)
    if opr == u",":  # special case: have to process every u","
        # Fold the top operand into the argument list beneath it.
        for i in range(rows):
            opds[i] = opds[i][:-2] + [opds[i][-2] + [opds[i][-1]]]
    elif opr == u"*":
        for i in range(rows):
            res = opds[i][-2] * opds[i][-1]
            opds[i] = opds[i][:-2] + [res]
    elif opr == u"/":
        for i in range(rows):
            res = opds[i][-2] / opds[i][-1]
            opds[i] = opds[i][:-2] + [res]
    elif opr == u"%":
        for i in range(rows):
            res = opds[i][-2] % opds[i][-1]
            opds[i] = opds[i][:-2] + [res]
    elif opr == u"+":
        for i in range(rows):
            res = opds[i][-2] + opds[i][-1]
            opds[i] = opds[i][:-2] + [res]
    elif opr == u"-":
        for i in range(rows):
            res = opds[i][-2] - opds[i][-1]
            opds[i] = opds[i][:-2] + [res]
    elif opr == u"==":  # shouldn't work with None but it does atm
        for i in range(rows):
            res = opds[i][-2] == opds[i][-1]
            opds[i] = opds[i][:-2] + [res]
    elif opr == u">=":
        for i in range(rows):
            res = opds[i][-2] >= opds[i][-1]
            opds[i] = opds[i][:-2] + [res]
    elif opr == u">":
        for i in range(rows):
            res = opds[i][-2] > opds[i][-1]
            opds[i] = opds[i][:-2] + [res]
    elif opr == u"<=":
        for i in range(rows):
            res = opds[i][-2] <= opds[i][-1]
            opds[i] = opds[i][:-2] + [res]
    elif opr == u"<":
        for i in range(rows):
            res = opds[i][-2] < opds[i][-1]
            opds[i] = opds[i][:-2] + [res]
    elif opr == u"<>":
        for i in range(rows):
            res = opds[i][-2] != opds[i][-1]
            opds[i] = opds[i][:-2] + [res]
    elif opr == u"!=":
        for i in range(rows):
            res = opds[i][-2] != opds[i][-1]
            opds[i] = opds[i][:-2] + [res]
    elif opr == u"is":
        # SQL IS is implemented as plain equality here.
        for i in range(rows):
            res = opds[i][-2] == opds[i][-1]
            opds[i] = opds[i][:-2] + [res]
    elif opr == u"like":
        # Translate the SQL LIKE pattern to a regex: % -> .*, _ -> .,
        # backslash escapes %/_; everything else is matched literally.
        for i in range(rows):
            is_escaping = False
            regex = r""
            for ch in opds[i][-1]:
                if is_escaping:  # \% \_
                    regex += ch
                    is_escaping = False
                elif ch == "\\":
                    is_escaping = True
                elif ch == "%":
                    regex += ".*"
                elif ch == "_":
                    regex += "."
                else:
                    regex += re.escape(ch)
            regex += r"$"
            # Falsy left operand (None, empty string) never matches.
            res = True if opds[i][-2] and re.match(regex, opds[i][-2]) else False
            opds[i] = opds[i][:-2] + [res]
    elif opr == u"regexp":
        for i in range(rows):
            # Anchor at the end; re.match already anchors at the start.
            regex = re.compile(opds[i][-1] + "$")
            res = True if opds[i][-2] and re.match(regex, opds[i][-2]) else False
            opds[i] = opds[i][:-2] + [res]
    elif opr == u"in":
        for i in range(rows):
            res = opds[i][-2] in opds[i][-1]
            opds[i] = opds[i][:-2] + [res]
    elif opr == u"not":
        # Unary: negate the top operand in place.
        for i in range(rows):
            opds[i][-1] = not opds[i][-1]
    elif opr in (u"and", u"&&"):
        for i in range(rows):
            res = opds[i][-2] and opds[i][-1]
            opds[i] = opds[i][:-2] + [res]
    elif opr == "xor":
        for i in range(rows):
            res = opds[i][-2] != opds[i][-1]  # assumes both are boolean's
            opds[i] = opds[i][:-2] + [res]
    elif opr in (u"or", u"||"):
        for i in range(rows):
            res = opds[i][-2] or opds[i][-1]
            opds[i] = opds[i][:-2] + [res]
@classmethod
def _EvaluateFunction(cls, opds, func):
    """Apply SQL function `func` to the top operand of every row's stack.

    Returns a list with one result per row.  Aggregate functions
    (avg/count/max/min/sum) reduce over all rows and broadcast the result
    back to every row.  Multi-argument functions receive their arguments
    as a list in row[-1].  Unknown names pass the operand through
    unchanged.
    """
    # TODO(lnishan): Add new function names to definitions.py
    rows = len(opds)
    if func == "zero":  # dummy function
        return [0] * rows
    # --- aggregate functions: one value computed over all rows ---
    if func == "avg":
        avg = sum(row[-1] for row in opds) / float(rows)
        res = []
        for i in range(rows):
            res.append(avg)
        return res
    elif func == "count":
        res = []
        for i in range(rows):
            res.append(rows)
        return res
    elif func == "max":
        mx = max(row[-1] for row in opds)
        res = []
        for i in range(rows):
            res.append(mx)
        return res
    elif func == "min":
        mn = min(row[-1] for row in opds)
        res = []
        for i in range(rows):
            res.append(mn)
        return res
    elif func == "sum":
        sm = sum(row[-1] for row in opds)
        res = []
        for i in range(rows):
            res.append(sm)
        return res
    # --- string functions, applied row by row ---
    elif func == "ascii":
        res = []
        for row in opds:
            res.append(u" ".join(str(ord(i)) for i in row[-1]))
        return res
    elif func == "concat":
        res = []
        for row in opds:
            cstr = u""
            for val in row[-1]:
                cstr += util.GuaranteeUnicode(val)
            res.append(cstr)
        return res
    elif func == "concat_ws":
        # First argument is the separator; remaining args are joined.
        res = []
        for row in opds:
            cstr = u""
            sep = row[-1][0]
            for val in row[-1][:-1]:
                if val != sep:
                    cstr += util.GuaranteeUnicode(val)
                    cstr += sep
            cstr += util.GuaranteeUnicode(row[-1][-1])
            res.append(cstr)
        return res
    elif func == "find_in_set":
        res = []
        for row in opds:
            cstr = row[-1][-1]
            subs = row[-1][-2]
            if subs in cstr:
                # 1-based position, 0 when absent (MySQL semantics).
                res.append(cstr.index(subs) + 1)
            else:
                res.append(0)
        return res
    elif func == "insert":
        res = []
        for row in opds:
            # Args: (string, position, length, substring); position is 1-based.
            x = row[-1][-3] - 1
            y = row[-1][-2]
            str = row[-1][-4]
            subs = row[-1][-1]
            res.append(str[:x] + subs + str[x+y-1:])
        return res
    elif func == "instr":
        res = []
        for row in opds:
            # 1-based index of substring, 0 when absent.
            res.append(row[-1][-2].find(row[-1][-1]) + 1)
        return res
    elif func in (u"lcase", u"lower"):
        res = []
        for row in opds:
            res.append(row[-1].lower())
        return res
    elif func == "left":
        res = []
        for row in opds:
            n_char = row[-1][-1]
            subs = row[-1][-2]
            res.append(subs[:n_char])
        return res
    elif func == "length":
        res = []
        for row in opds:
            res.append(len(row[-1]))
        return res
    elif func == "locate":
        res = []
        for row in opds:
            # 2- or 3-argument form: (substr, str [, start_pos]).
            x = len(row[-1])
            if x == 3:
                st_pos = row[-1].pop()
            cstr = row[-1].pop()
            subs = row[-1].pop()
            if x == 3:
                res.append(cstr.find(subs, st_pos) + 1)
            else:
                res.append(cstr.find(subs) + 1)
        return res
    elif func in (u"mid", u"substr", u"substring"):
        res = []
        for row in opds:
            # 2- or 3-argument form: (str, start [, length]); start is 1-based.
            x = len(row[-1])
            if x == 3:
                n_len = row[-1].pop()
            n_st = row[-1].pop() - 1
            subs = row[-1].pop()
            if x == 3:
                n_end = n_st + n_len
                res.append(subs[n_st:n_end])
            else:
                res.append(subs[n_st:])
        return res
    elif func == "repeat":
        res = []
        for row in opds:
            cstr = u""
            for i in range(row[-1][-1]):
                cstr += row[-1][-2]
            res.append(cstr)
        return res
    elif func == "replace":
        res = []
        for row in opds:
            res.append(row[-1][-3].replace(row[-1][-2], row[-1][-1]))
        return res
    elif func == "right":
        res = []
        for row in opds:
            n_char = row[-1][-1]
            subs = row[-1][-2]
            res.append(subs[-n_char:])
        return res
    elif func == "strcmp":
        # NOTE(review): returns equality (bool), not MySQL's -1/0/1 ordering.
        res = []
        for row in opds:
            res.append((row[-1][-1] == row[-1][-2]))
        return res
    elif func in (u"ucase", u"upper"):
        res = []
        for row in opds:
            res.append(row[-1].upper())
        return res
    # --- numeric functions ---
    elif func == "abs":
        res = []
        for row in opds:
            res.append(abs(row[-1]))
        return res
    elif func in (u"ceil", u"ceiling"):
        res = []
        for row in opds:
            res.append(math.ceil(row[-1]))
        return res
    elif func == "exp":
        res = []
        for row in opds:
            res.append(math.exp(row[-1]))
        return res
    elif func == "floor":
        res = []
        for row in opds:
            res.append(math.floor(row[-1]))
        return res
    elif func == "greatest":
        res = []
        for row in opds:
            res.append(max(row[-1]))
        return res
    elif func == "least":
        res = []
        for row in opds:
            res.append(min(row[-1]))
        return res
    elif func in (u"ln", u"log"):
        res = []
        for row in opds:
            res.append(math.log(row[-1]))
        return res
    elif func in (u"pow", u"power"):
        res = []
        for row in opds:
            res.append(math.pow(row[-1][-2], row[-1][-1]))
        return res
    elif func == "sign":
        res = []
        for row in opds:
            # Sign as -1/0/1 without branching.
            res.append((row[-1] > 0) - (row[-1] < 0))
        return res
    elif func == "sqrt":
        res = []
        for row in opds:
            res.append(math.sqrt(row[-1]))
        return res
    # --- date/time functions (evaluated at call time, formatted as text) ---
    elif func in (u"curdate", u"current_date"):
        res = []
        for row in opds:
            res.append(datetime.date.today().strftime('%Y-%m-%d'))
        return res
    elif func in (u"curtime", u"current_time"):
        res = []
        for row in opds:
            res.append(datetime.datetime.now().strftime('%H:%M:%S'))
        return res
    elif func in (u"current_timestamp", u"local", u"localtimestamp", u"now"):
        res = []
        for row in opds:
            res.append(datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'))
        return res
    elif func == "bin":
        res = []
        for row in opds:
            # Strip Python's '0b' prefix.
            res.append(bin(row[-1])[2:])
        return res
    else:
        # Unknown function name: pass the operands through unchanged.
        res = [row[-1] for row in opds]
        return res
@classmethod
def _EvaluateOperator(cls, opds, oprs, opr=None):
    """Shunting-yard step: push `opr`, first evaluating any operators on
    the stack with precedence >= the incoming one.

    opr == "(" opens a (possibly function) scope, ")" closes it and
    applies the pending function name, "," separates arguments, and None
    flushes the whole stack at end of expression.
    """
    prec = cls._GetPrecedence(opr)
    rows = len(opds)
    if opr == u"(":
        # Empty string marks "no function name" for a plain group.
        oprs.append(u"")
        oprs.append(opr)
    elif opr == u")":
        # Evaluate back to the matching "(", then apply the function
        # name (if any) pushed just before it.
        while oprs and oprs[-1] != u"(":
            cls._EvaluateOperatorBack(opds, oprs)
        oprs.pop()
        func = oprs.pop().lower()
        if func:
            res = cls._EvaluateFunction(opds, func)
            for i in range(rows):
                opds[i][-1] = res[i]
    elif opr == u",":
        # Flush pending operators, then start or extend an argument list.
        while oprs and cls._GetPrecedence(oprs[-1]) >= prec and oprs[-1] != ",":
            cls._EvaluateOperatorBack(opds, oprs)
        if (not oprs) or (oprs and oprs[-1] != ","):
            # First comma of the call: wrap the first argument in a list.
            for i in range(rows):
                opds[i][-1] = [opds[i][-1]]
        else:
            # Subsequent comma: fold the top operand into the list below.
            oprs.pop()
            for i in range(rows):
                opds[i] = opds[i][:-2] + [opds[i][-2] + [opds[i][-1]]]
        oprs.append(opr)
    else:
        while oprs and cls._GetPrecedence(oprs[-1]) >= prec:
            cls._EvaluateOperatorBack(opds, oprs)
        if opr:
            oprs.append(opr)
@classmethod
def _ProcessOperator(cls, is_start, opds, oprs, token):
    """Normalize an operator token and hand it to the evaluator.

    A '-' at the start of an expression or argument is unary negation
    (encoded as '--'); '=' is normalized to '=='.
    """
    # Removed unused local `rows = len(opds)` (dead code).
    token = token.lower()
    if token == u"-":
        token = u"--" if is_start else u"-"
    elif token == u"=":
        token = u"=="
    cls._EvaluateOperator(opds, oprs, token)
@classmethod
def EvaluateExpression(cls, table, expr):
    """Evaluate `expr` once per row of `table`; return a list of results.

    Character-level tokenizer feeding a shunting-yard evaluator: `opds`
    holds one operand stack per row, `oprs` is the shared operator
    stack.  Field tokens are resolved to column values via
    table.GetVals().
    """
    rows = len(table)
    opds = []
    oprs = []
    for _ in range(rows):
        opds.append([])
    reading = None  # None = nothing, 0 = operator, 1 = field tokens (can be operator too), 2 = number, 3 = string
    is_start = True  # True right after '(' or ',' — where '-' is unary
    is_escaping = False
    string_ch = None  # the quote character that opened the current string
    token = u""
    expr += u" "  # add a terminating character (to end token parsing)
    for idx, ch in enumerate(expr):
        if reading == 3:  # string
            if is_escaping:
                # unescape characters
                token += util.Unescape(ch)
                is_escaping = False
            elif ch == "\\":
                is_escaping = True
            elif ch == string_ch:
                # Closing quote: push the literal onto every row's stack.
                for i in range(rows):
                    opds[i].append(token)
                token = u""
                string_ch = None
                reading = None
            else:
                token += ch
        elif reading == 2:  # number
            if cls._IsNumericCharacter(ch):
                token += ch
            else:
                # Number complete: int unless it contains a decimal point.
                num = float(token) if u"." in token else int(token)
                for i in range(rows):
                    opds[i].append(num)
                token = u""
                if cls._IsOperatorCharacter(ch):
                    reading = 0
                    token = ch
                    if ch in (u"(", u","):
                        is_start = True
                else:
                    reading = None
        elif reading == 1:  # field token / word operator / function name
            if cls._IsFieldTokenCharacter(ch):
                token += ch
            else:
                if token.lower() in df.OPERATOR_TOKENS:
                    # Word operator such as 'and', 'like', 'not'.
                    cls._ProcessOperator(is_start, opds, oprs, token)
                    token = u""
                    if ch.isspace():
                        reading = None
                    else:
                        token = ch
                        if ch in (u"\"", "\'"):
                            reading = 3
                            token = u""
                            string_ch = ch
                        elif cls._IsNumericCharacter(ch):
                            reading = 2
                        elif cls._IsFieldTokenCharacter(ch):
                            reading = 1
                        elif cls._IsOperatorCharacter(ch):
                            reading = 0
                            if ch in (u"(", u","):
                                is_start = True
                elif ch == u"(":  # function
                    oprs.append(token)
                    oprs.append(ch)
                    # Peek past spaces: a zero-argument call like now()
                    # still needs a placeholder operand on every stack.
                    idx2 = idx + 1
                    while idx2 < len(expr) and expr[idx2] == u" ":
                        idx2 = idx2 + 1
                    if idx2 < len(expr) and expr[idx2] == u")":
                        for i in range(rows):
                            opds[i].append(None)
                    is_start = True
                    token = u""
                    reading = None
                else:
                    # Plain field token: substitute the column values.
                    vals = table.GetVals(token)
                    for i in range(rows):
                        opds[i].append(vals[i])
                    token = u""
                    if cls._IsOperatorCharacter(ch):
                        reading = 0
                        token = ch
                        if ch in (u"(", u","):
                            is_start = True
                    else:
                        reading = None
        elif reading == 0:  # symbolic operator
            # Decide whether `ch` continues the current operator token.
            if token == u"":
                is_opr = True
            elif token in (u"(", u")", u",", u"+", u"-", u"*", u"/", u"%"):
                is_opr = False  # just to terminate the current segment
            elif token and ch == u"(":  # r".+\(" cannot be an operator
                is_opr = False
            elif token.isalpha():
                is_opr = ch.isalpha()
            else:
                is_opr = (not ch.isalnum()) and (not ch.isspace())
            if is_opr:
                token += ch
            else:
                cls._ProcessOperator(is_start, opds, oprs, token)
                token = u""
                if ch.isspace():
                    reading = None
                else:
                    token = ch
                    if ch in (u"\"", "\'"):
                        reading = 3
                        token = u""
                        string_ch = ch
                    elif cls._IsNumericCharacter(ch) or (ch == u"-" and is_start):
                        reading = 2
                    elif cls._IsFieldTokenCharacter(ch):
                        reading = 1
                    elif cls._IsOperatorCharacter(ch):
                        reading = 0
                    is_start = ch in (u"(", u",")
        else:  # None — between tokens
            if ch.isspace():
                reading = None
            else:
                token += ch
                if ch in (u"\"", u"\'"):
                    reading = 3
                    token = u""
                    string_ch = ch
                elif cls._IsNumericCharacter(ch) or (ch == u"-" and is_start):
                    reading = 2
                elif cls._IsFieldTokenCharacter(ch):
                    reading = 1
                elif cls._IsOperatorCharacter(ch):
                    reading = 0
                is_start = ch in (u"(", u",")
    # Flush whatever operators remain on the stack.
    cls._EvaluateOperator(opds, oprs)  # opr = None
    return [row[0] for row in opds]
@classmethod
def EvaluateExpressions(cls, table, exprs):
    """Evaluate every expression against `table`, one result column each.

    Returns a new SgTable whose fields are the expression strings.
    """
    result = tb.SgTable()
    result.SetFields(exprs)
    for _ in range(len(table)):
        result.Append([])
    for expr in exprs:
        for i, val in enumerate(cls.EvaluateExpression(table, expr)):
            # TODO(lnishan): Fix the cheat here (reaches into SgTable
            # internals instead of using a public API).
            result._table[i].append(val)
    return result
| {
"repo_name": "lnishan/SQLGitHub",
"path": "components/expression.py",
"copies": "1",
"size": "24893",
"license": "mit",
"hash": -2372579031499640000,
"line_mean": 37.3559322034,
"line_max": 118,
"alpha_frac": 0.4175471016,
"autogenerated": false,
"ratio": 3.871986312023643,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9784628915733445,
"avg_score": 0.0009808995780394576,
"num_lines": 649
} |
"""A set of utility function to ease the use of OmniORBpy."""
__revision__ = '$Id: corbautils.py,v 1.2 2005-11-22 13:13:00 syt Exp $'
from omniORB import CORBA, PortableServer
import CosNaming
orb = None
def get_orb():
    """
    Return a reference to the ORB.

    The first call initializes the ORB; later calls return the cached
    module-level reference.  Mainly used internally in the module.
    """
    global orb
    if orb is not None:
        return orb
    import sys
    orb = CORBA.ORB_init(sys.argv, CORBA.ORB_ID)
    return orb
def get_root_context():
    """Return the root NamingContext of the NameService.

    This function is mainly used internally in the module.
    """
    name_service = get_orb().resolve_initial_references("NameService")
    root_context = name_service._narrow(CosNaming.NamingContext)
    assert root_context is not None, "Failed to narrow root naming context"
    return root_context
def register_object_name(object, namepath):
    """Register an object in the NamingService.

    The name path is a list of 2-uples (id, kind) giving the path.
    For instance if the path of an object is [('foo',''),('bar','')],
    it is possible to get a reference to the object using the URL
    'corbaname::hostname#foo/bar'.
    [('logilab','rootmodule'),('chatbot','application'),('chatter','server')]
    is mapped to
    'corbaname::hostname#logilab.rootmodule/chatbot.application/chatter.server'
    The get_object_reference() function can be used to resolve such a URL.
    """
    context = get_root_context()
    # Walk every intermediate path component, creating the naming context
    # if it does not exist yet.
    for id, kind in namepath[:-1]:
        name = [CosNaming.NameComponent(id, kind)]
        try:
            context = context.bind_new_context(name)
        except CosNaming.NamingContext.AlreadyBound, ex:
            # Context already exists: descend into it instead of creating it.
            context = context.resolve(name)._narrow(CosNaming.NamingContext)
            assert context is not None, \
                'test context exists but is not a NamingContext'
    # Bind the leaf name to the object's CORBA reference, replacing any
    # stale binding left by a previous run.
    id,kind = namepath[-1]
    name = [CosNaming.NameComponent(id, kind)]
    try:
        context.bind(name, object._this())
    except CosNaming.NamingContext.AlreadyBound, ex:
        context.rebind(name, object._this())
def activate_POA():
    """Activate the Portable Object Adapter.

    Call this to enable the reception of messages in your code, on both
    the client and the server.
    """
    root_poa = get_orb().resolve_initial_references('RootPOA')
    root_poa._get_the_POAManager().activate()
def run_orb():
    """Enter the ORB main loop (server side only).

    Do not call this on the client.
    """
    get_orb().run()
def get_object_reference(url):
    """Resolve a corbaname URL to an object proxy.

    See register_object_name() for example URLs.
    """
    return get_orb().string_to_object(url)
def get_object_string(host, namepath):
    """Build a corbaname URL for *namepath* on *host*.

    *namepath* is a list of (id, kind) 2-uples as described in
    register_object_name(); each component is joined with '.' and the
    components with '/'.
    """
    joined_path = '/'.join('.'.join(component) for component in namepath)
    return 'corbaname::%s#%s' % (host, joined_path)
| {
"repo_name": "h2oloopan/easymerge",
"path": "EasyMerge/clonedigger/logilab/common/corbautils.py",
"copies": "1",
"size": "3112",
"license": "mit",
"hash": -5464688936364603000,
"line_mean": 31.4166666667,
"line_max": 79,
"alpha_frac": 0.6552056555,
"autogenerated": false,
"ratio": 3.5647193585337917,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.47199250140337917,
"avg_score": null,
"num_lines": null
} |
"""A set of views every cart needs.
On success, each view returns a JSON-response with the cart
representation. For the details on the format of the return value,
see the :meth:`~easycart.cart.BaseCart.encode` method of the
:class:`~easycart.cart.BaseCart` class.
If a parameter required by a view is not present in the request's POST
data, then the JSON-response will have the format::
{'error': 'MissingRequestParam', 'param': parameter_name}
Almost the same thing happens, if a parameter is invalid and results in
an exception, which is a subclass of :class:`~easycart.cart.CartException`.
In this case, the error value will be the name of the concrete exception
class (e.g. ``'ItemNotInCart'`` or ``'NegativeItemQuantity'``).
And instead of ``param`` there may be one or more items providing
additional info on the error, for example, the primary key of an item
you were trying to change or an invalid quantity passed in the request.
Note
----
All of the views in this module accept only POST requests.
Warning
-------
The views in this module do not protect you from race conditions, which
may occur if, for example, server receives requests changing the cart
state almost simultaneously. It seems there's no good
platform-independent way to do it (see `this issue
<https://github.com/nevimov/django-easycart/issues/8>`_).
For now, I suggest to use JavaScript to ensure that you don't make
new requests to the cart until you have a response for the current one.
Feel free to reopen the issue, if you have any suggestions on how to
improve the situation.
"""
from importlib import import_module
from django.conf import settings
from django.http import JsonResponse
from django.views.generic import View
from easycart.cart import CartException
__all__ = [
'AddItem',
'RemoveItem',
'ChangeItemQuantity',
'EmptyCart',
]
# Resolve the project's concrete cart class from the dotted path in
# settings.EASYCART_CART_CLASS (e.g. 'myshop.cart.Cart'). Runs once at
# import time, so a bad setting fails fast.
cart_module, cart_class = settings.EASYCART_CART_CLASS.rsplit('.', 1)
Cart = getattr(import_module(cart_module), cart_class)
class CartView(View):
    """Base class for views operating the cart.

    Subclasses set :attr:`action`, :attr:`required_params` and
    :attr:`optional_params`; :meth:`post` extracts the parameters from
    the request and forwards them to the cart method named by ``action``.
    """

    # Name of the cart-object attribute called to perform the action.
    action = None
    # Parameters that MUST be present in the POST data.
    required_params = ()
    # Parameters that MAY be present in the POST data; values are the
    # fallbacks used when a parameter is absent.
    optional_params = {}

    def post(self, request):
        """Perform ``action`` on the cart with parameters from POST data.

        Returns a JSON response: either the encoded cart on success, or
        an error descriptor when a parameter is missing or the cart
        raises a ``CartException`` subclass.
        """
        params = {}
        for name in self.required_params:
            if name not in request.POST:
                return JsonResponse({
                    'error': 'MissingRequestParam',
                    'param': name,
                })
            params[name] = request.POST[name]
        for name, fallback in self.optional_params.items():
            params[name] = request.POST.get(name, fallback)
        cart = Cart(request)
        try:
            getattr(cart, self.action)(**params)
        except CartException as exc:
            payload = {'error': exc.__class__.__name__}
            payload.update(exc.kwargs)
            return JsonResponse(payload)
        return cart.encode()
class AddItem(CartView):
    """Add an item to the cart.
    This view expects `request.POST` to contain:
    +------------+----------------------------------------------------+
    | key        | value                                              |
    +============+====================================================+
    | `pk`       | the primary key of an item to add                  |
    +------------+----------------------------------------------------+
    | `quantity` | a quantity that should be associated with the item |
    +------------+----------------------------------------------------+
    The `quantity` parameter is optional (defaults to 1).
    """
    # Cart method invoked by CartView.post().
    action = 'add'
    # POST parameters that must be present.
    required_params = ('pk',)
    # Optional POST parameters with their fallback values.
    optional_params = {'quantity': 1}
class ChangeItemQuantity(CartView):
    """Change the quantity associated with an item.
    This view expects `request.POST` to contain:
    +------------+----------------------------------------------------+
    | key        | value                                              |
    +============+====================================================+
    | `pk`       | the primary key of an item                         |
    +------------+----------------------------------------------------+
    | `quantity` | a new quantity to associate with the item          |
    +------------+----------------------------------------------------+
    """
    # Cart method invoked by CartView.post().
    action = 'change_quantity'
    # POST parameters that must be present.
    required_params = ('pk', 'quantity')
class RemoveItem(CartView):
    """Remove an item from the cart.
    Expects `request.POST` to contain key *pk*. The associated value
    should be the primary key of an item you wish to remove.
    """
    # Cart method invoked by CartView.post().
    action = 'remove'
    # POST parameters that must be present.
    required_params = ('pk',)
class EmptyCart(CartView):
    """Remove all items from the cart."""
    # Cart method invoked by CartView.post(); takes no parameters.
    action = 'empty'
| {
"repo_name": "nevimov/django-easycart",
"path": "easycart/views.py",
"copies": "1",
"size": "5228",
"license": "mit",
"hash": 2665307708749407700,
"line_mean": 33.6225165563,
"line_max": 75,
"alpha_frac": 0.5730680949,
"autogenerated": false,
"ratio": 4.565938864628821,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 151
} |
"""A setup module for conda-merge"""
import os
from setuptools import setup
from codecs import open
here = os.path.abspath(os.path.dirname(__file__))
# Get the long description from the README file
with open(os.path.join(here, 'README.rst'), encoding='utf-8') as f:
long_description = f.read()
setup(
name='conda-merge',
version='0.2.0',
description='Tool for merging conda environment files',
long_description=long_description,
url='https://github.com/amitbeka/conda-merge',
author='Amit Beka',
author_email='amit.beka@gmail.com',
license='MIT',
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Console',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3 :: Only',
'Topic :: Software Development :: Build Tools',
'Topic :: System :: Installation/Setup',
],
keywords='conda anaconda environment',
py_modules=["conda_merge"],
install_requires=['pyyaml'],
python_requires='>=3',
extras_require={
'test': ['pytest'],
},
entry_points={
'console_scripts': [
'conda-merge=conda_merge:main',
],
},
)
| {
"repo_name": "amitbeka/conda-merge",
"path": "setup.py",
"copies": "1",
"size": "1252",
"license": "mit",
"hash": -1132413868706350200,
"line_mean": 23.5490196078,
"line_max": 67,
"alpha_frac": 0.6094249201,
"autogenerated": false,
"ratio": 3.888198757763975,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.49976236778639754,
"avg_score": null,
"num_lines": null
} |
"""A setup module for portbalance."""
from setuptools import setup, find_packages
from setuptools.command.install import install as _install
# Adapted from https://stackoverflow.com/questions/26799894/installing-nltk-data-in-setup-py-script
# Subclasses default installer
class Install(_install):
def run(self):
_install.do_egg_install(self)
import nltk
nltk.download('punkt')
nltk.download('averaged_perceptron_tagger')
setup(
name='cfgen',
version='0.1.0',
author='William Gilpin',
author_email='wgilpin@stanford.edu',
classifiers=[
'Programming Language :: Python',
'Operating System :: OS Independent',
],
description='Uses a combination of Markov chains and context-free-grammars to'
+' generate random sentences with features of both language models.',
keywords="finance portfolio rebalancing stock",
python_requires='>=3',
cmdclass={'install': Install},
install_requires=[
'numpy',
'nltk'
],
setup_requires=['nltk'],
include_package_data=True,
# dependency_links = ['git+https://github.com/emilmont/pyStatParser@master#egg=pyStatParser-0.0.1'],
# dependency_links=['https://github.com/emilmont/pyStatParser/tarball/master#egg=pyStatParser-0.0.1'],
dependency_links = ['git+https://github.com/emilmont/pyStatParser.git#egg=pyStatParser-0.0.1'],
packages=["cfgen"],
url='https://github.com/williamgilpin/cfgen'
) | {
"repo_name": "williamgilpin/cfgen",
"path": "setup.py",
"copies": "1",
"size": "1483",
"license": "mit",
"hash": -2035646535183798800,
"line_mean": 36.1,
"line_max": 106,
"alpha_frac": 0.6803776129,
"autogenerated": false,
"ratio": 3.5734939759036144,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9579785968915424,
"avg_score": 0.03481712397763813,
"num_lines": 40
} |
"""A setup module for psidPy
Based on the pypa sample project.
A tool to download data and build psid panels based on psidR by Florian Oswald.
See:
https://github.com/floswald/psidR
https://github.com/tyler-abbot/psidPy
"""
from setuptools import setup, find_packages
from codecs import open
from os import path
here = path.abspath(path.dirname(__file__))
# Get the long description from the relevant file
with open(path.join(here, 'DESCRIPTION.rst'), encoding='utf-8') as f:
long_description = f.read()
setup(
name='psid_py',
version='1.0.2',
description='A tool to build PSID panels.',
# The project's main homepage
url='https://github.com/tyler-abbot/psidPy',
# Author details
author='Tyler Abbot',
author_email='tyler.abbot@sciencespo.fr',
# Licensing information
license='MIT',
classifiers=[
#How mature is this project?
# How mature is this project? Common values are
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
'Development Status :: 3 - Alpha',
# Indicate who your project is intended for
'Intended Audience :: Science/Research',
'Topic :: Scientific/Engineering :: Information Analysis',
# Pick your license as you wish (should match "license" above)
'License :: OSI Approved :: MIT License',
# Specify the Python versions you support here. In particular, ensure
# that you indicate whether you support Python 2, Python 3 or both.
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4'],
# What does your project relate to?
keywords='statistics econometrics data',
# You can just specify the packages manually here if your project is
# simple. Or you can use find_packages().
packages=find_packages(exclude=['contrib', 'docs', 'tests*']),
# List run-time dependencies here. These will be installed by pip when
# your project is installed. For an analysis of "install_requires" vs pip's
# requirements files see:
# https://packaging.python.org/en/latest/requirements.html
install_requires=['requests',
'pandas',
'beautifulsoup4'],
)
| {
"repo_name": "tyler-abbot/psid_py",
"path": "setup.py",
"copies": "1",
"size": "2486",
"license": "mit",
"hash": -7097989396776916000,
"line_mean": 32.1466666667,
"line_max": 79,
"alpha_frac": 0.6452131939,
"autogenerated": false,
"ratio": 4.016155088852988,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5161368282752988,
"avg_score": null,
"num_lines": null
} |
"""A setup module for the GAPIC Bigtable library.
See:
https://packaging.python.org/en/latest/distributing.html
https://github.com/pypa/sampleproject
"""
from setuptools import setup, find_packages
import sys
install_requires = [
'googleapis-common-protos>=1.3.5, <2.0.0dev',
'google-gax>=0.14.1, <0.15.0dev',
'grpc-google-bigtable-v2>=0.11.1, <0.12.0dev',
'oauth2client>=2.0.0, <4.0.0dev',
]
setup(
name='gapic-google-bigtable-v2',
version='0.11.1',
author='Google Inc',
author_email='googleapis-packages@google.com',
classifiers=[
'Intended Audience :: Developers',
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: Implementation :: CPython',
],
description='GAPIC library for the Google Bigtable API',
include_package_data=True,
long_description=open('README.rst').read(),
install_requires=install_requires,
license='Apache-2.0',
packages=find_packages(),
namespace_packages=['google', 'google.cloud', 'google.cloud.gapic', 'google.cloud.gapic.bigtable', ],
url='https://github.com/googleapis/googleapis'
)
| {
"repo_name": "michaelbausor/api-client-staging",
"path": "generated/python/gapic-google-bigtable-v2/setup.py",
"copies": "9",
"size": "1519",
"license": "bsd-3-clause",
"hash": 1259415437697608400,
"line_mean": 33.5227272727,
"line_max": 105,
"alpha_frac": 0.646477946,
"autogenerated": false,
"ratio": 3.6252983293556085,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0002144082332761578,
"num_lines": 44
} |
"""A setup module for the GAPIC Cloud Spanner API library.
See:
https://packaging.python.org/en/latest/distributing.html
https://github.com/pypa/sampleproject
"""
from setuptools import setup, find_packages
import sys
install_requires = [
'google-gax>=0.15.7, <0.16dev',
'oauth2client>=2.0.0, <4.0dev',
'proto-google-cloud-spanner-v1[grpc]>=0.15.4, <0.16dev',
'googleapis-common-protos[grpc]>=1.5.2, <2.0dev',
]
setup(
name='gapic-google-cloud-spanner-v1',
version='0.15.4',
author='Google Inc',
author_email='googleapis-packages@google.com',
classifiers=[
'Intended Audience :: Developers',
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: Implementation :: CPython',
],
description='GAPIC library for the Cloud Spanner API',
include_package_data=True,
long_description=open('README.rst').read(),
install_requires=install_requires,
license='Apache-2.0',
packages=find_packages(),
namespace_packages=[
'google', 'google.cloud', 'google.cloud.gapic',
'google.cloud.gapic.spanner'
],
url='https://github.com/googleapis/googleapis')
| {
"repo_name": "landrito/api-client-staging",
"path": "generated/python/gapic-google-cloud-spanner-v1/setup.py",
"copies": "7",
"size": "1559",
"license": "bsd-3-clause",
"hash": 9081711915040300000,
"line_mean": 32.8913043478,
"line_max": 70,
"alpha_frac": 0.6407953817,
"autogenerated": false,
"ratio": 3.642523364485981,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.7783318746185981,
"avg_score": null,
"num_lines": null
} |
"""A setup module for the GAPIC Cloud Spanner Database Admin API library.
See:
https://packaging.python.org/en/latest/distributing.html
https://github.com/pypa/sampleproject
"""
from setuptools import setup, find_packages
import sys
install_requires = [
'google-gax>=0.15.7, <0.16dev',
'oauth2client>=2.0.0, <4.0dev',
'proto-google-cloud-spanner-admin-database-v1[grpc]>=0.15.4, <0.16dev',
'googleapis-common-protos[grpc]>=1.5.2, <2.0dev',
'grpc-google-iam-v1>=0.11.1, <0.12dev',
]
setup(
name='gapic-google-cloud-spanner-admin-database-v1',
version='0.15.4',
author='Google Inc',
author_email='googleapis-packages@google.com',
classifiers=[
'Intended Audience :: Developers',
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: Implementation :: CPython',
],
description='GAPIC library for the Cloud Spanner Database Admin API',
include_package_data=True,
long_description=open('README.rst').read(),
install_requires=install_requires,
license='Apache-2.0',
packages=find_packages(),
namespace_packages=[
'google', 'google.cloud', 'google.cloud.gapic',
'google.cloud.gapic.spanner_admin_database'
],
url='https://github.com/googleapis/googleapis')
| {
"repo_name": "pongad/api-client-staging",
"path": "generated/python/gapic-google-cloud-spanner-admin-database-v1/setup.py",
"copies": "7",
"size": "1678",
"license": "bsd-3-clause",
"hash": 7286386547895474000,
"line_mean": 34.7021276596,
"line_max": 75,
"alpha_frac": 0.6489868892,
"autogenerated": false,
"ratio": 3.5931477516059958,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 47
} |
"""A setup module for the GAPIC Cloud Spanner Instance Admin API library.
See:
https://packaging.python.org/en/latest/distributing.html
https://github.com/pypa/sampleproject
"""
from setuptools import setup, find_packages
import sys
install_requires = [
'google-gax>=0.15.7, <0.16dev',
'oauth2client>=2.0.0, <4.0dev',
'proto-google-cloud-spanner-admin-instance-v1[grpc]>=0.15.4, <0.16dev',
'googleapis-common-protos[grpc]>=1.5.2, <2.0dev',
'grpc-google-iam-v1>=0.11.1, <0.12dev',
]
setup(
name='gapic-google-cloud-spanner-admin-instance-v1',
version='0.15.4',
author='Google Inc',
author_email='googleapis-packages@google.com',
classifiers=[
'Intended Audience :: Developers',
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: Implementation :: CPython',
],
description='GAPIC library for the Cloud Spanner Instance Admin API',
include_package_data=True,
long_description=open('README.rst').read(),
install_requires=install_requires,
license='Apache-2.0',
packages=find_packages(),
namespace_packages=[
'google', 'google.cloud', 'google.cloud.gapic',
'google.cloud.gapic.spanner_admin_instance'
],
url='https://github.com/googleapis/googleapis')
| {
"repo_name": "googleapis/api-client-staging",
"path": "generated/python/gapic-google-cloud-spanner-admin-instance-v1/setup.py",
"copies": "7",
"size": "1678",
"license": "bsd-3-clause",
"hash": -8611177985433388000,
"line_mean": 34.7021276596,
"line_max": 75,
"alpha_frac": 0.6489868892,
"autogenerated": false,
"ratio": 3.5931477516059958,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.7742134640805995,
"avg_score": null,
"num_lines": null
} |
"""A setup module for the GAPIC Datastore library.
See:
https://packaging.python.org/en/latest/distributing.html
https://github.com/pypa/sampleproject
"""
from setuptools import setup, find_packages
import sys
install_requires = [
'googleapis-common-protos>=1.3.5, <2.0.0dev',
'google-gax>=0.14.1, <0.15.0dev',
'grpc-google-datastore-v1>=0.11.1, <0.12.0dev',
'oauth2client>=2.0.0, <4.0.0dev',
]
setup(
name='gapic-google-datastore-v1',
version='0.11.1',
author='Google Inc',
author_email='googleapis-packages@google.com',
classifiers=[
'Intended Audience :: Developers',
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: Implementation :: CPython',
],
description='GAPIC library for the Google Datastore API',
include_package_data=True,
long_description=open('README.rst').read(),
install_requires=install_requires,
license='Apache-2.0',
packages=find_packages(),
namespace_packages=['google', 'google.cloud', 'google.cloud.gapic', 'google.cloud.gapic.datastore', ],
url='https://github.com/googleapis/googleapis'
)
| {
"repo_name": "michaelbausor/api-client-staging",
"path": "generated/python/gapic-google-datastore-v1/setup.py",
"copies": "2",
"size": "1524",
"license": "bsd-3-clause",
"hash": 2173300392086918000,
"line_mean": 33.6363636364,
"line_max": 106,
"alpha_frac": 0.6476377953,
"autogenerated": false,
"ratio": 3.6372315035799523,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00021240441801189462,
"num_lines": 44
} |
"""A setup module for the GAPIC DLP API library.
See:
https://packaging.python.org/en/latest/distributing.html
https://github.com/pypa/sampleproject
"""
from setuptools import setup, find_packages
import sys
install_requires = [
'google-gax>=0.15.7, <0.16dev',
'oauth2client>=2.0.0, <4.0dev',
'proto-google-cloud-dlp-v2beta1[grpc]>=0.15.4, <0.16dev',
'googleapis-common-protos[grpc]>=1.5.2, <2.0dev',
]
setup(
name='gapic-google-cloud-dlp-v2beta1',
version='0.15.4',
author='Google Inc',
author_email='googleapis-packages@google.com',
classifiers=[
'Intended Audience :: Developers',
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: Implementation :: CPython',
],
description='GAPIC library for the DLP API',
include_package_data=True,
long_description=open('README.rst').read(),
install_requires=install_requires,
license='Apache-2.0',
packages=find_packages(),
namespace_packages=[
'google', 'google.cloud', 'google.cloud.gapic',
'google.cloud.gapic.privacy', 'google.cloud.gapic.privacy.dlp'
],
url='https://github.com/googleapis/googleapis')
| {
"repo_name": "eoogbe/api-client-staging",
"path": "generated/python/gapic-google-cloud-dlp-v2beta1/setup.py",
"copies": "7",
"size": "1575",
"license": "bsd-3-clause",
"hash": 6655127846520566000,
"line_mean": 33.2391304348,
"line_max": 70,
"alpha_frac": 0.6406349206,
"autogenerated": false,
"ratio": 3.5876993166287017,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 46
} |
"""A setup module for the GAPIC Google Cloud Datastore API library.
See:
https://packaging.python.org/en/latest/distributing.html
https://github.com/pypa/sampleproject
"""
from setuptools import setup, find_packages
import sys
install_requires = [
'google-gax>=0.15.7, <0.16dev',
'oauth2client>=2.0.0, <4.0dev',
'proto-google-cloud-datastore-v1[grpc]>=0.90.4, <0.91dev',
'googleapis-common-protos[grpc]>=1.5.2, <2.0dev',
]
setup(
name='gapic-google-cloud-datastore-v1',
version='0.90.4',
author='Google Inc',
author_email='googleapis-packages@google.com',
classifiers=[
'Intended Audience :: Developers',
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: Implementation :: CPython',
],
description='GAPIC library for the Google Cloud Datastore API',
include_package_data=True,
long_description=open('README.rst').read(),
install_requires=install_requires,
license='Apache-2.0',
packages=find_packages(),
namespace_packages=[
'google', 'google.cloud', 'google.cloud.gapic',
'google.cloud.gapic.datastore'
],
url='https://github.com/googleapis/googleapis')
| {
"repo_name": "googleapis/api-client-staging",
"path": "generated/python/gapic-google-cloud-datastore-v1/setup.py",
"copies": "7",
"size": "1582",
"license": "bsd-3-clause",
"hash": -6160567260167679000,
"line_mean": 33.3913043478,
"line_max": 70,
"alpha_frac": 0.6447534766,
"autogenerated": false,
"ratio": 3.7049180327868854,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 46
} |
"""A setup module for the GAPIC Google Cloud Functions API library.
See:
https://packaging.python.org/en/latest/distributing.html
https://github.com/pypa/sampleproject
"""
from setuptools import setup, find_packages
import sys
install_requires = [
'google-gax>=0.15.7, <0.16dev',
'oauth2client>=2.0.0, <4.0dev',
'proto-google-cloud-functions-v1beta2[grpc]>=0.15.4, <0.16dev',
'googleapis-common-protos[grpc]>=1.5.2, <2.0dev',
]
setup(
name='gapic-google-cloud-functions-v1beta2',
version='0.15.4',
author='Google Inc',
author_email='googleapis-packages@google.com',
classifiers=[
'Intended Audience :: Developers',
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: Implementation :: CPython',
],
description='GAPIC library for the Google Cloud Functions API',
include_package_data=True,
long_description=open('README.rst').read(),
install_requires=install_requires,
license='Apache-2.0',
packages=find_packages(),
namespace_packages=['google', 'google.cloud', 'google.cloud.functions'],
url='https://github.com/googleapis/googleapis')
| {
"repo_name": "eoogbe/api-client-staging",
"path": "generated/python/gapic-google-cloud-functions-v1beta2/setup.py",
"copies": "7",
"size": "1543",
"license": "bsd-3-clause",
"hash": -5573995399755826,
"line_mean": 34.8837209302,
"line_max": 76,
"alpha_frac": 0.6545690214,
"autogenerated": false,
"ratio": 3.736077481840194,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 43
} |
"""A setup module for the GAPIC Google Cloud Natural Language API library.
See:
https://packaging.python.org/en/latest/distributing.html
https://github.com/pypa/sampleproject
"""
from setuptools import setup, find_packages
import sys
install_requires = [
'google-gax>=0.15.7, <0.16dev',
'oauth2client>=2.0.0, <4.0dev',
'proto-google-cloud-language-v1beta2[grpc]>=0.15.4, <0.16dev',
'googleapis-common-protos[grpc]>=1.5.2, <2.0dev',
]
setup(
name='gapic-google-cloud-language-v1beta2',
version='0.15.4',
author='Google Inc',
author_email='googleapis-packages@google.com',
classifiers=[
'Intended Audience :: Developers',
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: Implementation :: CPython',
],
description='GAPIC library for the Google Cloud Natural Language API',
include_package_data=True,
long_description=open('README.rst').read(),
install_requires=install_requires,
license='Apache-2.0',
packages=find_packages(),
namespace_packages=[
'google', 'google.cloud', 'google.cloud.gapic',
'google.cloud.gapic.language'
],
url='https://github.com/googleapis/googleapis')
| {
"repo_name": "shinfan/api-client-staging",
"path": "generated/python/gapic-google-cloud-language-v1beta2/setup.py",
"copies": "7",
"size": "1604",
"license": "bsd-3-clause",
"hash": -5341597621795642000,
"line_mean": 33.8695652174,
"line_max": 74,
"alpha_frac": 0.6483790524,
"autogenerated": false,
"ratio": 3.6873563218390806,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 46
} |
"""A setup module for the GAPIC Google Cloud Pub/Sub API library.
See:
https://packaging.python.org/en/latest/distributing.html
https://github.com/pypa/sampleproject
"""
from setuptools import setup, find_packages
import sys
install_requires = [
'google-gax>=0.15.7, <0.16dev',
'oauth2client>=2.0.0, <4.0dev',
'proto-google-cloud-pubsub-v1[grpc]>=0.15.4, <0.16dev',
'googleapis-common-protos[grpc]>=1.5.2, <2.0dev',
'grpc-google-iam-v1>=0.11.1, <0.12dev',
]
setup(
name='gapic-google-cloud-pubsub-v1',
version='0.15.4',
author='Google Inc',
author_email='googleapis-packages@google.com',
classifiers=[
'Intended Audience :: Developers',
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: Implementation :: CPython',
],
description='GAPIC library for the Google Cloud Pub/Sub API',
include_package_data=True,
long_description=open('README.rst').read(),
install_requires=install_requires,
license='Apache-2.0',
packages=find_packages(),
namespace_packages=[
'google', 'google.cloud', 'google.cloud.gapic',
'google.cloud.gapic.pubsub'
],
url='https://github.com/googleapis/googleapis')
| {
"repo_name": "garrettjonesgoogle/api-client-staging",
"path": "generated/python/gapic-google-cloud-pubsub-v1/setup.py",
"copies": "7",
"size": "1614",
"license": "bsd-3-clause",
"hash": 861698761827902100,
"line_mean": 33.3404255319,
"line_max": 70,
"alpha_frac": 0.6387856258,
"autogenerated": false,
"ratio": 3.555066079295154,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.7693851705095154,
"avg_score": null,
"num_lines": null
} |
"""A setup module for the GAPIC Google Cloud Speech API library.
See:
https://packaging.python.org/en/latest/distributing.html
https://github.com/pypa/sampleproject
"""
from setuptools import setup, find_packages

# Pinned dependency ranges for this generated GAPIC client.
install_requires = [
    'google-gax>=0.15.7, <0.16dev',
    'oauth2client>=2.0.0, <4.0dev',
    'proto-google-cloud-speech-v1beta1[grpc]>=0.15.4, <0.16dev',
    'googleapis-common-protos[grpc]>=1.5.2, <2.0dev',
]

# Read the long description with a context manager so the file handle is
# closed promptly (the previous inline open(...).read() never closed it).
with open('README.rst') as readme_file:
    long_description = readme_file.read()

setup(
    name='gapic-google-cloud-speech-v1beta1',
    version='0.15.4',
    author='Google Inc',
    author_email='googleapis-packages@google.com',
    classifiers=[
        # 'Intended Audience :: Developers' appeared twice upstream;
        # listed exactly once here.
        'Development Status :: 3 - Alpha',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: Apache Software License',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: Implementation :: CPython',
    ],
    description='GAPIC library for the Google Cloud Speech API',
    include_package_data=True,
    long_description=long_description,
    install_requires=install_requires,
    license='Apache-2.0',
    packages=find_packages(),
    namespace_packages=[
        'google', 'google.cloud', 'google.cloud.gapic',
        'google.cloud.gapic.speech'
    ],
    url='https://github.com/googleapis/googleapis')
| {
"repo_name": "pongad/api-client-staging",
"path": "generated/python/gapic-google-cloud-speech-v1beta1/setup.py",
"copies": "7",
"size": "1578",
"license": "bsd-3-clause",
"hash": 7772377161766008000,
"line_mean": 33.3043478261,
"line_max": 70,
"alpha_frac": 0.6438529785,
"autogenerated": false,
"ratio": 3.644341801385681,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 46
} |
"""A setup module for the GAPIC Google Cloud Vision API library.
See:
https://packaging.python.org/en/latest/distributing.html
https://github.com/pypa/sampleproject
"""
from setuptools import setup, find_packages

# Pinned dependency ranges for this generated GAPIC client.
install_requires = [
    'google-gax>=0.15.7, <0.16dev',
    'oauth2client>=2.0.0, <4.0dev',
    'proto-google-cloud-vision-v1[grpc]>=0.90.3, <0.91dev',
    'googleapis-common-protos[grpc]>=1.5.2, <2.0dev',
]

# Read the long description with a context manager so the file handle is
# closed promptly (the previous inline open(...).read() never closed it).
with open('README.rst') as readme_file:
    long_description = readme_file.read()

setup(
    name='gapic-google-cloud-vision-v1',
    version='0.90.3',
    author='Google Inc',
    author_email='googleapis-packages@google.com',
    classifiers=[
        # 'Intended Audience :: Developers' appeared twice upstream;
        # listed exactly once here.
        'Development Status :: 4 - Beta',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: Apache Software License',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: Implementation :: CPython',
    ],
    description='GAPIC library for the Google Cloud Vision API',
    include_package_data=True,
    long_description=long_description,
    install_requires=install_requires,
    license='Apache-2.0',
    packages=find_packages(),
    namespace_packages=[
        'google', 'google.cloud', 'google.cloud.gapic',
        'google.cloud.gapic.vision'
    ],
    url='https://github.com/googleapis/googleapis')
| {
"repo_name": "garrettjonesgoogle/api-client-staging",
"path": "generated/python/gapic-google-cloud-vision-v1/setup.py",
"copies": "7",
"size": "1567",
"license": "bsd-3-clause",
"hash": 1142480688599873500,
"line_mean": 33.0652173913,
"line_max": 70,
"alpha_frac": 0.6413529036,
"autogenerated": false,
"ratio": 3.669789227166276,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.7811142130766277,
"avg_score": null,
"num_lines": null
} |
"""A setup module for the GAPIC Google library.
See:
https://packaging.python.org/en/latest/distributing.html
https://github.com/pypa/sampleproject
"""
from setuptools import setup, find_packages

# Pinned dependency ranges for this generated GAPIC client.
install_requires = [
    'googleapis-common-protos>=1.3.5, <2.0.0dev',
    'google-gax>=0.14.1, <0.15.0dev',
    'oauth2client>=2.0.0, <4.0.0dev',
]

# Read the long description with a context manager so the file handle is
# closed promptly (the previous inline open(...).read() never closed it).
with open('README.rst') as readme_file:
    long_description = readme_file.read()

setup(
    name='gapic-google-longrunning',
    version='0.11.1',
    author='Google Inc',
    author_email='googleapis-packages@google.com',
    classifiers=[
        # 'Intended Audience :: Developers' appeared twice upstream;
        # listed exactly once here.
        'Development Status :: 3 - Alpha',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: Apache Software License',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: Implementation :: CPython',
    ],
    description='GAPIC library for the Google Google API',
    include_package_data=True,
    long_description=long_description,
    install_requires=install_requires,
    license='Apache-2.0',
    packages=find_packages(),
    namespace_packages=['google', 'google.cloud', 'google.cloud.gapic', ],
    url='https://github.com/googleapis/googleapis'
)
| {
"repo_name": "ethanbao/api-client-staging-1",
"path": "generated/python/gapic-google-longrunning-v1/setup.py",
"copies": "1",
"size": "1433",
"license": "bsd-3-clause",
"hash": -1676079860708739300,
"line_mean": 32.3255813953,
"line_max": 74,
"alpha_frac": 0.6448011165,
"autogenerated": false,
"ratio": 3.7710526315789474,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9915853748078947,
"avg_score": 0,
"num_lines": 43
} |
"""A setup module for the GAPIC IAM Admin library.
See:
https://packaging.python.org/en/latest/distributing.html
https://github.com/pypa/sampleproject
"""
from setuptools import setup, find_packages

# Pinned dependency ranges for this generated GAPIC client.
install_requires = [
    'googleapis-common-protos>=1.3.4, <2.0.0',
    'google-gax>=0.14.1, <0.15.0',
    'grpc-google-iam-admin-v1>=0.10.0, <0.11.0',
    'grpc-google-iam-v1>=0.10.0, <0.11.0',
    'oauth2client>=1.4.11, <2.0.0',
]

# Read the long description with a context manager so the file handle is
# closed promptly (the previous inline open(...).read() never closed it).
with open('README.rst') as readme_file:
    long_description = readme_file.read()

setup(
    name='gapic-google-iam-admin-v1',
    version='0.10.0',
    author='Google Inc',
    author_email='googleapis-packages@google.com',
    classifiers=[
        # 'Intended Audience :: Developers' appeared twice upstream;
        # listed exactly once here.
        'Development Status :: 3 - Alpha',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: Apache Software License',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: Implementation :: CPython',
    ],
    description='GAPIC library for the Google IAM Admin API',
    include_package_data=True,
    long_description=long_description,
    install_requires=install_requires,
    license='Apache-2.0',
    packages=find_packages(),
    namespace_packages=['google', 'google.cloud', 'google.cloud.gapic', 'google.cloud.gapic.iam_admin', ],
    url='https://github.com/googleapis/googleapis'
)
| {
"repo_name": "geigerj/api-client-staging",
"path": "generated/python/gapic-google-iam-admin-v1/setup.py",
"copies": "2",
"size": "1556",
"license": "bsd-3-clause",
"hash": -1741879462868789800,
"line_mean": 33.5777777778,
"line_max": 106,
"alpha_frac": 0.6388174807,
"autogenerated": false,
"ratio": 3.4966292134831463,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5135446694183147,
"avg_score": null,
"num_lines": null
} |
"""A setup module for the GAPIC Language library.
See:
https://packaging.python.org/en/latest/distributing.html
https://github.com/pypa/sampleproject
"""
from setuptools import setup, find_packages

# Pinned dependency ranges for this generated GAPIC client.
install_requires = [
    'googleapis-common-protos>=1.3.5, <2.0.0dev',
    'google-gax>=0.14.1, <0.15.0dev',
    'grpc-google-cloud-language-v1beta1>=0.11.1, <0.12.0dev',
    'oauth2client>=2.0.0, <4.0.0dev',
]

# Read the long description with a context manager so the file handle is
# closed promptly (the previous inline open(...).read() never closed it).
with open('README.rst') as readme_file:
    long_description = readme_file.read()

setup(
    name='gapic-google-cloud-language-v1beta1',
    version='0.11.1',
    author='Google Inc',
    author_email='googleapis-packages@google.com',
    classifiers=[
        # 'Intended Audience :: Developers' appeared twice upstream;
        # listed exactly once here.
        'Development Status :: 3 - Alpha',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: Apache Software License',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: Implementation :: CPython',
    ],
    description='GAPIC library for the Google Language API',
    include_package_data=True,
    long_description=long_description,
    install_requires=install_requires,
    license='Apache-2.0',
    packages=find_packages(),
    namespace_packages=['google', 'google.cloud', 'google.cloud.gapic', 'google.cloud.gapic.language', ],
    url='https://github.com/googleapis/googleapis'
)
| {
"repo_name": "michaelbausor/api-client-staging",
"path": "generated/python/gapic-google-cloud-language-v1beta1/setup.py",
"copies": "3",
"size": "1541",
"license": "bsd-3-clause",
"hash": -3652107157672690700,
"line_mean": 34.0227272727,
"line_max": 105,
"alpha_frac": 0.6502271252,
"autogenerated": false,
"ratio": 3.6258823529411766,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0002144082332761578,
"num_lines": 44
} |
"""A setup module for the GAPIC Logging library.
See:
https://packaging.python.org/en/latest/distributing.html
https://github.com/pypa/sampleproject
"""
from setuptools import setup, find_packages

# Pinned dependency ranges for this generated GAPIC client.
install_requires = [
    'googleapis-common-protos>=1.3.5, <2.0.0dev',
    'google-gax>=0.14.1, <0.15.0dev',
    'grpc-google-logging-v2>=0.11.1, <0.12.0dev',
    'oauth2client>=2.0.0, <4.0.0dev',
]

# Read the long description with a context manager so the file handle is
# closed promptly (the previous inline open(...).read() never closed it).
with open('README.rst') as readme_file:
    long_description = readme_file.read()

setup(
    name='gapic-google-logging-v2',
    version='0.11.1',
    author='Google Inc',
    author_email='googleapis-packages@google.com',
    classifiers=[
        # 'Intended Audience :: Developers' appeared twice upstream;
        # listed exactly once here.
        'Development Status :: 3 - Alpha',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: Apache Software License',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: Implementation :: CPython',
    ],
    description='GAPIC library for the Stackdriver Logging API',
    include_package_data=True,
    long_description=long_description,
    install_requires=install_requires,
    license='Apache-2.0',
    packages=find_packages(),
    namespace_packages=['google', 'google.cloud', 'google.cloud.gapic', 'google.cloud.gapic.logging', ],
    url='https://github.com/googleapis/googleapis'
)
| {
"repo_name": "michaelbausor/api-client-staging",
"path": "generated/python/gapic-google-logging-v2/setup.py",
"copies": "3",
"size": "1519",
"license": "bsd-3-clause",
"hash": -1023580291421330600,
"line_mean": 33.5227272727,
"line_max": 104,
"alpha_frac": 0.646477946,
"autogenerated": false,
"ratio": 3.6339712918660285,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5780449237866029,
"avg_score": null,
"num_lines": null
} |
"""A setup module for the GAPIC Monitoring library.
See:
https://packaging.python.org/en/latest/distributing.html
https://github.com/pypa/sampleproject
"""
from setuptools import setup, find_packages

# Pinned dependency ranges for this generated GAPIC client.
install_requires = [
    'googleapis-common-protos>=1.3.5, <2.0.0dev',
    'google-gax>=0.14.1, <0.15.0dev',
    'grpc-google-monitoring-v3>=0.11.1, <0.12.0dev',
    'oauth2client>=2.0.0, <4.0.0dev',
]

# Read the long description with a context manager so the file handle is
# closed promptly (the previous inline open(...).read() never closed it).
with open('README.rst') as readme_file:
    long_description = readme_file.read()

setup(
    name='gapic-google-monitoring-v3',
    version='0.11.1',
    author='Google Inc',
    author_email='googleapis-packages@google.com',
    classifiers=[
        # 'Intended Audience :: Developers' appeared twice upstream;
        # listed exactly once here.
        'Development Status :: 3 - Alpha',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: Apache Software License',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: Implementation :: CPython',
    ],
    description='GAPIC library for the Stackdriver Monitoring API',
    include_package_data=True,
    long_description=long_description,
    install_requires=install_requires,
    license='Apache-2.0',
    packages=find_packages(),
    namespace_packages=['google', 'google.cloud', 'google.cloud.gapic', 'google.cloud.gapic.monitoring', ],
    url='https://github.com/googleapis/googleapis'
)
| {
"repo_name": "geigerj/api-client-staging",
"path": "generated/python/gapic-google-monitoring-v3/setup.py",
"copies": "3",
"size": "1534",
"license": "bsd-3-clause",
"hash": -1142565635741038000,
"line_mean": 33.8636363636,
"line_max": 107,
"alpha_frac": 0.649934811,
"autogenerated": false,
"ratio": 3.6523809523809523,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5802315763380953,
"avg_score": null,
"num_lines": null
} |
"""A setup module for the GAPIC Pubsub library.
See:
https://packaging.python.org/en/latest/distributing.html
https://github.com/pypa/sampleproject
"""
from setuptools import setup, find_packages

# Pinned dependency ranges for this generated GAPIC client.
install_requires = [
    'googleapis-common-protos>=1.3.5, <2.0.0dev',
    'google-gax>=0.14.1, <0.15.0dev',
    'grpc-google-pubsub-v1>=0.11.1, <0.12.0dev',
    'grpc-google-iam-v1>=0.11.1, <0.12.0dev',
    'oauth2client>=2.0.0, <4.0.0dev',
]

# Read the long description with a context manager so the file handle is
# closed promptly (the previous inline open(...).read() never closed it).
with open('README.rst') as readme_file:
    long_description = readme_file.read()

setup(
    name='gapic-google-pubsub-v1',
    version='0.11.1',
    author='Google Inc',
    author_email='googleapis-packages@google.com',
    classifiers=[
        # 'Intended Audience :: Developers' appeared twice upstream;
        # listed exactly once here.
        'Development Status :: 3 - Alpha',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: Apache Software License',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: Implementation :: CPython',
    ],
    description='GAPIC library for the Google Pubsub API',
    include_package_data=True,
    long_description=long_description,
    install_requires=install_requires,
    license='Apache-2.0',
    packages=find_packages(),
    namespace_packages=['google', 'google.cloud', 'google.cloud.gapic', 'google.cloud.gapic.pubsub', ],
    url='https://github.com/googleapis/googleapis'
)
| {
"repo_name": "geigerj/api-client-staging",
"path": "generated/python/gapic-google-pubsub-v1/setup.py",
"copies": "3",
"size": "1555",
"license": "bsd-3-clause",
"hash": -993074803955421300,
"line_mean": 33.5555555556,
"line_max": 103,
"alpha_frac": 0.6418006431,
"autogenerated": false,
"ratio": 3.486547085201794,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00021367521367521368,
"num_lines": 45
} |
"""A setup module for the GAPIC Speech library.
See:
https://packaging.python.org/en/latest/distributing.html
https://github.com/pypa/sampleproject
"""
from setuptools import setup, find_packages

# Pinned dependency ranges for this generated GAPIC client.
install_requires = [
    'googleapis-common-protos>=1.3.5, <2.0.0dev',
    'google-gax>=0.14.1, <0.15.0dev',
    'grpc-google-cloud-speech-v1beta1>=0.11.1, <0.12.0dev',
    'oauth2client>=2.0.0, <4.0.0dev',
]

# Read the long description with a context manager so the file handle is
# closed promptly (the previous inline open(...).read() never closed it).
with open('README.rst') as readme_file:
    long_description = readme_file.read()

setup(
    name='gapic-google-cloud-speech-v1beta1',
    version='0.11.1',
    author='Google Inc',
    author_email='googleapis-packages@google.com',
    classifiers=[
        # 'Intended Audience :: Developers' appeared twice upstream;
        # listed exactly once here.
        'Development Status :: 3 - Alpha',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: Apache Software License',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: Implementation :: CPython',
    ],
    description='GAPIC library for the Google Speech API',
    include_package_data=True,
    long_description=long_description,
    install_requires=install_requires,
    license='Apache-2.0',
    packages=find_packages(),
    namespace_packages=['google', 'google.cloud', 'google.cloud.gapic', 'google.cloud.gapic.speech', ],
    url='https://github.com/googleapis/googleapis'
)
| {
"repo_name": "geigerj/api-client-staging",
"path": "generated/python/gapic-google-cloud-speech-v1beta1/setup.py",
"copies": "3",
"size": "1531",
"license": "bsd-3-clause",
"hash": 6008346687626829000,
"line_mean": 33.7954545455,
"line_max": 103,
"alpha_frac": 0.6479425212,
"autogenerated": false,
"ratio": 3.5687645687645686,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5716707089964569,
"avg_score": null,
"num_lines": null
} |
"""A setup module for the GAPIC Stackdriver Error Reporting API library.
See:
https://packaging.python.org/en/latest/distributing.html
https://github.com/pypa/sampleproject
"""
from setuptools import setup, find_packages

# Pinned dependency ranges for this generated GAPIC client.
install_requires = [
    'google-gax>=0.15.7, <0.16dev',
    'oauth2client>=2.0.0, <4.0dev',
    'proto-google-cloud-error-reporting-v1beta1[grpc]>=0.15.4, <0.16dev',
    'googleapis-common-protos[grpc]>=1.5.2, <2.0dev',
]

# Read the long description with a context manager so the file handle is
# closed promptly (the previous inline open(...).read() never closed it).
with open('README.rst') as readme_file:
    long_description = readme_file.read()

setup(
    name='gapic-google-cloud-error-reporting-v1beta1',
    version='0.15.4',
    author='Google Inc',
    author_email='googleapis-packages@google.com',
    classifiers=[
        # 'Intended Audience :: Developers' appeared twice upstream;
        # listed exactly once here.
        'Development Status :: 3 - Alpha',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: Apache Software License',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: Implementation :: CPython',
    ],
    description='GAPIC library for the Stackdriver Error Reporting API',
    include_package_data=True,
    long_description=long_description,
    install_requires=install_requires,
    license='Apache-2.0',
    packages=find_packages(),
    namespace_packages=[
        'google', 'google.cloud', 'google.cloud.gapic',
        'google.cloud.gapic.errorreporting'
    ],
    url='https://github.com/googleapis/googleapis')
| {
"repo_name": "googleapis/api-client-staging",
"path": "generated/python/gapic-google-cloud-error-reporting-v1beta1/setup.py",
"copies": "7",
"size": "1620",
"license": "bsd-3-clause",
"hash": 7864910325953694000,
"line_mean": 34.2173913043,
"line_max": 73,
"alpha_frac": 0.6518518519,
"autogenerated": false,
"ratio": 3.6986301369863015,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.7850481988886302,
"avg_score": null,
"num_lines": null
} |
"""A setup module for the GAPIC Stackdriver Error Reporting library.
See:
https://packaging.python.org/en/latest/distributing.html
https://github.com/pypa/sampleproject
"""
from setuptools import setup, find_packages

# Pinned dependency ranges for this generated GAPIC client.
install_requires = [
    'googleapis-common-protos>=1.3.5, <2.0.0dev',
    'google-gax>=0.14.1, <0.15.0dev',
    'grpc-google-cloud-error-reporting-v1beta1>=0.13.0, <0.14.0dev',
    'oauth2client>=2.0.0, <4.0.0dev',
]

# Read the long description with a context manager so the file handle is
# closed promptly (the previous inline open(...).read() never closed it).
with open('README.rst') as readme_file:
    long_description = readme_file.read()

setup(
    name='gapic-google-cloud-error-reporting-v1beta1',
    version='0.13.0',
    author='Google Inc',
    author_email='googleapis-packages@google.com',
    classifiers=[
        # 'Intended Audience :: Developers' appeared twice upstream;
        # listed exactly once here.
        'Development Status :: 3 - Alpha',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: Apache Software License',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: Implementation :: CPython',
    ],
    description='GAPIC library for the Stackdriver Error Reporting API',
    include_package_data=True,
    long_description=long_description,
    install_requires=install_requires,
    license='Apache-2.0',
    packages=find_packages(),
    namespace_packages=['google', 'google.cloud', 'google.cloud.gapic', 'google.cloud.gapic.errorreporting', ],
    url='https://github.com/googleapis/googleapis'
)
| {
"repo_name": "michaelbausor/api-client-staging",
"path": "generated/python/gapic-google-cloud-error-reporting-v1beta1/setup.py",
"copies": "1",
"size": "1592",
"license": "bsd-3-clause",
"hash": 2992842999381385000,
"line_mean": 35.1818181818,
"line_max": 111,
"alpha_frac": 0.6582914573,
"autogenerated": false,
"ratio": 3.643020594965675,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9800297441876065,
"avg_score": 0.0002029220779220779,
"num_lines": 44
} |
"""A setup module for the GAPIC Stackdriver Logging API library.
See:
https://packaging.python.org/en/latest/distributing.html
https://github.com/pypa/sampleproject
"""
from setuptools import setup, find_packages

# Pinned dependency ranges for this generated GAPIC client.
install_requires = [
    'google-gax>=0.15.7, <0.16dev',
    'oauth2client>=2.0.0, <4.0dev',
    'proto-google-cloud-logging-v2[grpc]>=0.91.4, <0.92dev',
    'googleapis-common-protos[grpc]>=1.5.2, <2.0dev',
]

# Read the long description with a context manager so the file handle is
# closed promptly (the previous inline open(...).read() never closed it).
with open('README.rst') as readme_file:
    long_description = readme_file.read()

setup(
    name='gapic-google-cloud-logging-v2',
    version='0.91.4',
    author='Google Inc',
    author_email='googleapis-packages@google.com',
    classifiers=[
        # 'Intended Audience :: Developers' appeared twice upstream;
        # listed exactly once here.
        'Development Status :: 4 - Beta',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: Apache Software License',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: Implementation :: CPython',
    ],
    description='GAPIC library for the Stackdriver Logging API',
    include_package_data=True,
    long_description=long_description,
    install_requires=install_requires,
    license='Apache-2.0',
    packages=find_packages(),
    namespace_packages=[
        'google', 'google.cloud', 'google.cloud.gapic',
        'google.cloud.gapic.logging'
    ],
    url='https://github.com/googleapis/googleapis')
| {
"repo_name": "eoogbe/api-client-staging",
"path": "generated/python/gapic-google-cloud-logging-v2/setup.py",
"copies": "7",
"size": "1570",
"license": "bsd-3-clause",
"hash": -6923443676872770000,
"line_mean": 33.1304347826,
"line_max": 70,
"alpha_frac": 0.6433121019,
"autogenerated": false,
"ratio": 3.6941176470588237,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.7837429748958824,
"avg_score": null,
"num_lines": null
} |
"""A setup module for the GAPIC Stackdriver Monitoring API library.
See:
https://packaging.python.org/en/latest/distributing.html
https://github.com/pypa/sampleproject
"""
from setuptools import setup, find_packages

# Pinned dependency ranges for this generated GAPIC client.
install_requires = [
    'google-gax>=0.15.7, <0.16dev',
    'oauth2client>=2.0.0, <4.0dev',
    'proto-google-cloud-monitoring-v3[grpc]>=0.15.3, <0.16dev',
    'googleapis-common-protos[grpc]>=1.5.2, <2.0dev',
]

# Read the long description with a context manager so the file handle is
# closed promptly (the previous inline open(...).read() never closed it).
with open('README.rst') as readme_file:
    long_description = readme_file.read()

setup(
    name='gapic-google-cloud-monitoring-v3',
    version='0.15.3',
    author='Google Inc',
    author_email='googleapis-packages@google.com',
    classifiers=[
        # 'Intended Audience :: Developers' appeared twice upstream;
        # listed exactly once here.
        'Development Status :: 3 - Alpha',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: Apache Software License',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: Implementation :: CPython',
    ],
    description='GAPIC library for the Stackdriver Monitoring API',
    include_package_data=True,
    long_description=long_description,
    install_requires=install_requires,
    license='Apache-2.0',
    packages=find_packages(),
    namespace_packages=[
        'google', 'google.cloud', 'google.cloud.gapic',
        'google.cloud.gapic.monitoring'
    ],
    url='https://github.com/googleapis/googleapis')
| {
"repo_name": "pongad/api-client-staging",
"path": "generated/python/gapic-google-cloud-monitoring-v3/setup.py",
"copies": "7",
"size": "1586",
"license": "bsd-3-clause",
"hash": -2383384144374626000,
"line_mean": 33.4782608696,
"line_max": 70,
"alpha_frac": 0.6469104666,
"autogenerated": false,
"ratio": 3.7142857142857144,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.7861196180885714,
"avg_score": null,
"num_lines": null
} |
"""A setup module for the GAPIC Stackdriver Trace API library.
See:
https://packaging.python.org/en/latest/distributing.html
https://github.com/pypa/sampleproject
"""
from setuptools import setup, find_packages

# Pinned dependency ranges for this generated GAPIC client.
install_requires = [
    'google-gax>=0.15.7, <0.16dev',
    'googleapis-common-protos[grpc]>=1.5.2, <2.0dev',
    'google-cloud-core >= 0.26.0, < 0.27dev',
]

# Read the long description with a context manager so the file handle is
# closed promptly (the previous inline open(...).read() never closed it).
with open('README.rst') as readme_file:
    long_description = readme_file.read()

setup(
    name='google-cloud-trace',
    version='0.15.4',
    author='Google Inc',
    author_email='googleapis-packages@google.com',
    classifiers=[
        # 'Intended Audience :: Developers' appeared twice upstream;
        # listed exactly once here.
        'Development Status :: 3 - Alpha',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: Apache Software License',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: Implementation :: CPython',
    ],
    description='GAPIC library for the Stackdriver Trace API',
    include_package_data=True,
    long_description=long_description,
    install_requires=install_requires,
    license='Apache-2.0',
    packages=find_packages(),
    namespace_packages=[
        'google', 'google.cloud', 'google.cloud.gapic',
        'google.cloud.gapic.trace'
    ],
    url='https://github.com/googleapis/googleapis')
| {
"repo_name": "tartavull/google-cloud-python",
"path": "trace/setup.py",
"copies": "2",
"size": "1492",
"license": "apache-2.0",
"hash": 4979035828460995000,
"line_mean": 32.9090909091,
"line_max": 70,
"alpha_frac": 0.6387399464,
"autogenerated": false,
"ratio": 3.825641025641026,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 44
} |
"""A setup module for the GAPIC Stackdriver Trace library.
See:
https://packaging.python.org/en/latest/distributing.html
https://github.com/pypa/sampleproject
"""
from setuptools import setup, find_packages

# Pinned dependency ranges for this generated GAPIC client.
install_requires = [
    'googleapis-common-protos>=1.3.5, <2.0.0dev',
    'google-gax>=0.14.1, <0.15.0dev',
    'grpc-google-devtools-cloudtrace-v1>=0.11.1, <0.12.0dev',
    'oauth2client>=2.0.0, <4.0.0dev',
]

# Read the long description with a context manager so the file handle is
# closed promptly (the previous inline open(...).read() never closed it).
with open('README.rst') as readme_file:
    long_description = readme_file.read()

setup(
    name='gapic-google-devtools-cloudtrace-v1',
    version='0.11.1',
    author='Google Inc',
    author_email='googleapis-packages@google.com',
    classifiers=[
        # 'Intended Audience :: Developers' appeared twice upstream;
        # listed exactly once here.
        'Development Status :: 3 - Alpha',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: Apache Software License',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: Implementation :: CPython',
    ],
    description='GAPIC library for the Stackdriver Trace API',
    include_package_data=True,
    long_description=long_description,
    install_requires=install_requires,
    license='Apache-2.0',
    packages=find_packages(),
    namespace_packages=['google', 'google.cloud', 'google.cloud.gapic', 'google.cloud.gapic.trace', ],
    url='https://github.com/googleapis/googleapis'
)
| {
"repo_name": "geigerj/api-client-staging",
"path": "generated/python/gapic-google-devtools-cloudtrace-v1/setup.py",
"copies": "9",
"size": "1549",
"license": "bsd-3-clause",
"hash": -1179220023614370800,
"line_mean": 34.2045454545,
"line_max": 102,
"alpha_frac": 0.6513879923,
"autogenerated": false,
"ratio": 3.636150234741784,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8787538227041783,
"avg_score": null,
"num_lines": null
} |
"""A setup module for the GAPIC Vision library.
See:
https://packaging.python.org/en/latest/distributing.html
https://github.com/pypa/sampleproject
"""
from setuptools import setup, find_packages

# Pinned dependency ranges for this generated GAPIC client.
install_requires = [
    'googleapis-common-protos>=1.3.5, <2.0.0dev',
    'google-gax>=0.14.1, <0.15.0dev',
    'grpc-google-cloud-vision-v1>=0.11.1, <0.12.0dev',
    'oauth2client>=2.0.0, <4.0.0dev',
]

# Read the long description with a context manager so the file handle is
# closed promptly (the previous inline open(...).read() never closed it).
with open('README.rst') as readme_file:
    long_description = readme_file.read()

setup(
    name='gapic-google-cloud-vision-v1',
    version='0.11.1',
    author='Google Inc',
    author_email='googleapis-packages@google.com',
    classifiers=[
        # 'Intended Audience :: Developers' appeared twice upstream;
        # listed exactly once here.
        'Development Status :: 3 - Alpha',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: Apache Software License',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: Implementation :: CPython',
    ],
    description='GAPIC library for the Google Vision API',
    include_package_data=True,
    long_description=long_description,
    install_requires=install_requires,
    license='Apache-2.0',
    packages=find_packages(),
    namespace_packages=['google', 'google.cloud', 'google.cloud.gapic', 'google.cloud.gapic.vision', ],
    url='https://github.com/googleapis/googleapis'
)
| {
"repo_name": "michaelbausor/api-client-staging",
"path": "generated/python/gapic-google-cloud-vision-v1/setup.py",
"copies": "3",
"size": "1521",
"license": "bsd-3-clause",
"hash": -3872981007164756000,
"line_mean": 33.5681818182,
"line_max": 103,
"alpha_frac": 0.6456278764,
"autogenerated": false,
"ratio": 3.595744680851064,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5741372557251064,
"avg_score": null,
"num_lines": null
} |
"""A setup module for the google apis common protos
See:
https://packaging.python.org/en/latest/distributing.html
https://github.com/pypa/sampleproject
"""
# The original imported both `setuptools` and names from it; the from-import
# alone is sufficient, so the redundant module import was dropped.
from setuptools import setup, find_packages

# Core runtime dependency.
install_requires = [
    'protobuf>=3.0.0'
]

# Optional gRPC transport support: `pip install googleapis-common-protos[grpc]`.
extras_require = {
    'grpc': ['grpcio>=1.0.0']
}

# Read the long description with a context manager so the file handle is
# closed promptly (the previous inline open(...).read() never closed it).
with open('README.rst') as readme_file:
    long_description = readme_file.read()

setup(
    name='googleapis-common-protos',
    version='1.3.5',
    author='Google Inc',
    author_email='googleapis-packages@google.com',
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: Apache Software License',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: Implementation :: CPython',
    ],
    description='Common protobufs used in Google APIs',
    long_description=long_description,
    install_requires=install_requires,
    extras_require=extras_require,
    license='Apache-2.0',
    packages=find_packages(),
    namespace_packages=['google', 'google.logging', ],
    url='https://github.com/googleapis/googleapis'
)
| {
"repo_name": "ethanbao/api-client-staging-1",
"path": "generated/python/googleapis-common-protos/setup.py",
"copies": "2",
"size": "1282",
"license": "bsd-3-clause",
"hash": 3472802512170907600,
"line_mean": 26.8695652174,
"line_max": 66,
"alpha_frac": 0.6879875195,
"autogenerated": false,
"ratio": 3.8041543026706233,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 46
} |
"""A setup module for the GRPC Cloud Spanner API service.
See:
https://packaging.python.org/en/latest/distributing.html
https://github.com/pypa/sampleproject
"""
# The original imported both `setuptools` and names from it; the from-import
# alone is sufficient, so the redundant module import was dropped.
from setuptools import setup, find_packages

# Core runtime dependencies.
install_requires = [
    'googleapis-common-protos>=1.5.2, <2.0dev',
    'oauth2client>=2.0.0, <4.0dev',
]

# Optional gRPC transport support:
# `pip install proto-google-cloud-spanner-v1[grpc]`.
extras_require = {
    'grpc': [
        'googleapis-common-protos[grpc]>=1.5.2, <2.0dev',
        'grpcio>=1.0.2, <2.0dev',
    ],
}

# Read the long description with a context manager so the file handle is
# closed promptly (the previous inline open(...).read() never closed it).
with open('README.rst') as readme_file:
    long_description = readme_file.read()

setup(
    name='proto-google-cloud-spanner-v1',
    version='0.15.4',
    author='Google Inc',
    author_email='googleapis-packages@google.com',
    classifiers=[
        # 'Intended Audience :: Developers' appeared twice upstream;
        # listed exactly once here.
        'Development Status :: 3 - Alpha',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: Apache Software License',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: Implementation :: CPython',
    ],
    description='GRPC library for the Cloud Spanner API',
    long_description=long_description,
    install_requires=install_requires,
    extras_require=extras_require,
    license='Apache-2.0',
    packages=find_packages(),
    namespace_packages=['google.cloud.proto.spanner', 'google.cloud.proto', 'google.cloud', 'google'],
    url='https://github.com/googleapis/googleapis'
)
| {
"repo_name": "pongad/api-client-staging",
"path": "generated/python/proto-google-cloud-spanner-v1/setup.py",
"copies": "7",
"size": "1503",
"license": "bsd-3-clause",
"hash": -8283254453291140000,
"line_mean": 29.06,
"line_max": 100,
"alpha_frac": 0.6793080506,
"autogenerated": false,
"ratio": 3.5116822429906542,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.7690990293590654,
"avg_score": null,
"num_lines": null
} |
"""A setup module for the GRPC Cloud Spanner Database Admin API service.
See:
https://packaging.python.org/en/latest/distributing.html
https://github.com/pypa/sampleproject
"""
import setuptools
from setuptools import setup, find_packages
install_requires = [
'googleapis-common-protos>=1.5.2, <2.0dev',
'oauth2client>=2.0.0, <4.0dev',
]
extras_require = {
'grpc': [
'googleapis-common-protos[grpc]>=1.5.2, <2.0dev',
'grpcio>=1.0.2, <2.0dev',
],
}
setuptools.setup(
name='proto-google-cloud-spanner-admin-database-v1',
version='0.15.4',
author='Google Inc',
author_email='googleapis-packages@google.com',
classifiers=[
'Intended Audience :: Developers',
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: Implementation :: CPython',
],
description='GRPC library for the Cloud Spanner Database Admin API',
long_description=open('README.rst').read(),
install_requires=install_requires,
extras_require=extras_require,
license='Apache-2.0',
packages=find_packages(),
namespace_packages=['google.cloud.proto.spanner.admin.database', 'google.cloud.proto.spanner.admin', 'google.cloud.proto.spanner', 'google.cloud.proto', 'google.cloud', 'google'],
url='https://github.com/googleapis/googleapis'
)
| {
"repo_name": "pongad/api-client-staging",
"path": "generated/python/proto-google-cloud-spanner-admin-database-v1/setup.py",
"copies": "7",
"size": "1629",
"license": "bsd-3-clause",
"hash": -3882361953797961700,
"line_mean": 31.58,
"line_max": 181,
"alpha_frac": 0.6899938613,
"autogenerated": false,
"ratio": 3.5259740259740258,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.7715967887274026,
"avg_score": null,
"num_lines": null
} |
"""A setup module for the GRPC Cloud Spanner Instance Admin API service.
See:
https://packaging.python.org/en/latest/distributing.html
https://github.com/pypa/sampleproject
"""
import setuptools
from setuptools import setup, find_packages
install_requires = [
'googleapis-common-protos>=1.5.2, <2.0dev',
'oauth2client>=2.0.0, <4.0dev',
]
extras_require = {
'grpc': [
'googleapis-common-protos[grpc]>=1.5.2, <2.0dev',
'grpcio>=1.0.2, <2.0dev',
],
}
setuptools.setup(
name='proto-google-cloud-spanner-admin-instance-v1',
version='0.15.4',
author='Google Inc',
author_email='googleapis-packages@google.com',
classifiers=[
'Intended Audience :: Developers',
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: Implementation :: CPython',
],
description='GRPC library for the Cloud Spanner Instance Admin API',
long_description=open('README.rst').read(),
install_requires=install_requires,
extras_require=extras_require,
license='Apache-2.0',
packages=find_packages(),
namespace_packages=['google.cloud.proto.spanner.admin.instance', 'google.cloud.proto.spanner.admin', 'google.cloud.proto.spanner', 'google.cloud.proto', 'google.cloud', 'google'],
url='https://github.com/googleapis/googleapis'
)
| {
"repo_name": "swcloud/api-client-staging",
"path": "generated/python/proto-google-cloud-spanner-admin-instance-v1/setup.py",
"copies": "7",
"size": "1629",
"license": "bsd-3-clause",
"hash": -7265288659098121000,
"line_mean": 31.58,
"line_max": 181,
"alpha_frac": 0.6899938613,
"autogenerated": false,
"ratio": 3.5259740259740258,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0001098901098901099,
"num_lines": 50
} |
"""A setup module for the GRPC DLP API service.
See:
https://packaging.python.org/en/latest/distributing.html
https://github.com/pypa/sampleproject
"""
import setuptools
from setuptools import setup, find_packages
install_requires = [
'googleapis-common-protos>=1.5.2, <2.0dev',
'oauth2client>=2.0.0, <4.0dev',
]
extras_require = {
'grpc': [
'googleapis-common-protos[grpc]>=1.5.2, <2.0dev',
'grpcio>=1.0.2, <2.0dev',
],
}
setuptools.setup(
name='proto-google-cloud-dlp-v2beta1',
version='0.15.4',
author='Google Inc',
author_email='googleapis-packages@google.com',
classifiers=[
'Intended Audience :: Developers',
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: Implementation :: CPython',
],
description='GRPC library for the DLP API',
long_description=open('README.rst').read(),
install_requires=install_requires,
extras_require=extras_require,
license='Apache-2.0',
packages=find_packages(),
namespace_packages=['google.cloud.proto.privacy.dlp', 'google.cloud.proto.privacy', 'google.cloud.proto', 'google.cloud', 'google'],
url='https://github.com/googleapis/googleapis'
)
| {
"repo_name": "shinfan/api-client-staging",
"path": "generated/python/proto-google-cloud-dlp-v2beta1/setup.py",
"copies": "7",
"size": "1518",
"license": "bsd-3-clause",
"hash": 1383008718159483400,
"line_mean": 29.36,
"line_max": 134,
"alpha_frac": 0.6785243742,
"autogenerated": false,
"ratio": 3.473684210526316,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.7652208584726317,
"avg_score": null,
"num_lines": null
} |
"""A setup module for the GRPC google-bigtable service.
See:
https://packaging.python.org/en/latest/distributing.html
https://github.com/pypa/sampleproject
"""
import setuptools
from setuptools import setup, find_packages
install_requires = [
'oauth2client>=2.0.0, <4.0.0dev',
'grpcio>=1.0.0, <2.0.0dev',
'googleapis-common-protos[grpc]>=1.3.5, <2.0.0dev'
]
setuptools.setup(
name='grpc-google-bigtable-v2',
version='0.11.1',
author='Google Inc',
author_email='googleapis-packages@google.com',
classifiers=[
'Intended Audience :: Developers',
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: Implementation :: CPython',
],
description='GRPC library for the google-bigtable-v2 service',
long_description=open('README.rst').read(),
install_requires=install_requires,
license='Apache-2.0',
packages=find_packages(),
namespace_packages=['google', 'google.bigtable', ],
url='https://github.com/googleapis/googleapis'
)
| {
"repo_name": "pongad/api-client-staging",
"path": "generated/python/grpc-google-bigtable-v2/setup.py",
"copies": "9",
"size": "1340",
"license": "bsd-3-clause",
"hash": 8531675239363650000,
"line_mean": 30.1627906977,
"line_max": 66,
"alpha_frac": 0.6843283582,
"autogenerated": false,
"ratio": 3.6021505376344085,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.878647889583441,
"avg_score": null,
"num_lines": null
} |
"""A setup module for the GRPC Google Cloud Datastore API service.
See:
https://packaging.python.org/en/latest/distributing.html
https://github.com/pypa/sampleproject
"""
import setuptools
from setuptools import setup, find_packages
install_requires = [
'googleapis-common-protos>=1.5.2, <2.0dev',
'oauth2client>=2.0.0, <4.0dev',
]
extras_require = {
'grpc': [
'googleapis-common-protos[grpc]>=1.5.2, <2.0dev',
'grpcio>=1.0.2, <2.0dev',
],
}
setuptools.setup(
name='proto-google-cloud-datastore-v1',
version='0.90.4',
author='Google Inc',
author_email='googleapis-packages@google.com',
classifiers=[
'Intended Audience :: Developers',
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: Implementation :: CPython',
],
description='GRPC library for the Google Cloud Datastore API',
long_description=open('README.rst').read(),
install_requires=install_requires,
extras_require=extras_require,
license='Apache-2.0',
packages=find_packages(),
namespace_packages=['google.cloud.proto.datastore', 'google.cloud.proto', 'google.cloud', 'google'],
url='https://github.com/googleapis/googleapis'
)
| {
"repo_name": "garrettjonesgoogle/api-client-staging",
"path": "generated/python/proto-google-cloud-datastore-v1/setup.py",
"copies": "7",
"size": "1524",
"license": "bsd-3-clause",
"hash": 6101631680157536000,
"line_mean": 29.48,
"line_max": 102,
"alpha_frac": 0.6824146982,
"autogenerated": false,
"ratio": 3.560747663551402,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00019417475728155338,
"num_lines": 50
} |
"""A setup module for the GRPC Google Cloud Functions API service.
See:
https://packaging.python.org/en/latest/distributing.html
https://github.com/pypa/sampleproject
"""
import setuptools
from setuptools import setup, find_packages
install_requires = [
'googleapis-common-protos>=1.5.2, <2.0dev',
'oauth2client>=2.0.0, <4.0dev',
]
extras_require = {
'grpc': [
'googleapis-common-protos[grpc]>=1.5.2, <2.0dev',
'grpcio>=1.0.2, <2.0dev',
],
}
setuptools.setup(
name='proto-google-cloud-functions-v1beta2',
version='0.15.4',
author='Google Inc',
author_email='googleapis-packages@google.com',
classifiers=[
'Intended Audience :: Developers',
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: Implementation :: CPython',
],
description='GRPC library for the Google Cloud Functions API',
long_description=open('README.rst').read(),
install_requires=install_requires,
extras_require=extras_require,
license='Apache-2.0',
packages=find_packages(),
namespace_packages=['google.cloud.proto.functions', 'google.cloud.proto', 'google.cloud', 'google'],
url='https://github.com/googleapis/googleapis'
)
| {
"repo_name": "swcloud/api-client-staging",
"path": "generated/python/proto-google-cloud-functions-v1beta2/setup.py",
"copies": "7",
"size": "1530",
"license": "bsd-3-clause",
"hash": 388820057009883700,
"line_mean": 29.6,
"line_max": 102,
"alpha_frac": 0.6836601307,
"autogenerated": false,
"ratio": 3.574766355140187,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00019417475728155338,
"num_lines": 50
} |
"""A setup module for the GRPC google-cloud-language service.
See:
https://packaging.python.org/en/latest/distributing.html
https://github.com/pypa/sampleproject
"""
import setuptools
from setuptools import setup, find_packages
install_requires = [
'oauth2client>=2.0.0, <4.0.0dev',
'grpcio>=1.0.0, <2.0.0dev',
'googleapis-common-protos[grpc]>=1.3.5, <2.0.0dev'
]
setuptools.setup(
name='grpc-google-cloud-language-v1beta1',
version='0.11.1',
author='Google Inc',
author_email='googleapis-packages@google.com',
classifiers=[
'Intended Audience :: Developers',
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: Implementation :: CPython',
],
description='GRPC library for the google-cloud-language-v1beta1 service',
long_description=open('README.rst').read(),
install_requires=install_requires,
license='Apache-2.0',
packages=find_packages(),
namespace_packages=['google', 'google.cloud', 'google.cloud.grpc', 'google.cloud.grpc.language', ],
url='https://github.com/googleapis/googleapis'
)
| {
"repo_name": "michaelbausor/api-client-staging",
"path": "generated/python/grpc-google-cloud-language-v1beta1/setup.py",
"copies": "3",
"size": "1416",
"license": "bsd-3-clause",
"hash": 457350649328146000,
"line_mean": 31.9302325581,
"line_max": 101,
"alpha_frac": 0.6899717514,
"autogenerated": false,
"ratio": 3.557788944723618,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5747760696123618,
"avg_score": null,
"num_lines": null
} |
"""A setup module for the GRPC Google Cloud Natural Language API service.
See:
https://packaging.python.org/en/latest/distributing.html
https://github.com/pypa/sampleproject
"""
import setuptools
from setuptools import setup, find_packages
install_requires = [
'googleapis-common-protos>=1.5.2, <2.0dev',
'oauth2client>=2.0.0, <4.0dev',
]
extras_require = {
'grpc': [
'googleapis-common-protos[grpc]>=1.5.2, <2.0dev',
'grpcio>=1.0.2, <2.0dev',
],
}
setuptools.setup(
name='proto-google-cloud-language-v1beta2',
version='0.15.4',
author='Google Inc',
author_email='googleapis-packages@google.com',
classifiers=[
'Intended Audience :: Developers',
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: Implementation :: CPython',
],
description='GRPC library for the Google Cloud Natural Language API',
long_description=open('README.rst').read(),
install_requires=install_requires,
extras_require=extras_require,
license='Apache-2.0',
packages=find_packages(),
namespace_packages=['google.cloud.proto.language', 'google.cloud.proto', 'google.cloud', 'google'],
url='https://github.com/googleapis/googleapis'
)
| {
"repo_name": "swcloud/api-client-staging",
"path": "generated/python/proto-google-cloud-language-v1beta2/setup.py",
"copies": "7",
"size": "1542",
"license": "bsd-3-clause",
"hash": 3689337529660708400,
"line_mean": 29.84,
"line_max": 101,
"alpha_frac": 0.6848249027,
"autogenerated": false,
"ratio": 3.552995391705069,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.000196078431372549,
"num_lines": 50
} |
"""A setup module for the GRPC Google Cloud Pub/Sub API service.
See:
https://packaging.python.org/en/latest/distributing.html
https://github.com/pypa/sampleproject
"""
import setuptools
from setuptools import setup, find_packages
install_requires = [
'googleapis-common-protos>=1.5.2, <2.0dev',
'oauth2client>=2.0.0, <4.0dev',
]
extras_require = {
'grpc': [
'googleapis-common-protos[grpc]>=1.5.2, <2.0dev',
'grpcio>=1.0.2, <2.0dev',
],
}
setuptools.setup(
name='proto-google-cloud-pubsub-v1',
version='0.15.4',
author='Google Inc',
author_email='googleapis-packages@google.com',
classifiers=[
'Intended Audience :: Developers',
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: Implementation :: CPython',
],
description='GRPC library for the Google Cloud Pub/Sub API',
long_description=open('README.rst').read(),
install_requires=install_requires,
extras_require=extras_require,
license='Apache-2.0',
packages=find_packages(),
namespace_packages=['google.cloud.proto.pubsub', 'google.cloud.proto', 'google.cloud', 'google'],
url='https://github.com/googleapis/googleapis'
)
| {
"repo_name": "eoogbe/api-client-staging",
"path": "generated/python/proto-google-cloud-pubsub-v1/setup.py",
"copies": "7",
"size": "1515",
"license": "bsd-3-clause",
"hash": 6693568408892501000,
"line_mean": 29.3,
"line_max": 99,
"alpha_frac": 0.6792079208,
"autogenerated": false,
"ratio": 3.5069444444444446,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0002,
"num_lines": 50
} |
"""A setup module for the GRPC Google Cloud Speech API service.
See:
https://packaging.python.org/en/latest/distributing.html
https://github.com/pypa/sampleproject
"""
import setuptools
from setuptools import setup, find_packages
install_requires = [
'googleapis-common-protos>=1.5.2, <2.0dev',
'oauth2client>=2.0.0, <4.0dev',
]
extras_require = {
'grpc': [
'googleapis-common-protos[grpc]>=1.5.2, <2.0dev',
'grpcio>=1.0.2, <2.0dev',
],
}
setuptools.setup(
name='proto-google-cloud-speech-v1beta1',
version='0.15.4',
author='Google Inc',
author_email='googleapis-packages@google.com',
classifiers=[
'Intended Audience :: Developers',
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: Implementation :: CPython',
],
description='GRPC library for the Google Cloud Speech API',
long_description=open('README.rst').read(),
install_requires=install_requires,
extras_require=extras_require,
license='Apache-2.0',
packages=find_packages(),
namespace_packages=['google.cloud.proto.speech', 'google.cloud.proto', 'google.cloud', 'google'],
url='https://github.com/googleapis/googleapis'
)
| {
"repo_name": "pongad/api-client-staging",
"path": "generated/python/proto-google-cloud-speech-v1beta1/setup.py",
"copies": "7",
"size": "1518",
"license": "bsd-3-clause",
"hash": -1134717863775679700,
"line_mean": 29.36,
"line_max": 99,
"alpha_frac": 0.6811594203,
"autogenerated": false,
"ratio": 3.513888888888889,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0002,
"num_lines": 50
} |
"""A setup module for the GRPC google-cloud-speech service.
See:
https://packaging.python.org/en/latest/distributing.html
https://github.com/pypa/sampleproject
"""
import setuptools
from setuptools import setup, find_packages
install_requires = [
'oauth2client>=2.0.0, <4.0.0dev',
'grpcio>=1.0.0, <2.0.0dev',
'googleapis-common-protos[grpc]>=1.3.5, <2.0.0dev'
]
setuptools.setup(
name='grpc-google-cloud-speech-v1beta1',
version='0.11.1',
author='Google Inc',
author_email='googleapis-packages@google.com',
classifiers=[
'Intended Audience :: Developers',
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: Implementation :: CPython',
],
description='GRPC library for the google-cloud-speech-v1beta1 service',
long_description=open('README.rst').read(),
install_requires=install_requires,
license='Apache-2.0',
packages=find_packages(),
namespace_packages=['google', 'google.cloud', 'google.cloud.grpc', 'google.cloud.grpc.speech', ],
url='https://github.com/googleapis/googleapis'
)
| {
"repo_name": "geigerj/api-client-staging",
"path": "generated/python/grpc-google-cloud-speech-v1beta1/setup.py",
"copies": "3",
"size": "1408",
"license": "bsd-3-clause",
"hash": -5700958066181123000,
"line_mean": 31.7441860465,
"line_max": 99,
"alpha_frac": 0.6882102273,
"autogenerated": false,
"ratio": 3.5376884422110555,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5725898669511056,
"avg_score": null,
"num_lines": null
} |
"""A setup module for the GRPC Google Cloud Vision API service.
See:
https://packaging.python.org/en/latest/distributing.html
https://github.com/pypa/sampleproject
"""
import setuptools
from setuptools import setup, find_packages
install_requires = [
'googleapis-common-protos>=1.5.2, <2.0dev',
'oauth2client>=2.0.0, <4.0dev',
]
extras_require = {
'grpc': [
'googleapis-common-protos[grpc]>=1.5.2, <2.0dev',
'grpcio>=1.0.2, <2.0dev',
],
}
setuptools.setup(
name='proto-google-cloud-vision-v1',
version='0.90.3',
author='Google Inc',
author_email='googleapis-packages@google.com',
classifiers=[
'Intended Audience :: Developers',
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: Implementation :: CPython',
],
description='GRPC library for the Google Cloud Vision API',
long_description=open('README.rst').read(),
install_requires=install_requires,
extras_require=extras_require,
license='Apache-2.0',
packages=find_packages(),
namespace_packages=['google.cloud.proto.vision', 'google.cloud.proto', 'google.cloud', 'google'],
url='https://github.com/googleapis/googleapis'
)
| {
"repo_name": "shinfan/api-client-staging",
"path": "generated/python/proto-google-cloud-vision-v1/setup.py",
"copies": "7",
"size": "1512",
"license": "bsd-3-clause",
"hash": -5462191022179193000,
"line_mean": 29.24,
"line_max": 99,
"alpha_frac": 0.6798941799,
"autogenerated": false,
"ratio": 3.532710280373832,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.7712604460273832,
"avg_score": null,
"num_lines": null
} |
"""A setup module for the GRPC google-cloud-vision service.
See:
https://packaging.python.org/en/latest/distributing.html
https://github.com/pypa/sampleproject
"""
import setuptools
from setuptools import setup, find_packages
install_requires = [
'oauth2client>=2.0.0, <4.0.0dev',
'grpcio>=1.0.0, <2.0.0dev',
'googleapis-common-protos[grpc]>=1.3.5, <2.0.0dev'
]
setuptools.setup(
name='grpc-google-cloud-vision-v1',
version='0.11.1',
author='Google Inc',
author_email='googleapis-packages@google.com',
classifiers=[
'Intended Audience :: Developers',
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: Implementation :: CPython',
],
description='GRPC library for the google-cloud-vision-v1 service',
long_description=open('README.rst').read(),
install_requires=install_requires,
license='Apache-2.0',
packages=find_packages(),
namespace_packages=['google', 'google.cloud', 'google.cloud.grpc', 'google.cloud.grpc.vision', ],
url='https://github.com/googleapis/googleapis'
)
| {
"repo_name": "michaelbausor/api-client-staging",
"path": "generated/python/grpc-google-cloud-vision-v1/setup.py",
"copies": "3",
"size": "1398",
"license": "bsd-3-clause",
"hash": -8434122823176921000,
"line_mean": 31.511627907,
"line_max": 99,
"alpha_frac": 0.6859799714,
"autogenerated": false,
"ratio": 3.548223350253807,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5734203321653807,
"avg_score": null,
"num_lines": null
} |
"""A setup module for the GRPC google-datastore service.
See:
https://packaging.python.org/en/latest/distributing.html
https://github.com/pypa/sampleproject
"""
import setuptools
from setuptools import setup, find_packages
install_requires = [
'oauth2client>=2.0.0, <4.0.0dev',
'grpcio>=1.0.0, <2.0.0dev',
'googleapis-common-protos[grpc]>=1.3.5, <2.0.0dev'
]
setuptools.setup(
name='grpc-google-datastore-v1',
version='0.11.1',
author='Google Inc',
author_email='googleapis-packages@google.com',
classifiers=[
'Intended Audience :: Developers',
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: Implementation :: CPython',
],
description='GRPC library for the google-datastore-v1 service',
long_description=open('README.rst').read(),
install_requires=install_requires,
license='Apache-2.0',
packages=find_packages(),
namespace_packages=['google', 'google.datastore', ],
url='https://github.com/googleapis/googleapis'
)
| {
"repo_name": "geigerj/api-client-staging",
"path": "generated/python/grpc-google-datastore-v1/setup.py",
"copies": "2",
"size": "1344",
"license": "bsd-3-clause",
"hash": 4482317552393943600,
"line_mean": 30.2558139535,
"line_max": 66,
"alpha_frac": 0.6852678571,
"autogenerated": false,
"ratio": 3.6129032258064515,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 43
} |
"""A setup module for the GRPC google-devtools-cloudtrace service.
See:
https://packaging.python.org/en/latest/distributing.html
https://github.com/pypa/sampleproject
"""
import setuptools
from setuptools import setup, find_packages
install_requires = [
'oauth2client>=1.4.11',
'grpcio>=1.0.0',
'googleapis-common-protos[grpc]>=1.3.2'
]
setuptools.setup(
name='grpc-google-devtools-cloudtrace-v1',
version='0.9.0',
author='Google Inc',
author_email='googleapis-packages@google.com',
classifiers=[
'Intended Audience :: Developers',
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: Implementation :: CPython',
],
description='GRPC library for the google-devtools-cloudtrace-v1 service',
long_description=open('README.rst').read(),
install_requires=install_requires,
license='Apache-2.0',
packages=find_packages(),
namespace_packages=['google', 'google.devtools', 'google.devtools.cloudtrace', ],
url='https://github.com/googleapis/googleapis'
)
| {
"repo_name": "ethanbao/api-client-staging-1",
"path": "generated/python/grpc-google-devtools-cloudtrace-v1/setup.py",
"copies": "1",
"size": "1194",
"license": "bsd-3-clause",
"hash": 3683011183866248000,
"line_mean": 29.6153846154,
"line_max": 83,
"alpha_frac": 0.7093802345,
"autogenerated": false,
"ratio": 3.6738461538461538,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.48832263883461535,
"avg_score": null,
"num_lines": null
} |
"""A setup module for the GRPC google-iam-admin service.
See:
https://packaging.python.org/en/latest/distributing.html
https://github.com/pypa/sampleproject
"""
import setuptools
from setuptools import setup, find_packages
install_requires = [
'oauth2client>=1.4.11, <2.0.0',
'grpcio>=1.0.0, <2.0.0',
'googleapis-common-protos[grpc]>=1.3.4, <2.0.0'
]
setuptools.setup(
name='grpc-google-iam-admin-v1',
version='0.10.0',
author='Google Inc',
author_email='googleapis-packages@google.com',
classifiers=[
'Intended Audience :: Developers',
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: Implementation :: CPython',
],
description='GRPC library for the google-iam-admin-v1 service',
long_description=open('README.rst').read(),
install_requires=install_requires,
license='Apache-2.0',
packages=find_packages(),
namespace_packages=['google', 'google.iam', 'google.iam.admin', ],
url='https://github.com/googleapis/googleapis'
)
| {
"repo_name": "ethanbao/api-client-staging-1",
"path": "generated/python/grpc-google-iam-admin-v1/setup.py",
"copies": "2",
"size": "1350",
"license": "bsd-3-clause",
"hash": 5754928018308395000,
"line_mean": 30.3953488372,
"line_max": 68,
"alpha_frac": 0.68,
"autogenerated": false,
"ratio": 3.5340314136125652,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5214031413612565,
"avg_score": null,
"num_lines": null
} |
"""A setup module for the GRPC google-iam service.
See:
https://packaging.python.org/en/latest/distributing.html
https://github.com/pypa/sampleproject
"""
import setuptools
from setuptools import find_packages

# Runtime dependencies, pinned below the next major version.
install_requires = [
  'grpcio>=1.0.0, <2.0.0dev',
  'googleapis-common-protos[grpc]>=1.5.2, <2.0.0dev'
]

# Read the long description up front so the file handle is closed
# before setup() runs (the bare open().read() idiom leaks the handle).
with open('README.rst') as readme_file:
    long_description = readme_file.read()

setuptools.setup(
  name='grpc-google-iam-v1',
  version='0.11.4',
  author='Google Inc',
  author_email='googleapis-packages@google.com',
  classifiers=[
    'Intended Audience :: Developers',
    'Development Status :: 3 - Alpha',
    'License :: OSI Approved :: Apache Software License',
    'Programming Language :: Python',
    'Programming Language :: Python :: 2',
    'Programming Language :: Python :: 2.7',
    'Programming Language :: Python :: 3',
    'Programming Language :: Python :: 3.4',
    'Programming Language :: Python :: 3.5',
    'Programming Language :: Python :: Implementation :: CPython',
  ],
  description='GRPC library for the google-iam-v1 service',
  long_description=long_description,
  install_requires=install_requires,
  license='Apache-2.0',
  packages=find_packages(),
  namespace_packages=['google', 'google.iam', ],
  url='https://github.com/googleapis/googleapis'
)
| {
"repo_name": "pongad/api-client-staging",
"path": "generated/python/grpc-google-iam-v1/setup.py",
"copies": "2",
"size": "1284",
"license": "bsd-3-clause",
"hash": -7473457479642744000,
"line_mean": 29.5714285714,
"line_max": 66,
"alpha_frac": 0.6822429907,
"autogenerated": false,
"ratio": 3.647727272727273,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5329970263427274,
"avg_score": null,
"num_lines": null
} |
"""A setup module for the GRPC google-logging service.
See:
https://packaging.python.org/en/latest/distributing.html
https://github.com/pypa/sampleproject
"""
import setuptools
from setuptools import find_packages

# Runtime dependencies, pinned below the next major version.
install_requires = [
  'oauth2client>=2.0.0, <4.0.0dev',
  'grpcio>=1.0.0, <2.0.0dev',
  'googleapis-common-protos[grpc]>=1.3.5, <2.0.0dev'
]

# Read the long description up front so the file handle is closed
# before setup() runs (the bare open().read() idiom leaks the handle).
with open('README.rst') as readme_file:
    long_description = readme_file.read()

setuptools.setup(
  name='grpc-google-logging-v2',
  version='0.11.1',
  author='Google Inc',
  author_email='googleapis-packages@google.com',
  classifiers=[
    'Intended Audience :: Developers',
    'Development Status :: 3 - Alpha',
    'License :: OSI Approved :: Apache Software License',
    'Programming Language :: Python',
    'Programming Language :: Python :: 2',
    'Programming Language :: Python :: 2.7',
    'Programming Language :: Python :: 3',
    'Programming Language :: Python :: 3.4',
    'Programming Language :: Python :: 3.5',
    'Programming Language :: Python :: Implementation :: CPython',
  ],
  description='GRPC library for the google-logging-v2 service',
  long_description=long_description,
  install_requires=install_requires,
  license='Apache-2.0',
  packages=find_packages(),
  namespace_packages=['google', 'google.logging', ],
  url='https://github.com/googleapis/googleapis'
)
| {
"repo_name": "michaelbausor/api-client-staging",
"path": "generated/python/grpc-google-logging-v2/setup.py",
"copies": "3",
"size": "1336",
"license": "bsd-3-clause",
"hash": 1506024306381504,
"line_mean": 30.0697674419,
"line_max": 66,
"alpha_frac": 0.6833832335,
"autogenerated": false,
"ratio": 3.5913978494623655,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 43
} |
"""A setup module for the GRPC google-monitoring service.
See:
https://packaging.python.org/en/latest/distributing.html
https://github.com/pypa/sampleproject
"""
import setuptools
from setuptools import find_packages

# Runtime dependencies, pinned below the next major version.
install_requires = [
  'oauth2client>=2.0.0, <4.0.0dev',
  'grpcio>=1.0.0, <2.0.0dev',
  'googleapis-common-protos[grpc]>=1.3.4, <2.0.0dev'
]

# Read the long description up front so the file handle is closed
# before setup() runs (the bare open().read() idiom leaks the handle).
with open('README.rst') as readme_file:
    long_description = readme_file.read()

setuptools.setup(
  name='grpc-google-monitoring-v3',
  version='0.11.1',
  author='Google Inc',
  author_email='googleapis-packages@google.com',
  classifiers=[
    'Intended Audience :: Developers',
    'Development Status :: 3 - Alpha',
    'License :: OSI Approved :: Apache Software License',
    'Programming Language :: Python',
    'Programming Language :: Python :: 2',
    'Programming Language :: Python :: 2.7',
    'Programming Language :: Python :: 3',
    'Programming Language :: Python :: 3.4',
    'Programming Language :: Python :: 3.5',
    'Programming Language :: Python :: Implementation :: CPython',
  ],
  description='GRPC library for the google-monitoring-v3 service',
  long_description=long_description,
  install_requires=install_requires,
  license='Apache-2.0',
  packages=find_packages(),
  namespace_packages=['google', 'google.monitoring', ],
  url='https://github.com/googleapis/googleapis'
)
| {
"repo_name": "ethanbao/api-client-staging-1",
"path": "generated/python/grpc-google-monitoring-v3/setup.py",
"copies": "1",
"size": "1348",
"license": "bsd-3-clause",
"hash": 293291216804337900,
"line_mean": 30.3488372093,
"line_max": 66,
"alpha_frac": 0.6862017804,
"autogenerated": false,
"ratio": 3.6236559139784945,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9809857694378494,
"avg_score": 0,
"num_lines": 43
} |
"""A setup module for the GRPC google-pubsub service.
See:
https://packaging.python.org/en/latest/distributing.html
https://github.com/pypa/sampleproject
"""
import setuptools
from setuptools import find_packages

# Runtime dependencies, pinned below the next major version.
install_requires = [
  'oauth2client>=2.0.0, <4.0.0dev',
  'grpcio>=1.0.0, <2.0.0dev',
  'googleapis-common-protos[grpc]>=1.3.5, <2.0.0dev'
]

# Read the long description up front so the file handle is closed
# before setup() runs (the bare open().read() idiom leaks the handle).
with open('README.rst') as readme_file:
    long_description = readme_file.read()

setuptools.setup(
  name='grpc-google-pubsub-v1',
  version='0.11.1',
  author='Google Inc',
  author_email='googleapis-packages@google.com',
  classifiers=[
    'Intended Audience :: Developers',
    'Development Status :: 3 - Alpha',
    'License :: OSI Approved :: Apache Software License',
    'Programming Language :: Python',
    'Programming Language :: Python :: 2',
    'Programming Language :: Python :: 2.7',
    'Programming Language :: Python :: 3',
    'Programming Language :: Python :: 3.4',
    'Programming Language :: Python :: 3.5',
    'Programming Language :: Python :: Implementation :: CPython',
  ],
  description='GRPC library for the google-pubsub-v1 service',
  long_description=long_description,
  install_requires=install_requires,
  license='Apache-2.0',
  packages=find_packages(),
  namespace_packages=['google', 'google.pubsub', ],
  url='https://github.com/googleapis/googleapis'
)
| {
"repo_name": "ethanbao/api-client-staging-1",
"path": "generated/python/grpc-google-pubsub-v1/setup.py",
"copies": "3",
"size": "1332",
"license": "bsd-3-clause",
"hash": -5689781425230723000,
"line_mean": 29.976744186,
"line_max": 66,
"alpha_frac": 0.6824324324,
"autogenerated": false,
"ratio": 3.5806451612903225,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5763077593690322,
"avg_score": null,
"num_lines": null
} |
"""A setup module for the GRPC packager service.
See:
https://packaging.python.org/en/latest/distributing.html
https://github.com/pypa/sampleproject
"""
import setuptools
from setuptools import find_packages

# Runtime dependencies, pinned below the next incompatible version.
install_requires = [
  'oauth2client>=0.4.1, <0.5.0dev',
  'grpcio>=0.15.0, <0.16.0dev',
  'googleapis-common-protos[grpc]>=1.2.0, <2.0.0dev'
]

# Read the long description up front so the file handle is closed
# before setup() runs (the bare open().read() idiom leaks the handle).
with open('README.rst') as readme_file:
    long_description = readme_file.read()

setuptools.setup(
  name='packager-unittest-v2',
  version='1.0.0',
  author='Google Inc',
  author_email='googleapis-packages@google.com',
  classifiers=[
    'Intended Audience :: Developers',
    'Development Status :: 3 - Alpha',
    'License :: OSI Approved :: Apache Software License',
    'Programming Language :: Python',
    'Programming Language :: Python :: 2',
    'Programming Language :: Python :: 2.7',
    'Programming Language :: Python :: 3',
    'Programming Language :: Python :: 3.4',
    'Programming Language :: Python :: 3.5',
    'Programming Language :: Python :: Implementation :: CPython',
  ],
  description='GRPC library for the packager-v2 service',
  long_description=long_description,
  install_requires=install_requires,
  license='BSD-3-Clause',
  packages=find_packages(),
  namespace_packages=['pkgTop', 'pkgTop.pkgNext', ],
  url='https://github.com/google/googleapis'
)
| {
"repo_name": "googleapis/packman",
"path": "test/fixtures/python/setup.py",
"copies": "1",
"size": "1321",
"license": "apache-2.0",
"hash": 6511745982683458000,
"line_mean": 29.7209302326,
"line_max": 66,
"alpha_frac": 0.6820590462,
"autogenerated": false,
"ratio": 3.5510752688172045,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9733134315017204,
"avg_score": 0,
"num_lines": 43
} |
"""A setup module for the GRPC Stackdriver Error Reporting API service.
See:
https://packaging.python.org/en/latest/distributing.html
https://github.com/pypa/sampleproject
"""
import setuptools
from setuptools import find_packages

# Core runtime dependencies, pinned below the next major version.
install_requires = [
  'googleapis-common-protos>=1.5.2, <2.0dev',
  'oauth2client>=2.0.0, <4.0dev',
]

# Optional gRPC transport support, installed via the `[grpc]` extra.
extras_require = {
  'grpc': [
    'googleapis-common-protos[grpc]>=1.5.2, <2.0dev',
    'grpcio>=1.0.2, <2.0dev',
  ],
}

# Read the long description up front so the file handle is closed
# before setup() runs (the bare open().read() idiom leaks the handle).
with open('README.rst') as readme_file:
    long_description = readme_file.read()

setuptools.setup(
  name='proto-google-cloud-error-reporting-v1beta1',
  version='0.15.4',
  author='Google Inc',
  author_email='googleapis-packages@google.com',
  classifiers=[
    'Intended Audience :: Developers',
    'Development Status :: 3 - Alpha',
    'License :: OSI Approved :: Apache Software License',
    'Programming Language :: Python',
    'Programming Language :: Python :: 2',
    'Programming Language :: Python :: 2.7',
    'Programming Language :: Python :: 3',
    'Programming Language :: Python :: 3.4',
    'Programming Language :: Python :: 3.5',
    'Programming Language :: Python :: Implementation :: CPython',
  ],
  description='GRPC library for the Stackdriver Error Reporting API',
  long_description=long_description,
  install_requires=install_requires,
  extras_require=extras_require,
  license='Apache-2.0',
  packages=find_packages(),
  namespace_packages=['google.cloud.proto.devtools.clouderrorreporting', 'google.cloud.proto.devtools', 'google.cloud.proto', 'google.cloud', 'google'],
  url='https://github.com/googleapis/googleapis'
)
| {
"repo_name": "googleapis/api-client-staging",
"path": "generated/python/proto-google-cloud-error-reporting-v1beta1/setup.py",
"copies": "7",
"size": "1596",
"license": "bsd-3-clause",
"hash": 4459471351671020000,
"line_mean": 30.92,
"line_max": 152,
"alpha_frac": 0.6911027569,
"autogenerated": false,
"ratio": 3.55456570155902,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.7745668458459019,
"avg_score": null,
"num_lines": null
} |
"""A setup module for the GRPC Stackdriver Error Reporting service.
See:
https://packaging.python.org/en/latest/distributing.html
https://github.com/pypa/sampleproject
"""
import setuptools
from setuptools import find_packages

# Runtime dependencies, pinned below the next major version.
install_requires = [
  'oauth2client>=2.0.0, <4.0.0dev',
  'grpcio>=1.0.0, <2.0.0dev',
  'googleapis-common-protos[grpc]>=1.3.4, <2.0.0dev'
]

# Read the long description up front so the file handle is closed
# before setup() runs (the bare open().read() idiom leaks the handle).
with open('README.rst') as readme_file:
    long_description = readme_file.read()

setuptools.setup(
  name='grpc-google-devtools-clouderrorreporting-v1beta1',
  version='0.11.1',
  author='Google Inc',
  author_email='googleapis-packages@google.com',
  classifiers=[
    'Intended Audience :: Developers',
    'Development Status :: 3 - Alpha',
    'License :: OSI Approved :: Apache Software License',
    'Programming Language :: Python',
    'Programming Language :: Python :: 2',
    'Programming Language :: Python :: 2.7',
    'Programming Language :: Python :: 3',
    'Programming Language :: Python :: 3.4',
    'Programming Language :: Python :: 3.5',
    'Programming Language :: Python :: Implementation :: CPython',
  ],
  description='GRPC library for the Stackdriver Error Reporting service',
  long_description=long_description,
  install_requires=install_requires,
  license='Apache-2.0',
  packages=find_packages(),
  namespace_packages=['google', 'google.devtools', 'google.devtools.clouderrorreporting', ],
  url='https://github.com/googleapis/googleapis'
)
| {
"repo_name": "geigerj/api-client-staging",
"path": "generated/python/grpc-google-devtools-clouderrorreporting-v1beta1/setup.py",
"copies": "1",
"size": "1425",
"license": "bsd-3-clause",
"hash": -9089221541376350000,
"line_mean": 32.1395348837,
"line_max": 92,
"alpha_frac": 0.6975438596,
"autogenerated": false,
"ratio": 3.635204081632653,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4832747941232653,
"avg_score": null,
"num_lines": null
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.