blob_id stringlengths 40 40 | language stringclasses 1
value | repo_name stringlengths 5 133 | path stringlengths 2 333 | src_encoding stringclasses 30
values | length_bytes int64 18 5.47M | score float64 2.52 5.81 | int_score int64 3 5 | detected_licenses listlengths 0 67 | license_type stringclasses 2
values | text stringlengths 12 5.47M | download_success bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|
f15d8c06adbffe58939dc18c9c98c17ac28126ef | Python | cyrillbolliger/fido2-client-txAuthSimple | /main.py | UTF-8 | 1,062 | 2.515625 | 3 | [] | no_license | #!/usr/bin/env python3
import argparse
from client import Client

# defaults
rp_url = "http://localhost:9005"

# start reading cli arguments
parser = argparse.ArgumentParser()
parser.add_argument("action",
                    help="'register' an authenticator/user or 'authorize' a transaction",
                    choices=['register', 'authorize'])
parser.add_argument("user",
                    help="username")
parser.add_argument("--rp_url", help="URL of the relying party", default=rp_url)
parser.add_argument("--pin", help="authenticator pin", default="")
# BUGFIX: --tx and --tx_attack previously had no default, so argparse
# yielded None and str(None) == "None" was passed on as the transaction
# text shown to the user. Default to the empty string instead.
parser.add_argument("--tx",
                    help="transaction details: string that should be shown to the user",
                    default="")
parser.add_argument("--tx_attack",
                    help="simulate evil client and overwrite the transaction details with the given string",
                    default="")
args = parser.parse_args()

# launch program
client = Client(str(args.rp_url), str(args.user), str(args.pin))
if 'authorize' == args.action:
    client.authorize(str(args.tx), str(args.tx_attack))
elif 'register' == args.action:
    client.register()
| true |
0fb704d7b1d295316ae00d66e966d641d3d02fa9 | Python | kingspp/epyodbc | /src/epyodbc/constructs/column.py | UTF-8 | 1,312 | 2.84375 | 3 | [
"MIT"
] | permissive | # -*- coding: utf-8 -*-
"""
| **@created on:** 9/4/20,
| **@author:** prathyushsp,
| **@version:** v0.0.1
|
| **Description:**
|
|
| **Sphinx Documentation Status:**
"""
from epyodbc.constructs.base_class import BaseClass
from epyodbc.constructs.foreign_key import ForeignKey
import json
class Column(BaseClass):
    """Metadata describing a single database table column.

    Stores the column's name, SQL data type and length together with
    its nullability and primary/foreign key flags.
    """

    def __init__(self, column_name: str,
                 dtype: str,
                 length: int,
                 allow_null: bool,
                 is_primary_key: bool,
                 is_foreign_key: bool,
                 foreign_key: ForeignKey,
                 # is_unique: bool,
                 # default: typing.Optional
                 ):
        self.column_name = column_name
        self.dtype = dtype
        self.length = length
        self.allow_null = allow_null
        self.is_primary_key = is_primary_key
        self.is_foreign_key = is_foreign_key
        self.foreign_key = foreign_key
        # self.is_unique = is_unique
        # self.default = default

    def pretty(self):
        """Return a compact human-readable summary of the column.

        The summary always starts with "dtype(length)"; key flags are
        appended when set.
        """
        parts = [f"{self.dtype}({self.length})"]
        if self.is_primary_key:
            parts.append("| PRIMARY ")
        if self.is_foreign_key:
            parts.append(f"| FK ({self.foreign_key.pretty()})")
        return "".join(parts)
| true |
211ed44cf1907f94029ac0fdd6ee1343a81e909d | Python | pougivan/LED_Analyze | /build/lib/LED_Analyze/calc/calc_led.py | UTF-8 | 62,902 | 2.765625 | 3 | [
"MIT"
] | permissive | """
LED_Analyze - calc_basic
Date: 15 June 2018
By: Ivan Pougatchev
Version: 1.0
"""
import numpy as np
from os.path import exists
from analysis_main import DataObject
from calc.calc_common import CalcProcess
class CropCtr(CalcProcess):
    """
    Take an array of luminance data which is assumed to be a bright
    light on a background of dark noise. Take a 16x16 sample of the
    dark noise from the upper left corner of the array. Using this data
    determine a noise gate, as well as a count threshold, find the
    center of the light, and crop to within a specified distance.

    NOTE(review): run() actually samples the noise from a one-pixel
    border around the whole array, not from a 16x16 corner sample as
    stated above -- confirm which description is current.
    """

    # Declarative widget layout consumed by the surrounding GUI to build
    # this process' configuration dialog; the strings in each tuple name
    # the __init__ parameters the widgets feed.
    gui_commands = [
        ("SelectNameLabelFrame",
         ("lum_in", "out_name_lum", "out_lum_to_db"),
         ("Input:", "Output Name:", "Luminance Data", 400, (60, 100)),
         True),
        ("SelectNameLabelFrame",
         ("c_x_in", "out_name_x", "out_x_to_db"),
         ("Input:", "Output Name:", "Chromaticity X Data", 400, (60, 100)),
         True),
        ("SelectNameLabelFrame",
         ("c_y_in", "out_name_y", "out_y_to_db"),
         ("Input:", "Output Name:", "Chromaticity Y Data", 400, (60, 100)),
         True),
        ("SelectNameLabelFrame",
         (("x_min_in",
           "x_max_in",
           "y_min_in",
           "y_max_in"), "out_name_dims", "out_dims_to_db"),
         (("Hor. Min:",
           "Hor. Max",
           "Vert. Min:",
           "Vert. Max:"), "Output Name:", "Dimensions", 400, (60, 100)),
         False),
        ("SpboxFrame", "count", ("Threshold:", list(range(1, 33)), 200, 75)),
        ("SpboxFrame", "dist", ("Margin:", list(range(0, 65)), 200, 75)),
        ("SpboxFrame", "rotate", ("Rotate:", [0, 90, 180, 270], 200, 75))]

    def __init__(
            self, out_name_lum, out_name_x, out_name_y, out_name_dims,
            out_lum_to_db, out_x_to_db, out_y_to_db, out_dims_to_db,
            lum_in, c_x_in, c_y_in, x_min_in, x_max_in, y_min_in, y_max_in,
            count, dist, rotate):
        # Display name of this calculation step.
        self.name = "Center Crop "
        # Input DataObjects: luminance map, chromaticity x/y maps, and
        # the physical min/max extents of the measured area.
        self.lum_in = lum_in
        self.c_x_in = c_x_in
        self.c_y_in = c_y_in
        self.out_name_lum = out_name_lum
        self.out_name_x = out_name_x
        self.out_name_y = out_name_y
        self.out_name_dims = out_name_dims
        # *_to_db flags: whether each output is persisted to the database.
        self.out_lum_to_db = out_lum_to_db
        self.out_x_to_db = out_x_to_db
        self.out_y_to_db = out_y_to_db
        self.out_dims_to_db = out_dims_to_db
        self.x_min_in = x_min_in
        self.x_max_in = x_max_in
        self.y_min_in = y_min_in
        self.y_max_in = y_max_in
        # count: rows/cols need > count pixels above the noise gate to be
        # treated as lit; dist: margin (pixels) added around the detected
        # region; rotate: CCW rotation in degrees applied before cropping.
        self.count = int(count)
        self.dist = int(dist)
        self.rotate = int(rotate)
        CalcProcess.__init__(
            self,
            [lum_in.calc, c_x_in.calc, c_y_in.calc,
             x_min_in.calc, x_max_in.calc, y_min_in.calc, y_max_in.calc])
        self.inputs = [
            lum_in.name, c_x_in.name, c_y_in.name, x_min_in.name,
            x_max_in.name, y_min_in.name, y_max_in.name]
        self.outputs.extend([
            DataObject(self.out_name_lum, np.array([]),
                       self, self.out_lum_to_db),
            DataObject(self.out_name_x, np.array([]),
                       self, self.out_x_to_db),
            DataObject(self.out_name_y, np.array([]),
                       self, self.out_y_to_db),
            DataObject(self.out_name_dims, np.array([]),
                       self, self.out_dims_to_db)])

    def check(self):
        """Validate configuration; return an error string or None."""
        if len(set(
                (self.out_name_lum,
                 self.out_name_x,
                 self.out_name_y,
                 self.out_name_dims))) != 4:
            return "Output names must be unique."
        else:
            return None

    def run(self):
        """Rotate, noise-gate, locate and crop the lit region.

        Produces four outputs: cropped luminance, cropped chromaticity
        x/y maps, and an info vector
        [x_dim, y_dim, width, height, ctr_x, ctr_y, noise_threshold].
        """
        original_rng = self.lum_in.read_data
        c_x_out = self.c_x_in.read_data
        c_y_out = self.c_y_in.read_data
        # Handle data rotation: rotate all three maps in lockstep, and
        # swap the physical x/y extents for 90/270 degree rotations.
        if self.rotate != 0:
            original_rng = np.rot90(original_rng, int(self.rotate/90))
            c_x_out = np.rot90(c_x_out, int(self.rotate/90))
            c_y_out = np.rot90(c_y_out, int(self.rotate/90))
        if self.rotate in (90, 270):
            x_min_in = self.y_min_in.read_data
            x_max_in = self.y_max_in.read_data
            y_min_in = self.x_min_in.read_data
            y_max_in = self.x_max_in.read_data
        else:
            x_min_in = self.x_min_in.read_data
            x_max_in = self.x_max_in.read_data
            y_min_in = self.y_min_in.read_data
            y_max_in = self.y_max_in.read_data
        # Sample background noise data around edge of the input data
        # (full one-pixel border; corners counted once).
        noise_sample = np.concatenate((
            original_rng[0,:],
            original_rng[-1,:],
            original_rng[:,0][1:-1],
            original_rng[:,-1][1:-1]))
        # Noise gate: mean + 6 sigma of the border sample.
        noise_threshold = np.average(noise_sample) + 6 * np.std(noise_sample)
        # Count number of elements in each row/column above threshold
        thresh_row = np.array([len(np.where(row > noise_threshold)[0]) \
                               for row in original_rng])
        thresh_col = np.array([len(np.where(col > noise_threshold)[0]) \
                               for col in original_rng.T])
        # Get the row/column indices representing the edges of the
        # data above the noise threshold
        i_row = np.where(thresh_row > self.count)[0]
        i_col = np.where(thresh_col > self.count)[0]
        x_left = i_col[0]
        x_right = i_col[-1]
        y_top = i_row[0]
        y_bottom = i_row[-1]
        width = x_right - x_left
        height = y_bottom - y_top
        # Center of the lit region, relative to the (pre-margin) crop box.
        ctr_pt_y = int(height/2)
        ctr_pt_x = int(width/2)
        # Extend margins by self.dist, or by as much as the initial
        # size allows; the center point shifts only when the left/top
        # margin is actually added.
        if x_left - self.dist >= 0:
            x_left -= self.dist
            ctr_pt_x += self.dist
        else:
            x_left = 0
        if x_right + self.dist < original_rng.shape[1]:
            x_right += self.dist
        else:
            x_right = original_rng.shape[1]
        if y_top - self.dist >= 0:
            y_top -= self.dist
            ctr_pt_y += self.dist
        else:
            y_top = 0
        if y_bottom + self.dist < original_rng.shape[0]:
            y_bottom += self.dist
        else:
            y_bottom = original_rng.shape[0]
        self.outputs[0].set(original_rng[y_top:y_bottom, x_left:x_right])
        self.outputs[1].set(
            c_x_out[y_top:y_bottom, x_left:x_right])
        self.outputs[2].set(
            c_y_out[y_top:y_bottom, x_left:x_right])
        # Physical size of the cropped region, scaled from the full-frame
        # extents by the cropped pixel fraction.
        # NOTE(review): x_dim_out is scaled by the ROW (vertical) crop
        # fraction and y_dim_out by the COLUMN fraction, yet downstream
        # consumers (GenMask, Uniformity) read dims[0] as the horizontal
        # extent -- confirm these two factors are not swapped.
        x_dim_in = x_max_in - x_min_in
        y_dim_in = y_max_in - y_min_in
        x_dim_out = x_dim_in * (y_bottom-y_top) / original_rng.shape[0]
        y_dim_out = y_dim_in * (x_right-x_left) / original_rng.shape[1]
        self.outputs[3].set(np.array([
            x_dim_out, y_dim_out, width,
            height, ctr_pt_x, ctr_pt_y, noise_threshold]))
class GenMask(CalcProcess):
    """
    Build a boolean mask separating the illuminated region of a
    luminance map from its dark background, then shrink the mask inward
    by a configurable physical offset around the detected edges.

    NOTE(review): the original docstring described converting an
    "external monochrome image file"; the code below derives the mask
    purely from the luminance data -- confirm the intended behavior.
    """

    # Log-spaced candidate threshold values (1..1000); set() removes
    # duplicate integers created by rounding, so re-sort afterwards.
    thresh_list = list(set(np.logspace(0, 3, dtype=int)))
    thresh_list.sort()

    # Declarative widget layout for the GUI configuration dialog.
    gui_commands = [
        ("ComboboxFrame",
         "lum_in",
         ("Luminance Data:", 300, 120), True),
        ("ComboboxFrame",
         "dims_in",
         ("Dimensions [mm]:", 300, 120), True),
        ("TextEntryFrame", "out_name_mask", ("Mask Name:", 300, 120)),
        ("CheckEntryFrame", "to_db", "Save Mask to DB?"),
        ("TextEntryFrame", "offset", ("Offset [mm]:", 200, 100)),
        ("SpboxFrame", "count", ("Sensitivity:", list(range(1, 33)), 200, 100))
    ]

    def __init__(self, out_name_mask, to_db, lum_in, dims_in,
                 offset, count):
        self.name = "Generate Mask"
        self.out_name_mask = out_name_mask
        # Whether the generated mask is persisted to the database.
        self.to_db = to_db
        self.lum_in = lum_in
        self.dims_in = dims_in
        # offset: physical inward shrink distance [mm] (validated to a
        # float in check()); count: half-width of the edge-detection
        # sampling window in pixels.
        self.offset = offset
        self.count = count
        CalcProcess.__init__(self, list(set((lum_in.calc, dims_in.calc))))
        self.inputs = [lum_in.name, dims_in.name]
        self.outputs.append(
            DataObject(out_name_mask, np.array([]), self, self.to_db))

    def check(self):
        """Validate configuration; return an error string or None."""
        try:
            self.offset = float(self.offset)
        except ValueError:
            return "Input offset, angle, width, and height must be a number."
        return None

    def run(self):
        """Compute the boolean mask and store it as output 0."""
        data = self.lum_in.read_data
        mask = np.full(data.shape, True)
        edge_list = []
        count = int(self.count)
        # Number of pixels in the (2*count+1)^2 edge-test window.
        test_size = (2*count + 1) ** 2
        # Generate the initial mask array using a logarithmic histogram to
        # separate out the illuminated region of the data. The threshold
        # is placed just below the steepest rise into the bright peak.
        n, bins = np.histogram(
            data,
            np.logspace(
                np.log10(2*self.dims_in.read_data[6]),
                np.log10(np.max(data)),
                int(np.prod(data.shape)**0.5 / 4)))
        i_peak_n = np.argmax(n) + 1
        # Look at the bin-to-bin growth over the 20 bins below the peak.
        bin_diff = np.diff(n[np.max((0, i_peak_n-20)) : i_peak_n+1])
        i_max_roc = np.argmax(bin_diff)
        i = i_max_roc
        # Walk back until the growth drops below 10% of its maximum.
        while bin_diff[i] > 0.10 * bin_diff[i_max_roc]:
            i -= 1
        threshold = bins[np.max((0, i_peak_n-20)) + i]
        for (i, j), val in np.ndenumerate(data):
            if val < threshold:
                mask[i, j] = False
        # Determine the proper offset in pixels based on the image
        # dimensions (dims[0]/dims[1] are the physical x/y extents in mm).
        sfx = self.dims_in.read_data[0] / (self.lum_in.read_data.shape[1] - 1)
        sfy = self.dims_in.read_data[1] / (self.lum_in.read_data.shape[0] - 1)
        px_offset = int(self.offset / np.average((sfx, sfy)) * 2**0.5 * 0.5)
        # Iterate over the mask array and detect edge points by taking
        # a sample subset of the array about the test point and determining
        # if 40-60% of the data is below the threshold.
        for (i, j), val in np.ndenumerate(mask):
            if val:
                if (count-1) < i < (data.shape[0]-count) and \
                        (count-1) < j < (data.shape[1]-count):
                    test_arr = mask[i-count : i+count+1,
                                    j-count : j+count+1]
                    test_edge = len(
                        np.where(test_arr)[0]) / test_size
                    if 0.4 < test_edge < 0.6:
                        edge_list.append((i, j))
        # Set points within an offset radius of each edge point to False,
        # clamping the square window to the array bounds.
        for x, y in edge_list:
            limits = [[x-px_offset, x+px_offset+1],
                      [y-px_offset, y+px_offset+1]]
            for i in range(0, 2):
                if limits[i][0] < 0:
                    limits[i][0] = 0
                if limits[i][1] > mask.shape[i]-1:
                    limits[i][1] = mask.shape[i]-1
            mask[limits[0][0]:limits[0][1],
                 limits[1][0]:limits[1][1]].fill(False)
        self.outputs[0].set(mask)
class Uniformity(CalcProcess):
    """
    Take an array of luminance data and the min/max dimensional values
    of the array and calculate the uniformity by fitting a plane at each
    point, using a set of points within a square of width (2 * dx + 1)
    centered about the point. The output at each point is the slope of
    the fitted plane (luminance change per mm).
    """

    # Declarative widget layout for the GUI configuration dialog.
    gui_commands = [
        ("SelectNameLabelFrame",
         ("lum_in", "out_name", "to_db"),
         ("Input:", "Output Name:", "Luminance Data", 400, (60, 100)),
         True),
        ("ComboboxFrame",
         "mask_in",
         ("Mask Array:", 200, 75), True),
        ("ComboboxFrame",
         "dims",
         ("Dimensions [mm]:", 200, 75), True),
        ("SpboxFrame", "dx", ("Smoothing", list(range(1,11)), 150, 75)),
        ("SpboxFrame", "reduction",
         ("Reduction", list(range(1,20)), 150, 75))]

    def __init__(self, out_name, to_db, lum_in, mask_in, dims, dx, reduction):
        self.name = "Uniformity"
        self.lum_in = lum_in
        self.mask_in = mask_in
        self.dims = dims
        self.out_name = out_name
        self.to_db = to_db
        # dx: half-width of the plane-fit window; reduction: compute the
        # fit only every r-th row/column and reuse the value in between.
        self.dx = dx
        self.reduction = reduction
        CalcProcess.__init__(
            self,
            [lum_in.calc, dims.calc, mask_in.calc])
        self.inputs = [lum_in.name, mask_in.name, dims.name]
        self.outputs.append(
            DataObject(self.out_name, np.array([]), self, self.to_db))

    def check(self):
        """No extra validation needed; GUI constrains all inputs."""
        return None

    def run(self):
        """Compute the masked uniformity (local slope) heat map."""
        dx = int(self.dx)
        r = int(self.reduction)
        lum = self.lum_in.read_data
        # Scale factors in the x,y directions (mm per pixel)
        sfx = self.dims.read_data[0] / (lum.shape[1] - 1)
        sfy = self.dims.read_data[1] / (lum.shape[0] - 1)
        # Get proper shapes (accounting for loss of self.dx to each side)
        mask = self.mask_in.read_data[dx:-dx, dx:-dx]
        # BUGFIX: use a float fill value. np.full(..., 0) created an
        # integer array, so every computed slope (a float) was silently
        # truncated on assignment -- typically to 0.
        uni_out = np.full(mask.shape, 0.0)
        for (i, j), _ in np.ndenumerate(uni_out):
            # Only compute uniformity at unmasked points
            if mask[i, j]:
                # Only compute uniformity at every r-th point
                if i % r == 0 and j % r == 0:
                    sample_arr = lum[
                        (i):(i + 2*dx + 1),
                        (j):(j + 2*dx + 1)]
                    pt_list = []
                    # Compute best fit plane z = C0*x + C1*y + C2 over the
                    # sample window (coordinates in mm).
                    for (u, v), sample in np.ndenumerate(sample_arr):
                        pt_list.append([u * sfy, v * sfx, sample])
                    pt_list = np.asarray(pt_list)
                    x, y, z = pt_list[:, 0], pt_list[:, 1], pt_list[:, 2]
                    A = np.c_[x, y, np.ones(pt_list.shape[0])]
                    C, _, _, _ = np.linalg.lstsq(A, z, rcond=None)
                    # Spatial component of plane normal vector
                    n_s = ((C[0]**2 + C[1]**2) / (C[0]**2 + C[1]**2 + 1)) ** 0.5
                    # Value component of plane normal vector
                    n_v = (1 / (C[0]**2 + C[1]**2 + 1)) ** 0.5
                    # The uniformity is the slope of the fitted surface
                    uni_out[i, j] = n_s / n_v
                else:
                    # Reuse the value from the nearest reduced grid point
                    uni_out[i, j] = uni_out[i - i % r, j - j % r]
        # Apply mask to output uniformity heat map
        uni_out = np.multiply(uni_out, mask)
        self.outputs[0].set(uni_out)
class HeatmapStats(CalcProcess):
    """
    Take an array of heatmap data and output a set of standard statistical
    parameters for the heatmap, as well as a histogram of the data.
    Background noise (estimated from a one-pixel border sample) is
    filtered out before the statistics are computed.
    """

    # Declarative widget layout for the GUI configuration dialog.
    gui_commands = [
        ("SelectNameLabelFrame",
         ("data_in",
          ("out_name_mean", "out_name_max", "out_name_median",
           "out_name_75", "out_name_95", "out_name_99", "out_name_hist"),
          ("mean_to_db", "max_to_db", "median_to_db", "p75_to_db", "p95_to_db",
           "p99_to_db", "hist_to_db")),
         ("Input:",
          ("Mean Name:", "Max Name:", "Median Name:", "75% Name:",
           "95% Name:", "99% Name:", "Histogram Name:"),
          "Heatmap Data", 420, (50, 100)), True),
        ("SpboxFrame", "n_bins",
         ("Histogram Bins", list(range(10,200)), 200, 120)),
        ("CheckEntryFrame", "log_hist", "Use Logarithmic Bins for Histogram?")]

    def __init__(self, data_in, out_name_mean, out_name_max, out_name_median,
                 out_name_75, out_name_95, out_name_99, out_name_hist,
                 mean_to_db, max_to_db, median_to_db, p75_to_db, p95_to_db,
                 p99_to_db, hist_to_db, n_bins, log_hist):
        self.name = "Heatmap Statistics"
        self.data_in = data_in
        self.out_name_mean = out_name_mean
        self.out_name_max = out_name_max
        self.out_name_median = out_name_median
        self.out_name_75 = out_name_75
        self.out_name_95 = out_name_95
        self.out_name_99 = out_name_99
        self.out_name_hist = out_name_hist
        self.mean_to_db = mean_to_db
        self.max_to_db = max_to_db
        self.median_to_db = median_to_db
        self.p75_to_db = p75_to_db
        self.p95_to_db = p95_to_db
        self.p99_to_db = p99_to_db
        self.hist_to_db = hist_to_db
        # n_bins may arrive as a string from the GUI spinbox; converted
        # once at the top of run().
        self.n_bins = n_bins
        self.log_hist = log_hist
        CalcProcess.__init__(self, [data_in.calc])
        self.inputs.append(data_in.name)
        self.outputs.extend([
            DataObject(out_name_mean, None, self, mean_to_db),
            DataObject(out_name_max, None, self, max_to_db),
            DataObject(out_name_median, None, self, median_to_db),
            DataObject(out_name_75, None, self, p75_to_db),
            DataObject(out_name_95, None, self, p95_to_db),
            DataObject(out_name_99, None, self, p99_to_db),
            DataObject(out_name_hist, np.array([]), self, hist_to_db)])

    def run(self):
        """Compute noise-filtered statistics and the histogram."""
        # BUGFIX: convert n_bins exactly once. The logarithmic branch
        # previously passed the raw (possibly string) value straight to
        # np.logspace while the linear branch wrapped it in int().
        n_bins = int(self.n_bins)
        data_in = self.data_in.read_data
        # Sample background noise data around edge of the input data
        noise_sample = np.concatenate((
            data_in[0,:],
            data_in[-1,:],
            data_in[:,0][1:-1],
            data_in[:,-1][1:-1]))
        # Noise gate: mean + 12 sigma of the border sample, floored at 1
        # so near-zero noise does not produce a degenerate threshold.
        noise_threshold = np.average(noise_sample) + 12 * np.std(noise_sample)
        if noise_threshold < 0.1:
            noise_threshold = 1
        data_filtered = data_in[data_in > noise_threshold]
        try:
            data_mean = float(np.mean(data_filtered))
            data_max = float(np.max(data_filtered))
            data_median = float(np.percentile(data_filtered, 50))
            data_p75 = float(np.percentile(data_filtered, 75))
            data_p95 = float(np.percentile(data_filtered, 95))
            data_p99 = float(np.percentile(data_filtered, 99))
        except ValueError:  # Handle completely masked data
            data_mean = 0
            data_max = 0
            data_median = 0
            data_p75 = 0
            data_p95 = 0
            data_p99 = 0
        # Generate histogram over (noise_threshold, max]
        if self.log_hist:
            n, bins = np.histogram(
                data_in,
                np.logspace(
                    np.log10(noise_threshold),
                    np.log10(np.max(data_in)),
                    n_bins), (noise_threshold, data_in.max()))
        else:
            n, bins = np.histogram(
                data_in, n_bins, (noise_threshold, data_in.max()))
        # Pair each bin's left edge with its count (the final right edge
        # returned by np.histogram is dropped by zip()).
        hist_arr = np.array(list(zip(bins, n)))
        self.outputs[0].set(data_mean)
        self.outputs[1].set(data_max)
        self.outputs[2].set(data_median)
        self.outputs[3].set(data_p75)
        self.outputs[4].set(data_p95)
        self.outputs[5].set(data_p99)
        self.outputs[6].set(hist_arr)
class LightLeakage(CalcProcess):
    """
    Apply a mask to an array of luminance data to remove edges where
    light leakage might occur. Output the calculated light leakage
    (i.e. ratio of unmasked max to masked max), the masked luminance
    array, and the masked max.
    """

    # Declarative widget layout for the GUI configuration dialog.
    gui_commands = [
        ("SelectNameLabelFrame",
         (("data_in", "mask_in"),
          ("out_name_masked", "out_name_leak", "out_name_peak"),
          ("masked_to_db", "leak_to_db", "peak_to_db")),
         (("Luminance Data:", "Edge Mask:"),
          ("Masked Luminance Name:", "Light Leakage Name:", "Masked Max Name:"),
          "Data Input", 500, (100, 150)), True)]

    def __init__(self, data_in, mask_in, out_name_masked, out_name_leak,
                 out_name_peak, masked_to_db, leak_to_db, peak_to_db):
        self.name = "Light Leakage"
        self.data_in = data_in
        self.mask_in = mask_in
        self.out_name_masked = out_name_masked
        self.out_name_leak = out_name_leak
        self.out_name_peak = out_name_peak
        self.masked_to_db = masked_to_db
        self.leak_to_db = leak_to_db
        self.peak_to_db = peak_to_db
        CalcProcess.__init__(self, [data_in.calc, mask_in.calc])
        self.inputs = [data_in.name, mask_in.name]
        self.outputs.extend([
            DataObject(out_name_masked, np.array([]), self, masked_to_db),
            DataObject(out_name_leak, None, self, leak_to_db),
            DataObject(out_name_peak, None, self, peak_to_db)])

    def run(self):
        """Mask the luminance data and compute the leakage ratio."""
        lum_masked = np.multiply(self.data_in.read_data, self.mask_in.read_data)
        peak_masked = lum_masked.max()
        # ROBUSTNESS: a mask that removes all data leaves peak_masked at
        # 0 and previously raised ZeroDivisionError; report N/A instead.
        if peak_masked > 0:
            leakage = "{0:.00%}".format(
                self.data_in.read_data.max() / peak_masked)
        else:
            leakage = "N/A"
        self.outputs[0].set(lum_masked)
        self.outputs[1].set(leakage)
        self.outputs[2].set(float(peak_masked))
class ChromStats(CalcProcess):
    """
    Apply a mask to arrays of chromaticity coordinate data and generate
    relevant statistics (mean/mode per coordinate) and a 2D histogram
    of the data.
    """

    # Declarative widget layout for the GUI configuration dialog.
    gui_commands = [
        ("SelectNameLabelFrame",
         (("c_x_in", "c_y_in", "mask_in"),
          ("out_name_xmean", "out_name_xmode", "out_name_ymean",
           "out_name_ymode", "out_name_xbins", "out_name_ybins",
           "out_name_hist"),
          ("xmean_to_db", "xmode_to_db", "ymean_to_db", "ymode_to_db",
           "xbins_to_db", "ybins_to_db", "hist_to_db")),
         (("Chrom. X Data:", "Chrom. Y Data:", "Edge Mask:"),
          ("X Mean Name:", "X Mode Name:", "Y Mean Name:", "Y Mode Name:",
           "X Hist. Bins Name:", "Y Hist. Bins Name:", "Histogram Name:"),
          "Data Input", 500, (100, 100)), True),
        ("SpboxFrame", "n_bins",
         ("Histogram Bins", list(range(10,200)), 200, 120))]

    def __init__(self, c_x_in, c_y_in, mask_in, out_name_xmean, out_name_xmode,
                 out_name_ymean, out_name_ymode, out_name_xbins, out_name_ybins,
                 out_name_hist, xmean_to_db, xmode_to_db, ymean_to_db,
                 ymode_to_db, xbins_to_db, ybins_to_db, hist_to_db, n_bins):
        self.name = "Chromaticity Statistics"
        self.c_x_in = c_x_in
        self.c_y_in = c_y_in
        self.mask_in = mask_in
        self.out_name_xmean = out_name_xmean
        self.out_name_xmode = out_name_xmode
        self.out_name_ymean = out_name_ymean
        self.out_name_ymode = out_name_ymode
        self.out_name_xbins = out_name_xbins
        self.out_name_ybins = out_name_ybins
        self.out_name_hist = out_name_hist
        self.xmean_to_db = xmean_to_db
        self.xmode_to_db = xmode_to_db
        self.ymean_to_db = ymean_to_db
        self.ymode_to_db = ymode_to_db
        self.xbins_to_db = xbins_to_db
        self.ybins_to_db = ybins_to_db
        self.hist_to_db = hist_to_db
        # n_bins may arrive as a string from the GUI; converted in run().
        self.n_bins = n_bins
        CalcProcess.__init__(self, [c_x_in.calc, c_y_in.calc, mask_in.calc])
        self.inputs = [c_x_in.name, c_y_in.name, mask_in.name]
        self.outputs.extend([
            DataObject(out_name_xmean, None, self, xmean_to_db),
            DataObject(out_name_xmode, None, self, xmode_to_db),
            DataObject(out_name_ymean, None, self, ymean_to_db),
            DataObject(out_name_ymode, None, self, ymode_to_db),
            DataObject(out_name_xbins, np.array([]), self, xbins_to_db),
            DataObject(out_name_ybins, np.array([]), self, ybins_to_db),
            DataObject(out_name_hist, np.array([]), self, hist_to_db)])

    def run(self):
        """Compute means, modes and the 2D chromaticity histogram."""
        self.n_bins = int(self.n_bins)
        # Apply the mask to the chromaticity data; masked points become 0
        # and are later excluded by the range filters below.
        c_x = np.ndarray.flatten(np.multiply(
            self.c_x_in.read_data, self.mask_in.read_data))
        c_y = np.ndarray.flatten(np.multiply(
            self.c_y_in.read_data, self.mask_in.read_data))
        # Filter out values that must be invalid based on out of
        # range coordinates (i.e. outside visible region of CIE 1931)
        c_x_filtered = c_x[c_x > 0.0036]
        c_x_filtered = c_x_filtered[c_x_filtered < 0.735]
        c_y_filtered = c_y[c_y > 0.0048]
        c_y_filtered = c_y_filtered[c_y_filtered < 0.835]
        self.outputs[0].set(float(np.mean(c_x_filtered)))
        self.outputs[2].set(float(np.mean(c_y_filtered)))
        # Expel outliers: use the 1st/99th percentiles as histogram limits
        xmin = np.percentile(c_x_filtered, 1)
        xmax = np.percentile(c_x_filtered, 99)
        ymin = np.percentile(c_y_filtered, 1)
        ymax = np.percentile(c_y_filtered, 99)
        # Generate the histogram and calculate the mode of each coordinate.
        # The unfiltered c_x/c_y are used here; the explicit range keeps
        # masked zeros out of the counts.
        n, xbins, ybins = np.histogram2d(
            c_x, c_y, self.n_bins, [[xmin-0.01, xmax+0.01],
                                    [ymin-0.01, ymax+0.01]])
        # Transpose so rows correspond to y and columns to x.
        n = n.T
        # Mode = midpoint of the bin with the largest marginal count.
        nx_max = np.argmax(np.sum(n, axis=1))
        ny_max = np.argmax(np.sum(n, axis=0))
        xmode = np.mean((xbins[nx_max], xbins[nx_max+1]))
        ymode = np.mean((ybins[ny_max], ybins[ny_max+1]))
        self.outputs[1].set(float(xmode))
        self.outputs[3].set(float(ymode))
        self.outputs[4].set(xbins)
        self.outputs[5].set(ybins)
        self.outputs[6].set(n)
class DominantWL(CalcProcess):
    """
    Apply a mask to arrays of chromaticity coordinate data and resolve
    the coordinates at each point to a dominant wavelength (or purple hue).

    For each unmasked point a ray is cast from the D65 white point
    through the measured chromaticity; its intersection with the CIE
    1931 spectral locus (chrom_dict) gives the dominant wavelength in
    nm, or -- when the ray exits across the line of purples -- a
    purple-hue ratio in (0, 1].
    """

    # Declarative widget layout for the GUI configuration dialog.
    gui_commands = [
        ("SelectNameLabelFrame",
         (("c_x_in", "c_y_in", "mask_in"),
          ("out_name_wlmean", "out_name_wlmode", "out_name_phmean",
           "out_name_phmode", "out_name_wlhist", "out_name_phhist",
           "out_name_wlmap"),
          ("wlmean_to_db", "wlmode_to_db", "phmean_to_db", "phmode_to_db",
           "wlhist_to_db", "phhist_to_db", "wlmap_to_db")),
         (("Chrom. X Data:", "Chrom. Y Data:", "Edge Mask:"),
          ("WL Mean Name:", "WL Mode Name:", "PH Mean Name:", "PH Mode Name:",
           "WL Hist. Name:", "PH Hist. Name:", "WL Map Name:"),
          "Data Input", 500, (100, 120)), True),
        ("SpboxFrame", "reduction",
         ("Reduction", list(range(1,20)), 150, 75))]

    def __init__(self, c_x_in, c_y_in, mask_in, out_name_wlmean,
                 out_name_wlmode, out_name_phmean, out_name_phmode,
                 out_name_wlhist, out_name_phhist, out_name_wlmap,
                 wlmean_to_db, wlmode_to_db, phmean_to_db, phmode_to_db,
                 wlhist_to_db, phhist_to_db, wlmap_to_db, reduction):
        self.name = "Dominant Wavelength"
        self.c_x_in = c_x_in
        self.c_y_in = c_y_in
        self.mask_in = mask_in
        self.out_name_wlmean = out_name_wlmean
        self.out_name_wlmode = out_name_wlmode
        self.out_name_phmean = out_name_phmean
        self.out_name_phmode = out_name_phmode
        self.out_name_wlhist = out_name_wlhist
        self.out_name_phhist = out_name_phhist
        self.out_name_wlmap = out_name_wlmap
        self.wlmean_to_db = wlmean_to_db
        self.wlmode_to_db = wlmode_to_db
        self.phmean_to_db = phmean_to_db
        self.phmode_to_db = phmode_to_db
        self.wlhist_to_db = wlhist_to_db
        self.phhist_to_db = phhist_to_db
        self.wlmap_to_db = wlmap_to_db
        # reduction: resolve only every r-th point and fill each r x r
        # bin with that value (the search is expensive).
        self.reduction = reduction
        CalcProcess.__init__(self, [c_x_in.calc, c_y_in.calc, mask_in.calc])
        self.inputs = [c_x_in.name, c_y_in.name, mask_in.name]
        self.outputs.extend([
            DataObject(out_name_wlmean, None, self, wlmean_to_db),
            DataObject(out_name_wlmode, None, self, wlmode_to_db),
            DataObject(out_name_phmean, None, self, phmean_to_db),
            DataObject(out_name_phmode, None, self, phmode_to_db),
            DataObject(out_name_wlhist, np.array([]), self, wlhist_to_db),
            DataObject(out_name_phhist, np.array([]), self, phhist_to_db),
            DataObject(out_name_wlmap, np.array([]), self, wlmap_to_db)])

    def run(self):
        """Build the wavelength map and its aggregate statistics."""
        r = int(self.reduction)
        c_x = self.c_x_in.read_data
        c_y = self.c_y_in.read_data
        mask = self.mask_in.read_data
        wlmap = np.full(c_x.shape, 0.)
        # Standard D65 illuminant white point
        white_pt = (0.31271, 0.32902)
        # If data is to be reduced, go through unmasked region of the
        # data set and average chromaticity coordinates by reduction bin.
        # NOTE(review): this writes the averages back into the input
        # arrays in place, and the i:i+r-1 slice omits the last row and
        # column of each r x r bin -- confirm both are intended.
        if r > 1:
            for (i, j), _ in np.ndenumerate(c_x):
                if mask[i, j]:
                    if i % r == 0 and j % r == 0:
                        c_x[i, j] = np.mean(c_x[i : i+r-1, j : j+r-1])
                        c_y[i, j] = np.mean(c_y[i : i+r-1, j : j+r-1])
        # Wavelength keys for first pass rough approx of spectral locus
        first_pass = [360, 500, 510, 520, 540, 560, 830]
        for (i, j), _ in np.ndenumerate(c_x[:-r, :-r]):
            if mask[i : i+r, j : j+r].any():
                if i % r == 0 and j % r == 0:
                    meas_pt = (c_x[i, j], c_y[i, j])
                    int_pt = None
                    # k starts at -2 so the first segment tested (k == -1)
                    # is chrom_dict[830] -> chrom_dict[360], i.e. the line
                    # of purples.
                    k = -2
                    # Conduct first pass
                    while int_pt is None:
                        k += 1
                        int_pt = self.check_int(
                            chrom_dict[first_pass[k]],
                            chrom_dict[first_pass[k+1]],
                            white_pt, meas_pt)
                    # Handle intersection point on line of purples:
                    # store the position along the line as a ratio (0, 1]
                    if k == -1:
                        val = self.partial_int(
                            meas_pt, chrom_dict[360], chrom_dict[830])
                        wlmap[i : i+r, j : j+r] = val
                    else:
                        interval = [first_pass[k], first_pass[k+1]]
                        # Home in on the intersection point with the
                        # spectral locus by bisecting the wavelength
                        # interval until it is one nm wide
                        while interval[1]-interval[0] > 1:
                            int_pt = None
                            k = -1
                            next_pass = [interval[0],
                                         int(np.mean(interval)),
                                         interval[1]]
                            while int_pt is None:
                                k += 1
                                int_pt = self.check_int(
                                    chrom_dict[next_pass[k]],
                                    chrom_dict[next_pass[k+1]],
                                    white_pt, meas_pt)
                            interval = [next_pass[k], next_pass[k+1]]
                        # Set the wavelength value as the closest
                        # integer wavelength value to the intersection
                        val = self.partial_int(
                            meas_pt, chrom_dict[interval[0]],
                            chrom_dict[interval[1]])
                        if val <= 0.5:
                            val = interval[0]
                        else:
                            val = interval[1]
                        wlmap[i : i+r, j : j+r] = val
        wlmap = np.multiply(wlmap, mask)
        # Sift out points where a valid wavelength was found or where
        # a purple hue was found
        wlmap_filter_wl = wlmap[wlmap >= 360]
        wlmap_filter_ph = wlmap[wlmap < 360]
        wlmap_filter_ph = wlmap_filter_ph[wlmap_filter_ph > 0]
        # BUGFIX: removed leftover debug dump np.savetxt('testwl.txt', ...)
        # that silently wrote a file into the working directory on every run.
        if len(wlmap_filter_wl) > 0:
            n_wl, bins_wl = np.histogram(wlmap_filter_wl, 94, (360, 830))
            wl_mean = np.mean(wlmap_filter_wl)
            wl_mode = bins_wl[np.argmax(n_wl)]
        else:
            n_wl, bins_wl = np.full(94, 0.0001), np.linspace(360, 830, 95)
            wl_mean = 0
            wl_mode = 0
        if len(wlmap_filter_ph) > 0:
            n_ph, bins_ph = np.histogram(wlmap_filter_ph, 25, (0, 1))
            ph_mean = np.mean(wlmap_filter_ph)
            # BUGFIX: the purple-hue mode was looked up in the wavelength
            # bin edges (bins_wl); it must use the purple-hue bins.
            ph_mode = bins_ph[np.argmax(n_ph)]
        else:
            n_ph, bins_ph = np.full(25, 0.0001), np.linspace(0, 1, 26)
            ph_mean = 0
            ph_mode = 0
        # Recode purple-hue pixels (values in (0, 1]) to a large value
        # that will be converted to gray in the heatmap
        for (i, j), _ in np.ndenumerate(wlmap):
            if 0 < wlmap[i, j] <= 1:
                wlmap[i, j] = 10000
        wl_hist_arr = np.array(list(zip(bins_wl, n_wl)))
        ph_hist_arr = np.array(list(zip(bins_ph, n_ph)))
        self.outputs[0].set(float(wl_mean))
        self.outputs[1].set(float(wl_mode))
        self.outputs[2].set(float(ph_mean))
        self.outputs[3].set(float(ph_mode))
        self.outputs[4].set(wl_hist_arr)
        self.outputs[5].set(ph_hist_arr)
        self.outputs[6].set(wlmap)

    def check_int(self, seg1, seg2, wp, meas):
        """Return the intersection of ray wp->meas with segment seg1-seg2.

        The ray emanates from the white point wp through the measured
        point meas; returns the intersection point, or None if the ray
        misses the segment or points away from it.
        """
        # Line through the segment and line through the ray (slope form)
        m_seg = (seg2[1] - seg1[1]) / (seg2[0] - seg1[0])
        b_seg = seg1[1] - seg1[0] * m_seg
        m_ray = (meas[1] - wp[1]) / (meas[0] - wp[0])
        b_ray = wp[1] - wp[0] * m_ray
        int_pt = ((b_ray - b_seg) / (m_seg - m_ray),
                  m_seg * ((b_ray - b_seg) / (m_seg - m_ray)) + b_seg)
        # Check if intersection point is in the direction of the ray
        if all(np.signbit(np.subtract(meas, wp)) == \
               np.signbit(np.subtract(int_pt, wp))):
            # Check if the intersection point falls between the two line
            # segment points
            if all(np.signbit(np.subtract(int_pt, seg1)) != \
                   np.signbit(np.subtract(int_pt, seg2))):
                return int_pt
        return None

    def partial_int(self, meas, pt1, pt2):
        """Return the ratio |meas - pt1| / |pt2 - pt1|.

        Point meas is assumed to fall on the line segment between pt1
        and pt2.
        """
        return np.linalg.norm(np.subtract(meas, pt1)) \
            / np.linalg.norm(np.subtract(pt2, pt1))
    # CIE 1931 spectral locus: (x, y) chromaticity coordinates keyed by
    # wavelength in nanometers (360-830)
chrom_dict = {360: (0.175560231755724, 0.00529383701144858),
361: (0.175482527710407, 0.00528633910591523),
362: (0.175400022356684, 0.00527864204310934),
363: (0.175317049458602, 0.00527096879423463),
364: (0.175236739463886, 0.00526349392275481),
365: (0.175161218506262, 0.00525634591510605),
366: (0.175087794161125, 0.0052468445276602),
367: (0.175014938867913, 0.00523557032866828),
368: (0.174945189438668, 0.00522615691108036),
369: (0.174880124778842, 0.0052207849401792),
370: (0.174820607679635, 0.00522060093793847),
371: (0.174770252213918, 0.00522866721672108),
372: (0.174722036673711, 0.00523752017723635),
373: (0.174665367950954, 0.00523616066324846),
374: (0.174595050265963, 0.00521832225253951),
375: (0.17450972086916, 0.00518163977014415),
376: (0.174409249351858, 0.00512676089768082),
377: (0.174308458223786, 0.00506759252024135),
378: (0.174221772058161, 0.0050170315362969),
379: (0.174155594353273, 0.00498144491089279),
380: (0.174112234426342, 0.00496372598145272),
381: (0.17408830716741, 0.00496360006529369),
382: (0.174072590901536, 0.00497254261822802),
383: (0.174057024292786, 0.00498203613909543),
384: (0.17403627060996, 0.00498596142862248),
385: (0.174007917515889, 0.00498054862299504),
386: (0.173971929754687, 0.00496408309597096),
387: (0.173931678596305, 0.0049434066494304),
388: (0.1738890357771, 0.00492604851075011),
389: (0.173845256167111, 0.00491609307090683),
390: (0.173800772620828, 0.00491541190537341),
391: (0.173754438047173, 0.00492485369535323),
392: (0.17370535274905, 0.00493709837109517),
393: (0.173655189400453, 0.00494379098332086),
394: (0.173606018216907, 0.00493989527110925),
395: (0.173559906527214, 0.00492320257730789),
396: (0.173514449742224, 0.00489544671312358),
397: (0.173468498200431, 0.00486457913883596),
398: (0.173423666225833, 0.00483631212221056),
399: (0.173379996016857, 0.00481333832384587),
400: (0.173336865480781, 0.00479674344726689),
401: (0.173291285658761, 0.00478584564814538),
402: (0.173237920453112, 0.00477888793221686),
403: (0.173174238776235, 0.00477513079983524),
404: (0.173101012208515, 0.00477403067449075),
405: (0.173020965455495, 0.00477505036185929),
406: (0.172934256850859, 0.00478114717178147),
407: (0.172842756135349, 0.00479079294906705),
408: (0.172751152603347, 0.00479876209926314),
409: (0.172662105581222, 0.00480208435632195),
410: (0.172576550848802, 0.00479930191972077),
411: (0.172489477381802, 0.0047952543644012),
412: (0.172395603384173, 0.00479611858893491),
413: (0.172296001755019, 0.00480262947347127),
414: (0.172192360361959, 0.0048148852140205),
415: (0.172086630755248, 0.00483252421803995),
416: (0.171982445938222, 0.00485501016856446),
417: (0.171871019445674, 0.00488853192151211),
418: (0.171741213705737, 0.00493933245661687),
419: (0.171587239364847, 0.00501034420683924),
420: (0.171407433863109, 0.00510217097374933),
421: (0.171206113461594, 0.00521125777669814),
422: (0.170992574221804, 0.00533390776201582),
423: (0.170770596367909, 0.00547012124749784),
424: (0.170540661923529, 0.00562096993347462),
425: (0.170300988779736, 0.00578850499647099),
426: (0.170050158668149, 0.00597389510789107),
427: (0.16978586875087, 0.00617680748815949),
428: (0.169504602532254, 0.00639803690687803),
429: (0.169202921712127, 0.00663870591838744),
430: (0.168877520670989, 0.00690024388793052),
431: (0.168524660344249, 0.00718404388802375),
432: (0.168146145461531, 0.00749067966632363),
433: (0.167746219826537, 0.00782081848842158),
434: (0.167328325744596, 0.00817539967500124),
435: (0.16689529035208, 0.00855560636081898),
436: (0.166446327135003, 0.0089644004177575),
437: (0.165976758230656, 0.00940171622686875),
438: (0.165483299011466, 0.00986468097234593),
439: (0.164962663720259, 0.0103507435414824),
440: (0.164411756375275, 0.0108575582767639),
441: (0.163828432761608, 0.0113848656159641),
442: (0.163209895954422, 0.0119373858145677),
443: (0.162552139506799, 0.0125200299175854),
444: (0.161851438065089, 0.013137307095434),
445: (0.161104579580275, 0.0137933588217324),
446: (0.160309595019389, 0.0144913781663337),
447: (0.159465945758018, 0.0152320646437253),
448: (0.158573111075907, 0.0160151564155888),
449: (0.157631165578262, 0.0168398709715094),
450: (0.156640932577307, 0.0177048049908913),
451: (0.155605095582748, 0.0186086065240072),
452: (0.154524612494681, 0.0195556978045396),
453: (0.153397229336432, 0.0205537335298569),
454: (0.152219236228253, 0.0216117110209021),
455: (0.150985408375971, 0.022740193291643),
456: (0.149690564758713, 0.0239503301957584),
457: (0.148336817067949, 0.0252473984317283),
458: (0.146928226501376, 0.0266351858576878),
459: (0.145468371778522, 0.0281184333297045),
460: (0.143960396039604, 0.0297029702970297),
461: (0.142405090190101, 0.0313935839862295),
462: (0.14079564666459, 0.03321315460626),
463: (0.139120682426571, 0.0352005728268017),
464: (0.137363757935118, 0.0374030904436341),
465: (0.135502671199611, 0.0398791214721278),
466: (0.133509340955908, 0.0426923900105262),
467: (0.131370635235575, 0.0458759752225473),
468: (0.129085786557187, 0.0494498106597349),
469: (0.126662156977009, 0.0534259197730497),
470: (0.124118476727786, 0.0578025133737405),
471: (0.121468583913083, 0.0625876720665533),
472: (0.118701276452039, 0.0678304435323486),
473: (0.115807358768397, 0.0735807079728414),
474: (0.11277605484761, 0.0798958228959609),
475: (0.10959432361561, 0.0868425111830942),
476: (0.106260735317928, 0.0944860722037205),
477: (0.10277586294651, 0.102863738818152),
478: (0.0991275999016733, 0.112007033037195),
479: (0.0953040562149913, 0.121944863254658),
480: (0.0912935070022712, 0.13270204248699),
481: (0.0870824317270964, 0.144316582680233),
482: (0.082679534481971, 0.15686595807724),
483: (0.0781159857333012, 0.170420486476505),
484: (0.0734372599047498, 0.18503188052712),
485: (0.0687059212910555, 0.200723217728102),
486: (0.0639930236869067, 0.217467605405061),
487: (0.0593158279806231, 0.235253740241245),
488: (0.0546665228761952, 0.254095590747025),
489: (0.0500314970581197, 0.274001803219802),
490: (0.0453907346747777, 0.294975964606287),
491: (0.0407573153360254, 0.316981080839994),
492: (0.0361951091539399, 0.339899934413942),
493: (0.0317564703789208, 0.363597693246343),
494: (0.0274941905347843, 0.387921328280829),
495: (0.0234599425470795, 0.412703479093521),
496: (0.0197046363029537, 0.437755888652074),
497: (0.0162684712672383, 0.462954507988606),
498: (0.0131830411530808, 0.48820706841228),
499: (0.0104757006831256, 0.513404245160212),
500: (0.00816802800466744, 0.538423070511752),
501: (0.00628485157264002, 0.563068456321616),
502: (0.00487542999269079, 0.587116438044602),
503: (0.00398242535235287, 0.610447497638674),
504: (0.00363638422545277, 0.633011382750804),
505: (0.00385852090032154, 0.654823151125402),
506: (0.00464571323255553, 0.675898458599253),
507: (0.00601091307169603, 0.696120061336206),
508: (0.00798839582865335, 0.715341516255831),
509: (0.010603290554259, 0.733412942651556),
510: (0.0138702460850112, 0.750186428038777),
511: (0.017766124205863, 0.765612154434196),
512: (0.0222442056947439, 0.779629923200771),
513: (0.02727326242017, 0.792103502831263),
514: (0.0328203575222175, 0.80292567298964),
515: (0.0388518024032043, 0.812016021361816),
516: (0.0453279848294139, 0.819390800456081),
517: (0.0521766909052169, 0.825163542582536),
518: (0.0593255333519871, 0.829425776296551),
519: (0.0667158860270346, 0.832273739283868),
520: (0.074302424773375, 0.833803091340228),
521: (0.0820533952358065, 0.834090314504944),
522: (0.0899417395853361, 0.833288918895958),
523: (0.0979397501105561, 0.831592666498741),
524: (0.106021107332241, 0.829178186631099),
525: (0.11416071960668, 0.826206959781189),
526: (0.122347367033701, 0.822770399563869),
527: (0.130545668138394, 0.818927852909404),
528: (0.138702349214235, 0.814774382594947),
529: (0.146773215738364, 0.810394606547811),
530: (0.154722061215713, 0.805863545425649),
531: (0.162535424655456, 0.801238480413611),
532: (0.170237195478923, 0.796518542245412),
533: (0.177849528011742, 0.791686579059916),
534: (0.185390757399363, 0.786727772820681),
535: (0.192876097877721, 0.781629216363077),
536: (0.200308798144942, 0.77639941605062),
537: (0.207689989666574, 0.771054798660324),
538: (0.215029550005768, 0.765595096060611),
539: (0.222336603758204, 0.760019999740836),
540: (0.22961967264964, 0.754329089902744),
541: (0.236884720598308, 0.748524465174775),
542: (0.244132556473824, 0.742613991681373),
543: (0.251363408870738, 0.736605581362445),
544: (0.258577508455251, 0.730506601909747),
545: (0.265775084971184, 0.724323924929806),
546: (0.27295760351093, 0.71806218641679),
547: (0.280128942481592, 0.711724734569193),
548: (0.287292409080259, 0.705316273888283),
549: (0.294450280894396, 0.698842022022381),
550: (0.301603799395751, 0.692307762371574),
551: (0.308759923092657, 0.68571206060674),
552: (0.315914394448992, 0.67906347999093),
553: (0.323066265382129, 0.672367397968749),
554: (0.330215545356897, 0.665628025417149),
555: (0.337363332850857, 0.658848290139688),
556: (0.344513198355454, 0.652028209217844),
557: (0.35166441129682, 0.645172174245398),
558: (0.358813686684303, 0.638287336537753),
559: (0.365959357349119, 0.631379080899849),
560: (0.373101543868457, 0.624450859796661),
561: (0.380243835464065, 0.617502152173705),
562: (0.387378977958644, 0.610541802455016),
563: (0.394506548796889, 0.603571336791597),
564: (0.401625918831181, 0.596592421962562),
565: (0.408736255706423, 0.589606868859531),
566: (0.415835774705559, 0.582617968056976),
567: (0.422920926709796, 0.575630688323188),
568: (0.429988626512438, 0.568648891270804),
569: (0.437036422593891, 0.561675774049367),
570: (0.444062463582333, 0.554713902808531),
571: (0.451064940950767, 0.547766044129445),
572: (0.458040665647423, 0.540836629164402),
573: (0.464986332977633, 0.533930053056831),
574: (0.471898743899668, 0.527050569219262),
575: (0.478774791157584, 0.520202307211456),
576: (0.485611587052091, 0.51338866096156),
577: (0.492404982334296, 0.506614924420926),
578: (0.499150668334298, 0.499887340438306),
579: (0.505845283794021, 0.493211178107541),
580: (0.512486366781797, 0.486590788060857),
581: (0.519072510400636, 0.480028612176447),
582: (0.525600488985451, 0.473527373975529),
583: (0.53206559916124, 0.467091363703909),
584: (0.538462761902554, 0.460725253840842),
585: (0.544786505594834, 0.454434114568836),
586: (0.551031050212292, 0.448224502909975),
587: (0.557192906096034, 0.44209913948419),
588: (0.563269312373157, 0.436058061736674),
589: (0.569256824124726, 0.430101973605084),
590: (0.575151311365165, 0.424232234924905),
591: (0.580952605160923, 0.418446879816537),
592: (0.586650186890891, 0.412758421192096),
593: (0.592224800070941, 0.407189528585467),
594: (0.597658162105241, 0.40176193497228),
595: (0.602932785575716, 0.396496633572977),
596: (0.608035111132285, 0.391409151707708),
597: (0.612976999570812, 0.386486157331698),
598: (0.617778725585283, 0.381705756828504),
599: (0.622459295078623, 0.377047286380739),
600: (0.627036599763872, 0.372491145218418),
601: (0.63152094286026, 0.368026010821791),
602: (0.635899819576266, 0.363665402433267),
603: (0.640156159547881, 0.359427724303784),
604: (0.644272960657274, 0.355331369771593),
605: (0.648233106013639, 0.351394916305022),
606: (0.65202823571793, 0.347627960748423),
607: (0.655669179249501, 0.344018294844416),
608: (0.659166134692712, 0.340553225412233),
609: (0.662528222053688, 0.337220992607213),
610: (0.665763576238097, 0.334010651154761),
611: (0.668874143663558, 0.330918553002487),
612: (0.671858667147589, 0.327947074299904),
613: (0.674719511111516, 0.325095182004255),
614: (0.677458888275166, 0.322362076688633),
615: (0.680078849721707, 0.319747217068646),
616: (0.68258157418701, 0.317248705962303),
617: (0.684970601448709, 0.314862815015325),
618: (0.687250454556694, 0.312585963995597),
619: (0.689426303027996, 0.310414011285913),
620: (0.691503972961702, 0.308342260556656),
621: (0.693489634972634, 0.306365690817556),
622: (0.695388638101952, 0.304478555199648),
623: (0.697205569778691, 0.302675072703968),
624: (0.698943910385795, 0.300950424978996),
625: (0.700606060606061, 0.299300699300699),
626: (0.702192588540645, 0.297724511862993),
627: (0.703708691019457, 0.296217118054471),
628: (0.70516285342369, 0.294770292086239),
629: (0.706563246693848, 0.293376153173208),
630: (0.707917791621664, 0.29202710893484),
631: (0.709230985413973, 0.290718622165321),
632: (0.710500394495371, 0.289452941203169),
633: (0.711724146175049, 0.288232104798955),
634: (0.71290123112985, 0.28705732036371),
635: (0.714031597116994, 0.28592887354565),
636: (0.715117053483185, 0.284845106128776),
637: (0.716159198599114, 0.283804449181674),
638: (0.717158613642121, 0.282806411930931),
639: (0.718116142602162, 0.281850256562656),
640: (0.719032941629744, 0.280934951518654),
641: (0.719911552942295, 0.280058078206728),
642: (0.720752706639807, 0.279218959823395),
643: (0.721554522486917, 0.27841951468527),
644: (0.722314915560207, 0.277661870367724),
645: (0.723031602573095, 0.276948357748342),
646: (0.723701916040434, 0.276281836244226),
647: (0.724328018926374, 0.275660074920571),
648: (0.72491440513421, 0.275078184054899),
649: (0.725466776098186, 0.274529977978249),
650: (0.725992317541613, 0.274007682458387),
651: (0.726494726713582, 0.273505273286418),
652: (0.726974970468941, 0.273025029531059),
653: (0.727431838038032, 0.272568161961968),
654: (0.727864310792652, 0.272135689207348),
655: (0.728271728271728, 0.271728271728272),
656: (0.728656487100348, 0.271343512899652),
657: (0.729020030309354, 0.270979969690646),
658: (0.729360950694672, 0.270639049305328),
659: (0.729677783237757, 0.270322216762243),
660: (0.729969012837539, 0.270030987162461),
661: (0.730233949140853, 0.269766050859146),
662: (0.730474165302262, 0.269525834697738),
663: (0.730693306723888, 0.269306693276112),
664: (0.730896252242901, 0.269103747757099),
665: (0.73108939558451, 0.26891060441549),
666: (0.731279635919078, 0.268720364080922),
667: (0.731467050901285, 0.268532949098715),
668: (0.731649970922014, 0.268350029077986),
669: (0.731826333484643, 0.268173666515357),
670: (0.731993299832496, 0.268006700167504),
671: (0.732150422161567, 0.267849577838433),
672: (0.732299831084624, 0.267700168915376),
673: (0.732442822871014, 0.267557177128986),
674: (0.732581493590162, 0.267418506409838),
675: (0.732718894009217, 0.267281105990783),
676: (0.732858647268478, 0.267141352731522),
677: (0.733000205253019, 0.266999794746981),
678: (0.733141671137124, 0.266858328862876),
679: (0.733281178934726, 0.266718821065274),
680: (0.733416967225968, 0.266583032774032),
681: (0.733550585847972, 0.266449414152028),
682: (0.733683296435596, 0.266316703564404),
683: (0.733812716688138, 0.266187283311862),
684: (0.733935690314492, 0.266064309685508),
685: (0.734047300312361, 0.265952699687639),
686: (0.734142556896655, 0.265857443103345),
687: (0.734221470250776, 0.265778529749224),
688: (0.73428644636676, 0.26571355363324),
689: (0.734340919958724, 0.265659080041276),
690: (0.734390164995147, 0.265609835004853),
691: (0.734437712655789, 0.265562287344211),
692: (0.7344821704111, 0.2655178295889),
693: (0.73452293055209, 0.26547706944791),
694: (0.734559518422289, 0.265440481577711),
695: (0.734591661642629, 0.265408338357371),
696: (0.734621094712899, 0.265378905287101),
697: (0.734648896835819, 0.265351103164181),
698: (0.734673378010567, 0.265326621989433),
699: (0.734690045444152, 0.265309954555848),
700: (0.734690023258281, 0.265309976741719),
701: (0.734689987139029, 0.265310012860971),
702: (0.734690006501357, 0.265309993498643),
703: (0.734690006621243, 0.265309993378757),
704: (0.73468999733936, 0.26531000266064),
705: (0.734690010322542, 0.265309989677458),
706: (0.734690001779948, 0.265309998220052),
707: (0.734689989928096, 0.265310010071905),
708: (0.734689997524543, 0.265310002475457),
709: (0.734690015028432, 0.265309984971568),
710: (0.734689988232974, 0.265310011767026),
711: (0.734690013992153, 0.265309986007847),
712: (0.734690016799078, 0.265309983200922),
713: (0.73468998731057, 0.26531001268943),
714: (0.734689992515467, 0.265310007484533),
715: (0.734690013707087, 0.265309986292913),
716: (0.734690017589652, 0.265309982410348),
717: (0.734690003073752, 0.265309996926248),
718: (0.734689983373119, 0.265310016626881),
719: (0.734689976107201, 0.265310023892799),
720: (0.734690004148161, 0.265309995851839),
721: (0.734689987681399, 0.265310012318601),
722: (0.734689974353903, 0.265310025646097),
723: (0.734689984824148, 0.265310015175852),
724: (0.734689956808599, 0.265310043191401),
725: (0.73468999960562, 0.26531000039438),
726: (0.734689976446753, 0.265310023553247),
727: (0.734689955409799, 0.265310044590201),
728: (0.734690030031784, 0.265309969968216),
729: (0.734689978026177, 0.265310021973822),
730: (0.734689952045209, 0.265310047954791),
731: (0.734690033024927, 0.265309966975073),
732: (0.73468995791969, 0.26531004208031),
733: (0.734689940400156, 0.265310059599844),
734: (0.734690083270746, 0.265309916729254),
735: (0.734689992493292, 0.265310007506708),
736: (0.734689993262503, 0.265310006737497),
737: (0.734690004102656, 0.265309995897344),
738: (0.734689988466083, 0.265310011533917),
739: (0.734689997004496, 0.265310002995504),
740: (0.734690005712895, 0.265309994287105),
741: (0.734690001061002, 0.265309998938998),
742: (0.734689986019313, 0.265310013980687),
743: (0.734690003235027, 0.265309996764973),
744: (0.734689985970854, 0.265310014029147),
745: (0.73469000015897, 0.26530999984103),
746: (0.734689987259057, 0.265310012740943),
747: (0.734690004447292, 0.265309995552708),
748: (0.734689986053641, 0.265310013946359),
749: (0.734690022445542, 0.265309977554458),
750: (0.734690010703047, 0.265309989296953),
751: (0.734689995519642, 0.265310004480358),
752: (0.734690031649795, 0.265309968350205),
753: (0.734690017035775, 0.265309982964225),
754: (0.73469002018521, 0.26530997981479),
755: (0.734690001849036, 0.265309998150965),
756: (0.734689969923672, 0.265310030076328),
757: (0.734690021473951, 0.265309978526049),
758: (0.734689988508468, 0.265310011491531),
759: (0.734690010940798, 0.265309989059202),
760: (0.734689952045209, 0.265310047954791),
761: (0.734689958511502, 0.265310041488498),
762: (0.734690051943764, 0.265309948056236),
763: (0.734689987703414, 0.265310012296586),
764: (0.734689955549775, 0.265310044450225),
765: (0.734689918842647, 0.265310081157353),
766: (0.734690053566253, 0.265309946433747),
767: (0.734690090428592, 0.265309909571407),
768: (0.734690006317623, 0.265309993682377),
769: (0.734689993716373, 0.265310006283627),
770: (0.734689998971482, 0.265310001028518),
771: (0.734690003046332, 0.265309996953668),
772: (0.73469001265918, 0.265309987340821),
773: (0.734689987869484, 0.265310012130516),
774: (0.734690008119968, 0.265309991880032),
775: (0.734689985247762, 0.265310014752238),
776: (0.734690001171045, 0.265309998828956),
777: (0.734689986762428, 0.265310013237572),
778: (0.734690009355341, 0.265309990644659),
779: (0.734689991422002, 0.265310008577998),
780: (0.734689983741576, 0.265310016258424),
781: (0.734690022025579, 0.265309977974421),
782: (0.734689973773, 0.265310026227),
783: (0.734690000588213, 0.265309999411786),
784: (0.734690003434716, 0.265309996565284),
785: (0.734689985247762, 0.265310014752238),
786: (0.73468999874321, 0.26531000125679),
787: (0.734689968813787, 0.265310031186213),
788: (0.73469000085901, 0.26530999914099),
789: (0.734690032239148, 0.265309967760853),
790: (0.734689953954455, 0.265310046045545),
791: (0.734689962693029, 0.265310037306971),
792: (0.734690021977645, 0.265309978022356),
793: (0.734689997973122, 0.265310002026878),
794: (0.734690021714466, 0.265309978285534),
795: (0.734689974603344, 0.265310025396656),
796: (0.734690071104975, 0.265309928895025),
797: (0.734690038597127, 0.265309961402873),
798: (0.734690072330406, 0.265309927669594),
799: (0.734689945925162, 0.265310054074838),
800: (0.734689988020245, 0.265310011979755),
801: (0.734690003519111, 0.265309996480889),
802: (0.734690008017908, 0.265309991982092),
803: (0.734690004773334, 0.265309995226666),
804: (0.734689992552339, 0.265310007447661),
805: (0.734689990778549, 0.265310009221451),
806: (0.734689991319004, 0.265310008680996),
807: (0.734690014718562, 0.265309985281438),
808: (0.734689992876979, 0.265310007123021),
809: (0.734689991099063, 0.265310008900937),
810: (0.734689997844699, 0.265310002155301),
811: (0.734690013288509, 0.265309986711491),
812: (0.734690010610884, 0.265309989389116),
813: (0.734690017049771, 0.265309982950229),
814: (0.734689990304129, 0.265310009695871),
815: (0.734690009460412, 0.265309990539588),
816: (0.734689975482662, 0.265310024517338),
817: (0.734689971329393, 0.265310028670607),
818: (0.734689997544385, 0.265310002455615),
819: (0.734689981787834, 0.265310018212166),
820: (0.734689984286965, 0.265310015713035),
821: (0.734690064939548, 0.265309935060452),
822: (0.734690040936472, 0.265309959063528),
823: (0.734690106944952, 0.265309893055048),
824: (0.734689951098991, 0.265310048901009),
825: (0.734689969847467, 0.265310030152533),
826: (0.734689848977845, 0.265310151022155),
827: (0.734689857284722, 0.265310142715278),
828: (0.734690044343544, 0.265309955656456),
829: (0.734690179345217, 0.265309820654783),
830: (0.734689958783312, 0.265310041216688)} | true |
aebf84ee976b5b4aa831e42ee7f003784a6ff5db | Python | klmitch/hypocrite | /hypocrite/hypofile.py | UTF-8 | 30,220 | 2.546875 | 3 | [
"Apache-2.0"
] | permissive | # Copyright (C) 2017 by Kevin L. Mitchell <klmitch@mit.edu>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you
# may not use this file except in compliance with the License. You may
# obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing
# permissions and limitations under the License.
import collections
import os
import six
from hypocrite import perfile
from hypocrite import template
# A single argument of a mocked function: the argument's type string
# (type_) and its identifier (name)
HypoMockArg = collections.namedtuple('HypoMockArg', ['type_', 'name'])


# A fixture applied to a test: the fixture's name (fixture, a key into
# the input file's fixture table) and the injection target (inject);
# NOTE(review): the exact semantics of inject are resolved by the test
# template -- confirm there
HypoFixtureInjection = collections.namedtuple(
    'HypoFixtureInjection', ['fixture', 'inject']
)
def _extract_type(toks, delims):
    """
    Helper generator that carves type and name information out of a
    mock declaration.  Only simple type declarations are handled;
    function pointers should be ``typedef``'d.

    :param list toks: The list of tokens to extract types from.
    :param set delims: The delimiters to split the token stream on.

    :returns: An iterator of 3-element tuples: the tokens making up the
              type, the token taken as the argument or function name
              (``None`` when two delimiters were adjacent), and the
              delimiter itself.  A final tuple with a ``None`` delimiter
              is produced at the end of iteration.
    """
    # Thin wrapper around split_toks(): the last token before each
    # delimiter is the name; everything before it is the type
    for stack, delim in perfile.split_toks(toks, delims, True):
        if stack:
            yield stack[:-1], stack[-1], delim
        else:
            yield [], None, delim
def _make_type(toks, directive, coord):
    """
    Build a type string from a sequence of tokens.

    :param list toks: The tokens to assemble into a type string.
    :param str directive: The directive name, for error reporting.
    :param coord: The coordinates the tokens came from, for error
                  reporting.
    :type coord: ``Coordinates``

    :returns: The assembled type string.

    :raises HypocriteException:
        An error occurred while parsing the directive.
    """
    pieces = []
    prev = None
    for tok in toks:
        # Only bare words and '*' may appear in a simple type
        if tok.type_ != perfile.TOK_WORD and tok != (perfile.TOK_CHAR, '*'):
            raise perfile.ParseException(
                'Invalid %%%s directive at %s' % (directive, coord)
            )

        # Consecutive '*' tokens are fused so we emit "**" rather than
        # "* *"
        fuse = (prev is not None and tok.type_ == perfile.TOK_CHAR and
                prev.type_ == perfile.TOK_CHAR)
        if fuse:
            pieces[-1] += tok.value
        else:
            pieces.append(tok.value)

        prev = tok

    # Join the pieces into the final type string
    return ' '.join(pieces)
class Preamble(object):
    """
    Represent a single "%preamble" directive from a hypocrite input
    file.  The directive carries literal code that is emitted verbatim
    into the preamble section at the top of the generated test file.
    """

    def __init__(self, coord_range, code):
        """
        Initialize a ``Preamble`` instance.

        :param coord_range: The coordinate range the directive occupied
                            in the hypocrite input file.
        :type coord_range: ``hypocrite.location.CoordinateRange``
        :param code: The literal code to emit into the preamble.
        :type code: ``hypocrite.linelist.LineList``
        """
        self.coord_range = coord_range
        self.code = code

    def render(self, hfile, ctxt):
        """
        Render this preamble by appending its code to the "preamble"
        section of the render context.

        :param hfile: The hypocrite input file (unused here).
        :type hfile: ``HypocriteFile``
        :param ctxt: The render context to update.
        :type ctxt: ``hypocrite.template.RenderContext``
        """
        # Augmented assignment: preserves any in-place extend semantics
        # of the underlying section sequence
        ctxt.sections['preamble'] += self.code
class HypocriteTest(object):
    """
    Represent a "%test" directive.  A test directive names a test,
    carries the code of its test function, and lists the fixtures the
    test depends on.
    """

    # Template used to render a test function
    TEMPLATE = 'test.c.tmpl'

    def __init__(self, coord_range, name, code, fixtures):
        """
        Initialize a ``HypocriteTest`` instance.

        :param coord_range: The range of coordinates the directive
                            occupied in the hypocrite input file.
        :type coord_range: ``hypocrite.location.CoordinateRange``
        :param str name: The base name of the test.
        :param code: The code of the test function.
        :type code: ``hypocrite.linelist.LineList``
        :param list fixtures: A list of ``HypoFixtureInjection``
                              instances naming the fixtures to apply to
                              the test.
        """
        self.coord_range = coord_range
        self.name = name
        self.code = code
        self.fixtures = fixtures

    def render(self, hfile, ctxt):
        """
        Render the test into actual output code via the test template.

        :param hfile: The hypocrite input file; its fixture table is
                      used to resolve fixture names.
        :type hfile: ``HypocriteFile``
        :param ctxt: The render context.
        :type ctxt: ``hypocrite.template.RenderContext``
        """
        # Look up each named fixture in the input file, preserving the
        # declaration order and the requested injection names
        resolved = []
        for fix, inject in self.fixtures:
            resolved.append((hfile.fixtures[fix], inject))

        # Hand everything off to the template
        tmpl = template.Template.get_tmpl(self.TEMPLATE)
        tmpl.render(ctxt, name=self.name, code=self.code, fixtures=resolved)
class HypocriteMock(object):
    """
    Represent a "%mock" directive.  A mock directive names a function
    to be replaced by a generated mock in the test file.
    """

    # Templates used to render mocks; functions returning void use a
    # distinct template since they have no return value to manage
    TEMPLATE_VOID = 'mock-void.c.tmpl'
    TEMPLATE = 'mock.c.tmpl'

    def __init__(self, coord_range, name, return_type, args):
        """
        Initialize a ``HypocriteMock`` instance.

        :param coord_range: The range of coordinates the directive
                            occupied in the hypocrite input file.
        :type coord_range: ``hypocrite.location.CoordinateRange``
        :param str name: The name of the function to mock.
        :param str return_type: The type of the function return value.
        :param list args: A list of ``HypoMockArg`` instances giving
                          the type and name of each function argument.
        """
        self.coord_range = coord_range
        self.name = name
        self.return_type = return_type
        self.args = args

    def render(self, hfile, ctxt):
        """
        Render the mock into actual output code via the appropriate
        template.

        :param hfile: The hypocrite input file (unused here).
        :type hfile: ``HypocriteFile``
        :param ctxt: The render context.
        :type ctxt: ``hypocrite.template.RenderContext``
        """
        # Void functions take the template without return-value
        # plumbing
        if self.return_type == 'void':
            tmpl_name = self.TEMPLATE_VOID
        else:
            tmpl_name = self.TEMPLATE

        # Render the selected template
        tmpl = template.Template.get_tmpl(tmpl_name)
        tmpl.render(
            ctxt, name=self.name, return_type=self.return_type, args=self.args
        )
class Fixture(object):
    """
    Represent a "%fixture" directive.  A fixture prepares the test
    environment before a test runs, and may carry optional teardown
    code to clean up afterward.
    """

    # Template used to render a fixture
    TEMPLATE = 'fixture.c.tmpl'

    def __init__(self, coord_range, name, return_type, code, teardown=None):
        """
        Initialize a ``Fixture`` instance.

        :param coord_range: The range of coordinates the directive
                            occupied in the hypocrite input file.
        :type coord_range: ``hypocrite.location.CoordinateRange``
        :param str name: The base name of the fixture.
        :param str return_type: The type of the fixture return value,
                                or ``None`` if the fixture returns
                                nothing.
        :param code: The setup code of the fixture.
        :type code: ``hypocrite.linelist.LineList``
        :param teardown: The optional cleanup code for the fixture.
        :type teardown: ``hypocrite.linelist.LineList``
        """
        self.coord_range = coord_range
        self.name = name
        self.return_type = return_type
        self.code = code
        self.teardown = teardown

    def render(self, hfile, ctxt):
        """
        Render the fixture into actual output code via the fixture
        template.

        :param hfile: The hypocrite input file (unused here).
        :type hfile: ``HypocriteFile``
        :param ctxt: The render context.
        :type ctxt: ``hypocrite.template.RenderContext``
        """
        tmpl = template.Template.get_tmpl(self.TEMPLATE)

        # Assemble the template arguments; teardown is only passed
        # when present
        kwargs = dict(
            name=self.name,
            return_type=self.return_type,
            code=self.code,
        )
        if self.teardown:
            kwargs['teardown'] = self.teardown

        tmpl.render(ctxt, **kwargs)
class HypoParser(perfile.PerFileParser):
    """
    Parser for hypocrite input files.
    """

    # Registry mapping directive names to their handlers; populated by
    # the @HypoParser.directive() registrations below
    DIRECTIVES = {}
@HypoParser.directive(None, 'target')
def target_directive(values, start_coord, toks):
    """
    The ``%target`` directive.  Should contain a single TOK_STR token
    giving the name of the source file being tested.

    :param dict values: The values dictionary that the directive's
                        return value may be placed in.
    :param start_coord: The coordinates the directive started at.
    :type start_coord: ``hypocrite.location.Coordinate``
    :param list toks: A list of tokens.

    :returns: A ``None`` value to indicate no further processing is
              necessary.

    :raises hypocrite.perfile.ParseException:
        An error occurred while parsing the directive.
    """
    # Make sure we got exactly one non-empty string token
    if len(toks) != 1 or toks[0].type_ != perfile.TOK_STR or not toks[0].value:
        # Bug fix: the message previously said "%file", but this
        # directive is registered and documented as "%target"
        raise perfile.ParseException(
            'Invalid %%target directive at %s' % start_coord
        )

    # Save the target file
    values['target'] = toks[0].value

    return None
@HypoParser.directive(lambda: [], 'preamble')
class PreambleDirective(object):
    """
    The ``%preamble`` directive.  This multi-line directive supplies
    preamble code to include at the top of the generated test file.
    The directive line must contain a single '{' token; the directive
    body runs until a '%}' appears at the beginning of a line.
    """

    def __init__(self, values, start_coord, toks):
        """
        Initialize a ``PreambleDirective`` instance.

        :param dict values: The values dictionary that the directive's
                            result will be placed in.
        :param start_coord: The coordinates the directive started at.
        :type start_coord: ``hypocrite.location.Coordinate``
        :param list toks: A list of tokens.

        :raises hypocrite.perfile.ParseException:
            An error occurred while parsing the directive.
        """
        # The only token permitted after "%preamble" is the opening '{'
        well_formed = len(toks) == 1 and toks[0] == (perfile.TOK_CHAR, '{')
        if not well_formed:
            raise perfile.ParseException(
                'Invalid %%preamble directive at %s' % start_coord
            )

        # Stash what __call__() will need later
        self.values = values
        self.start_coord = start_coord

    def __call__(self, end_coord, buf, toks):
        """
        Called once processing of the directive is complete.

        :param end_coord: The coordinates at which the directive
                          processing completed.
        :type end_coord: ``hypocrite.location.Coordinate``
        :param list buf: A list of lines, including trailing newlines,
                         enclosed within the directive.
        :param list toks: A list of tokens.  Will be ``None`` if the
                          directive was closed by the end of file.

        :returns: A ``None`` value to indicate no further processing
                  is necessary.

        :raises hypocrite.perfile.ParseException:
            An error occurred while parsing the directive.
        """
        # Reached end of file without seeing the closing '%}'
        if toks is None:
            raise perfile.ParseException(
                'Unclosed %%preamble directive at end of file; '
                'starts at %s' % self.start_coord
            )

        # Nothing may follow the closing '%}'
        if toks:
            raise perfile.ParseException(
                'Invalid end of %%preamble directive at %s' % end_coord
            )

        # Record the collected code as a new Preamble entry
        span = self.start_coord - end_coord
        self.values['preamble'].append(Preamble(span, buf))

        return None
# Delimiters for type extraction for mocks: the punctuation tokens
# that may terminate a C type/name pair inside a function prototype
# (open paren before the first argument, comma between arguments,
# close paren after the last one).
_mock_type_delims = {
    (perfile.TOK_CHAR, '('), (perfile.TOK_CHAR, ','), (perfile.TOK_CHAR, ')')
}
@HypoParser.directive(lambda: {}, key='mocks')
def mock(values, start_coord, toks):
    """
    The ``%mock`` directive. Should contain a sequence of tokens
    declaring a function to be mocked, excluding any trailing
    semicolon (';').
    :param dict values: The values dictionary that the directive's
                        return value may be placed in.
    :param start_coord: The coordinates the directive started at.
    :type start_coord: ``hypocrite.location.Coordinate``
    :param list toks: A list of tokens.
    :returns: A ``None`` value to indicate no further processing is
              necessary.
    :raises hypocrite.perfile.ParseException:
        An error occurred while parsing the directive.
    """

    # Initialize the type iterator; it yields (type tokens, name token,
    # delimiter token) triples split on _mock_type_delims.
    type_iter = _extract_type(toks, _mock_type_delims)

    # First, have to collect the return type and function name
    try:
        type_, name, delim = six.next(type_iter)
    except StopIteration:  # pragma: no cover
        # Shouldn't ever actually happen
        raise perfile.ParseException(
            'Invalid %%mock directive at %s' % start_coord
        )

    # Make sure the tokens make sense: a return type, a word for the
    # function name, and an opening paren starting the argument list.
    if (not type_ or name.type_ != perfile.TOK_WORD or not name.value or
            delim != (perfile.TOK_CHAR, '(')):
        raise perfile.ParseException(
            'Invalid %%mock directive at %s' % start_coord
        )

    # Initialize the mock information
    func_name = name.value
    return_type = _make_type(type_, 'mock', start_coord)
    args = []

    # Extract argument information
    end_expected = False
    for type_, name, delim in type_iter:
        # Were we expecting the end of the directive?
        if end_expected:
            if type_ or name or delim:
                raise perfile.ParseException(
                    'Unexpected tokens after %%mock directive at %s' %
                    start_coord
                )

            # Just here to exhaust the iterator for coverage
            continue  # pragma: no cover

        # OK, was it the end of the directive?
        elif not delim:
            raise perfile.ParseException(
                'Premature end of arguments in %%mock directive at %s' %
                start_coord
            )

        # Found the closing parenthesis
        elif delim == (perfile.TOK_CHAR, ')'):
            end_expected = True

            # Handles the case of 'void foo()' and 'void foo(void)'
            if not args and ((not type_ and not name) or
                             (not type_ and
                              name == (perfile.TOK_WORD, 'void'))):
                continue

        # Sanity-check the argument: a type, a word for its name, and a
        # delimiter other than another opening paren.
        if (not type_ or
                not name or name.type_ != perfile.TOK_WORD or not name.value or
                delim == (perfile.TOK_CHAR, '(')):
            raise perfile.ParseException(
                'Invalid %%mock directive at %s' % start_coord
            )

        # Save the argument
        args.append(
            HypoMockArg(_make_type(type_, 'mock', start_coord), name.value)
        )

    # Construct and save the mock
    # NOTE(review): the coordinate range here is
    # ``start_coord - start_coord`` (an empty range), unlike the other
    # directives which span start-to-end -- confirm this is intentional
    # for this single-line directive.
    values['mocks'][func_name] = HypocriteMock(
        start_coord - start_coord, func_name, return_type, args
    )
@HypoParser.directive(collections.OrderedDict, 'test', 'tests')
class TestDirective(object):
    """
    The ``%test`` directive. This is a multi-line directive that
    describes a single test to be included in the generated test file.
    Should contain a TOK_WORD token giving the name of the test,
    followed by a TOK_CHAR token with the value '{'. Will be ended by
    a '%}' directive, which must appear at the beginning of a line.
    """

    def __init__(self, values, start_coord, toks):
        """
        Initialize a ``TestDirective`` instance.
        :param dict values: The values dictionary that the directive's
                            return value may be placed in.
        :param start_coord: The coordinates the directive started at.
        :type start_coord: ``hypocrite.location.Coordinate``
        :param list toks: A list of tokens.
        :raises hypocrite.perfile.ParseException:
            An error occurred while parsing the directive.
        """

        # Make sure the token list is correct: a test name first and
        # the opening brace last (fixtures may sit between them).
        if (len(toks) < 2 or toks[0].type_ != perfile.TOK_WORD or
                not toks[0].value or toks[-1] != (perfile.TOK_CHAR, '{')):
            raise perfile.ParseException(
                'Invalid %%test directive at %s' % start_coord
            )

        # Save the gunk we need for __call__()
        self.name = toks[0].value
        self.fixtures = []
        self.values = values
        self.start_coord = start_coord

        # Extract the optional fixtures: 'name(fix1, !fix2, ...) {'
        if len(toks) > 2:
            if (toks[1] != (perfile.TOK_CHAR, '(') or
                    toks[-2] != (perfile.TOK_CHAR, ')')):
                raise perfile.ParseException(
                    'Invalid %%test directive at %s' % start_coord
                )
            for fix_toks in perfile.split_toks(toks[2:-2],
                                               {(perfile.TOK_CHAR, ',')}):
                # Sanity-check the tokens: at most '!' plus a name
                if len(fix_toks) < 1 or len(fix_toks) > 2:
                    raise perfile.ParseException(
                        'Invalid fixture specification in %%test directive '
                        'at %s' % start_coord
                    )

                # Determine if it's an injectable; a leading '!' means
                # the fixture runs but its value is not injected.
                inject = True
                if fix_toks[0] == (perfile.TOK_CHAR, '!'):
                    inject = False
                    fix_toks.pop(0)

                # Determine the fixture name
                if (len(fix_toks) != 1 or
                        fix_toks[0].type_ != perfile.TOK_WORD or
                        not fix_toks[0].value):
                    raise perfile.ParseException(
                        'Invalid fixture specification in %%test directive '
                        'at %s' % start_coord
                    )

                # Add the fixture injection
                self.fixtures.append(HypoFixtureInjection(
                    fix_toks[0].value, inject
                ))

    def __call__(self, end_coord, buf, toks):
        """
        Called once processing of the directive is complete.
        :param end_coord: The coordinates at which the directive
                          processing completed.
        :type end_coord: ``hypocrite.location.Coordinate``
        :param list buf: A list of lines, including trailing newlines,
                         enclosed within the directive.
        :param list toks: A list of tokens. Will be ``None`` if the
                          directive was closed by the end of file.
        :returns: A ``None`` value to indicate no further processing
                  is necessary.
        :raises hypocrite.perfile.ParseException:
            An error occurred while parsing the directive.
        """

        # Check for errors: EOF before '%}', or junk tokens after it.
        if toks is None:
            raise perfile.ParseException(
                'Unclosed %%test directive at end of file; starts at %s' %
                self.start_coord
            )
        elif len(toks) != 0:
            raise perfile.ParseException(
                'Invalid end of %%test directive at %s' % end_coord
            )

        # Save the test data
        self.values['tests'][self.name] = HypocriteTest(
            self.start_coord - end_coord, self.name, buf, self.fixtures
        )

        return None
@HypoParser.directive(lambda: {}, 'fixture', 'fixtures')
class FixtureDirective(object):
    """
    The ``%fixture`` directive. This is a multi-line directive that
    describes a named fixture to execute before a test. The directive
    may be followed by another multiline directive to provide cleanup
    code.
    """

    def __init__(self, values, start_coord, toks):
        """
        Initialize a ``FixtureDirective`` instance.
        :param dict values: The values dictionary that the directive's
                            return value may be placed in.
        :param start_coord: The coordinates the directive started at.
        :type start_coord: ``hypocrite.location.Coordinate``
        :param list toks: A list of tokens.
        :raises hypocrite.perfile.ParseException:
            An error occurred while parsing the directive.
        """

        # Parse the directive: an optional C type, the fixture name,
        # then the opening brace.
        end_expected = False
        for type_, name, delim in _extract_type(toks,
                                                {(perfile.TOK_CHAR, '{')}):
            # Were we expecting the end of the directive?
            if end_expected:
                if type_ or name or delim:
                    raise perfile.ParseException(
                        'Unexpected tokens after %%fixture directive at %s' %
                        start_coord
                    )

                # Just here to exhaust the iterator for coverage
                continue  # pragma: no cover

            # OK, was it the end of the directive?
            elif not delim:
                raise perfile.ParseException(
                    'Premature end of arguments in %%fixture directive at %s' %
                    start_coord
                )

            # Found the open brace
            end_expected = True

            # Sanity-check the name token
            if not name or name.type_ != perfile.TOK_WORD or not name.value:
                raise perfile.ParseException(
                    'Invalid %%fixture directive at %s' % start_coord
                )

            # Save the fixture's type and name; a missing or 'void'
            # type means the fixture produces no injectable value.
            self.name = name.value
            self.type_ = (
                None if not type_ or type_ == [(perfile.TOK_WORD, 'void')]
                else _make_type(type_, 'fixture', start_coord)
            )
            self.values = values
            self.start_coord = start_coord
            self.block_start = start_coord

    def __call__(self, end_coord, buf, toks):
        """
        Called once processing of the directive is complete.
        :param end_coord: The coordinates at which the directive
                          processing completed.
        :type end_coord: ``hypocrite.location.Coordinate``
        :param list buf: A list of lines, including trailing newlines,
                         enclosed within the directive.
        :param list toks: A list of tokens. Will be ``None`` if the
                          directive was closed by the end of file.
        :returns: A ``None`` value to indicate no further processing
                  is necessary, or a callable to collect the remaining
                  lines.
        :raises hypocrite.perfile.ParseException:
            An error occurred while parsing the directive.
        """

        # Check for errors: the block must end with either a bare '%}'
        # or a chained 'teardown {' clause.
        if toks is None:
            raise perfile.ParseException(
                'Unclosed %%fixture directive at end of file; starts at %s' %
                self.block_start
            )
        elif (len(toks) != 0 and
              toks != [(perfile.TOK_WORD, 'teardown'),
                       (perfile.TOK_CHAR, '{')]):
            # NOTE(review): this rejects an invalid end of the %fixture
            # block, but the message names %teardown -- confirm the
            # wording is intended.
            raise perfile.ParseException(
                'Invalid %%teardown directive at %s' % end_coord
            )

        # If we have a teardown clause, save the code clause and chain
        if toks:
            self.code = buf
            self.block_start = end_coord  # update to start of teardown
            return self.teardown

        # Create the fixture
        self.values['fixtures'][self.name] = Fixture(
            self.start_coord - end_coord, self.name, self.type_, buf
        )

        return None

    def teardown(self, end_coord, buf, toks):
        """
        Called once processing of the teardown directive is complete.
        :param end_coord: The coordinates at which the directive
                          processing completed.
        :type end_coord: ``hypocrite.location.Coordinate``
        :param list buf: A list of lines, including trailing newlines,
                         enclosed within the directive.
        :param list toks: A list of tokens. Will be ``None`` if the
                          directive was closed by the end of file.
        :returns: A ``None`` value to indicate no further processing
                  is necessary.
        :raises hypocrite.perfile.ParseException:
            An error occurred while parsing the directive.
        """

        # Check for errors
        if toks is None:
            raise perfile.ParseException(
                'Unclosed %%teardown directive at end of file; starts at %s' %
                self.block_start
            )
        elif len(toks) != 0:
            raise perfile.ParseException(
                'Invalid end of %%teardown directive at %s' % end_coord
            )

        # Create the fixture, with both setup and teardown code.
        self.values['fixtures'][self.name] = Fixture(
            self.start_coord - end_coord, self.name, self.type_, self.code, buf
        )

        return None
class HypoFile(object):
    """
    Represent a hypocrite input file. These files consist of a
    sequence of comments (both C-style "/* */" and C++-style "//"
    comments are recognized) and directives, introduced by '%'
    characters. Some directives are multi-line directives, delimited
    by an open '{' on the directive line and a '%}' on another line to
    indicate the end of the directive contents. Directive lines may
    be continued to the next line by ending them with a '\\'
    character.
    """

    # Master template used by render().
    TEMPLATE = 'master.c.tmpl'

    @classmethod
    def parse(cls, path):
        """
        Parse a file into a ``HypoFile`` instance.
        :param str path: The path to the hypocrite input file.
        :returns: An initialized hypocrite file representation.
        :rtype: ``HypoFile``
        :raises hypocrite.perfile.ParseException:
            An error occurred while parsing the input file.
        """

        # Grab a parser instance
        parser = HypoParser()

        # Parse the input file.  Bug fix: mode 'rU' was deprecated in
        # Python 3 and the 'U' flag was removed in 3.11 (it raises
        # ValueError there); plain 'r' already provides universal
        # newline handling on Python 3.
        with open(path, 'r') as stream:
            values = parser.parse(stream, os.path.basename(path))

        return cls(path, **values)

    def __init__(self, path, target, preamble, tests, mocks, fixtures):
        """
        Initialize a ``HypoFile`` instance.
        :param str path: The path to the hypocrite input file.
        :param str target: The target file being tested. This will be
                           included by the generated test file.
        :param list preamble: A list of preambles (instances of
                              ``Preamble``) listing preambles to
                              include in the generated test file.
        :param dict tests: A dictionary (probably a
                           ``collections.OrderedDict``) mapping test
                           names to test descriptions (instances of
                           ``HypocriteTest``).
        :param dict mocks: A dictionary mapping the names of C
                           functions to mock to descriptions of those
                           mocks (instances of ``HypocriteMock``).
        :param dict fixtures: A dictionary mapping the names of test
                              fixtures to descriptions of those
                              fixtures (instances of ``Fixture``).
        """

        self.path = path
        self.target = target
        self.preamble = preamble
        self.tests = tests
        self.mocks = mocks
        self.fixtures = fixtures

    def render(self, test_fname):
        """
        Render the ``HypoFile`` instance into an output file.
        :param str test_fname: The base name of the test file.
        :returns: A list of lines to be emitted to the output file.
        :rtype: ``hypocrite.linelist.LineList``
        """

        # First, set up a render context
        ctxt = template.RenderContext()

        # Now render all the elements, starting with the preamble.
        # Tests keep their declaration order; mocks and fixtures are
        # rendered in name order so the output is deterministic.
        for preamble in self.preamble:
            preamble.render(self, ctxt)
        for test in self.tests.values():
            test.render(self, ctxt)
        for _name, mock in sorted(self.mocks.items(), key=lambda x: x[0]):
            mock.render(self, ctxt)
        for _name, fix in sorted(self.fixtures.items(), key=lambda x: x[0]):
            fix.render(self, ctxt)

        # Grab the master template
        tmpl = template.Template.get_tmpl(self.TEMPLATE)

        # Render it and return the code
        return tmpl.render(
            ctxt,
            source=os.path.basename(self.path),
            target=self.target,
            test_fname=test_fname,
        )
| true |
0983e36b718cd578900d57e17432fba13a77b602 | Python | gabriellaec/desoft-analise-exercicios | /backup/user_218/ch4_2020_03_23_20_02_59_320523.py | UTF-8 | 194 | 3.078125 | 3 | [] | no_license | def classifica_idade(idade):
x = idade
if idade <= 11:
print('crianca')
elif idade >= 12 and idade <=17:
print('adolescente')
elif idade >= 18:
print('adulto')
return y | true |
d64d3ca75478e03e8fbc3b98142d5e933b5367ea | Python | akashvshroff/DSA_Coursera_Specialisation | /Algorithms_on_Graphs/week5_assignment/clustering.py | UTF-8 | 2,151 | 3.625 | 4 | [] | no_license | # Uses python3
import sys
import math
import itertools as it
class DisjointSet:
    """
    Union-find (disjoint set) structure with union by rank and path
    compression, used by the modified Kruskal's algorithm.
    """

    def __init__(self, n):
        # Slots are allocated up front; make_set() initializes them.
        self.parent = [None] * n
        self.rank = [None] * n
        self.num_sets = n

    def make_set(self, i):
        # Each element starts as the root of its own singleton set.
        self.parent[i] = i
        self.rank[i] = 0

    def find(self, i):
        # Path compression: point i straight at its root.
        if self.parent[i] != i:
            self.parent[i] = self.find(self.parent[i])
        return self.parent[i]

    def union(self, i, j):
        ri, rj = self.find(i), self.find(j)
        if ri == rj:
            return
        self.num_sets -= 1
        # Union by rank: attach the shallower tree under the deeper one.
        if self.rank[ri] > self.rank[rj]:
            self.parent[rj] = ri
        else:
            self.parent[ri] = rj
            if self.rank[ri] == self.rank[rj]:
                self.rank[rj] = self.rank[ri] + 1
def kruskal(edges, n, k, x, y):
    """
    Modified Kruskal's algorithm: grow a spanning forest from ``edges``
    (which must be pre-sorted by ascending length) and return the
    length of the first edge that would merge the forest below ``k``
    components -- i.e. the minimum distance between the k clusters.

    :param edges: list of (u, v) vertex-index pairs, sorted ascending
                  by edge length
    :param n: number of points
    :param k: desired number of clusters
    :param x: x coordinates, indexed by vertex
    :param y: y coordinates, indexed by vertex
    :returns: the minimum inter-cluster distance, or None if the forest
              never reaches exactly k components
    """
    ds = DisjointSet(n)
    for i in range(n):
        ds.make_set(i)
    for u, v in edges:
        if ds.find(u) != ds.find(v):  # edge joins two different components
            if ds.num_sets == k:
                # Improvement: compute the length from this function's
                # own x/y parameters instead of calling edge_sort(),
                # which silently read the module-level globals and made
                # the parameters dead weight.
                return distance(x[u], y[u], x[v], y[v])
            ds.union(u, v)
def distance(x1, y1, x2, y2):
    """
    Euclidean distance between points (x1, y1) and (x2, y2).
    """
    dx = x1 - x2
    dy = y1 - y2
    return math.sqrt(dx ** 2 + dy ** 2)
def edge_sort(edge):
    # Sort key for an edge: its Euclidean length.
    # NOTE(review): this reads the module-level globals ``x`` and ``y``
    # (the coordinate lists built in the __main__ block), not any local
    # state -- it only works after those globals exist.
    u, v = edge
    return distance(x[u], y[u], x[v], y[v])
def clustering(x, y, k, n):
    """
    Build every pair of points, order the pairs by ascending distance,
    and run the modified Kruskal's algorithm to obtain the maximum
    spacing achievable with k clusters.
    """
    all_pairs = sorted(it.combinations(range(n), 2), key=edge_sort)
    return kruskal(all_pairs, n, k, x, y)
if __name__ == '__main__':
    # Input format on stdin:
    #   n
    #   x1 y1 x2 y2 ... xn yn
    #   k
    # NOTE(review): this rebinds the builtin name ``input`` to the raw
    # stdin text; harmless here because input() is never called again.
    input = sys.stdin.read()
    data = list(map(int, input.split()))
    n = data[0]
    data = data[1:]
    x = data[0:2 * n:2]   # every even entry is an x coordinate
    y = data[1:2 * n:2]   # every odd entry is a y coordinate
    data = data[2 * n:]
    k = data[0]
    print("{0:.9f}".format(clustering(x, y, k, n)))
| true |
1cf6f35656b0ee0d73dd11b1b9e486e6c0430374 | Python | stathwan/semi-supervised-learning | /vae_pytorch.py | UTF-8 | 6,774 | 2.96875 | 3 | [] | no_license | '''
reference
https://arxiv.org/abs/1312.6114
https://github.com/wohlert/semi-supervised-pytorch
'''
import math
import torch
import torch.nn as nn
from torch.autograd import Variable
import torch.nn.functional as F
cuda = torch.cuda.is_available()
import matplotlib.pyplot as plt
class Encoder(nn.Module):
    """
    Inference network q_phi(z|x) approximating p(z|x): maps an input
    batch to the mean and (softplus-activated) log-variance of the
    latent posterior.
    """

    def __init__(self, dims):
        super(Encoder, self).__init__()
        x_dim, h_dim, z_dim = dims  # h_dim is a list of hidden widths
        widths = [x_dim] + list(h_dim)
        # One Linear layer per consecutive pair of widths.
        self.hidden = nn.ModuleList(
            nn.Linear(n_in, n_out) for n_in, n_out in zip(widths, widths[1:])
        )
        self.mu = nn.Linear(h_dim[-1], z_dim)
        self.log_var = nn.Linear(h_dim[-1], z_dim)

    def forward(self, x):
        h = x
        for layer in self.hidden:
            h = F.relu(layer(h))
        # softplus(t) = log(1 + exp(t)) keeps the log-variance head
        # non-negative.
        return self.mu(h), F.softplus(self.log_var(h))
class Sample(nn.Module):
    """
    Reparameterization trick: draw z = mu + sigma * eps with
    eps ~ N(0, I), so gradients flow through mu and log_var.
    """

    def __init__(self):
        super(Sample, self).__init__()

    def reparametrize(self, mu, log_var):
        eps = Variable(torch.randn(mu.size()), requires_grad=False)
        if mu.is_cuda:
            eps = eps.cuda()
        # sigma = exp(log_var / 2)
        sigma = torch.exp(0.5 * log_var)
        # z = mu + sigma * eps
        return mu.addcmul(sigma, eps)

    def forward(self, x):
        mu, log_var = x
        return self.reparametrize(mu, log_var)
class Decoder(nn.Module):
    """
    Generative network p_theta(x|z): maps a latent code back to
    per-pixel Bernoulli parameters via a sigmoid output layer.
    """

    def __init__(self, dims):
        super(Decoder, self).__init__()
        z_dim, h_dim, x_dim = dims  # h_dim is given already reversed
        widths = [z_dim] + list(h_dim)
        # One Linear layer per consecutive pair of widths.
        self.hidden = nn.ModuleList(
            nn.Linear(n_in, n_out) for n_in, n_out in zip(widths, widths[1:])
        )
        self.last_layer = nn.Linear(h_dim[-1], x_dim)
        self.sigmoid = nn.Sigmoid()

    def forward(self, x):
        h = x
        for layer in self.hidden:
            h = F.relu(layer(h))
        return self.sigmoid(self.last_layer(h))
def log_gaussian(x, mu, log_var):
    """
    Returns the log pdf of a normal distribution parametrised
    by mu and log_var evaluated at x.
    :param x: point to evaluate (tensor)
    :param mu: mean of distribution (tensor or scalar)
    :param log_var: log variance of distribution (tensor or scalar)
    :return: log N(x|µ,σ), summed over the last dimension
    """
    # Bug fix: torch.tensor() on an existing tensor copies it and
    # DETACHES it from the autograd graph (with a warning), which
    # silently broke gradients through mu/log_var when they came from
    # the encoder.  torch.as_tensor() returns tensor inputs unchanged
    # (keeping the graph) and still converts plain Python scalars.
    mu = torch.as_tensor(mu, dtype=torch.float32)
    log_var = torch.as_tensor(log_var, dtype=torch.float32)
    log_pdf = (- 0.5 * math.log(2 * math.pi)
               - log_var / 2
               - (x - mu) ** 2 / (2 * torch.exp(log_var)))
    return torch.sum(log_pdf, dim=-1)
class vae(nn.Module):
    """
    Variational autoencoder (Kingma & Welling, 2013): an Encoder
    producing q(z|x), a Sample layer implementing the
    reparameterization trick, and a Decoder producing p(x|z).  The KL
    divergence of the most recent forward pass is stored on
    ``self.kl_divergence`` so the training loop can build the ELBO.
    """

    def __init__(self, dims, p_param):
        """
        :param dims: [x_dim, h_dim (list of hidden widths), z_dim]
        :param p_param: (mu, log_var) of the prior p(z)
        """
        super(vae, self).__init__()
        self.p_param = p_param
        [x_dim, h_dim, z_dim] = dims
        self.z_dim = z_dim
        self.encoder = Encoder([x_dim, h_dim, z_dim])
        # The decoder mirrors the encoder's hidden widths in reverse.
        self.decoder = Decoder([z_dim, list(reversed(h_dim)), x_dim])
        self.kl_divergence = 0
        for m in self.modules():  # iterates over all submodules
            if isinstance(m, nn.Linear):
                # Fix: nn.init.xavier_normal is a deprecated alias;
                # use the in-place xavier_normal_ on the parameter
                # (init functions run under no_grad internally).
                nn.init.xavier_normal_(m.weight)
                if m.bias is not None:
                    m.bias.data.zero_()

    def _kld(self, z, q_param):
        """Monte-Carlo KL estimate log q(z|x) - log p(z) at sample z."""
        (mu_hat, log_var_hat) = q_param
        qz = log_gaussian(z, mu_hat, log_var_hat)
        (mu, log_var) = self.p_param
        pz = log_gaussian(z, mu, log_var)
        kl = qz - pz
        return kl

    def forward(self, x, y=None):
        """
        Runs a data point through the model in order
        to provide its reconstruction and q distribution
        parameters.
        :param x: input data
        :param y: unused (kept for interface compatibility)
        :return: reconstructed input
        """
        z_mu, z_log_var = self.encoder(x)
        z = Sample()((z_mu, z_log_var))
        self.kl_divergence = self._kld(z, (z_mu, z_log_var))
        x_mu = self.decoder(z)
        return x_mu

    def sample(self, z):
        """
        Given z ~ N(0, I) generates a sample from
        the learned distribution based on p_θ(x|z).
        :param z: (torch.autograd.Variable) Random normal variable
        :return: (torch.autograd.Variable) generated sample
        """
        return self.decoder(z)
# use custom BCE to sum up with Regularization term
def binary_cross_entropy(pred_y, y):
    """
    Elementwise binary cross-entropy summed over the last dimension.
    Implemented by hand (rather than nn.BCELoss) so the per-sample sum
    can be combined with the KL regularization term; 1e-8 guards
    against log(0).
    """
    pos = y * torch.log(pred_y + 1e-8)
    neg = (1 - y) * torch.log(1 - pred_y + 1e-8)
    return -torch.sum(pos + neg, dim=-1)
from torchvision import datasets, transforms
batch_size=64
test_batch_size=1000  # NOTE(review): defined but never used below
### load data
# pin_memory/num_workers only help when a GPU is present.
kwargs = {'num_workers': 1, 'pin_memory': True} if cuda else {}
train_loader = torch.utils.data.DataLoader(
    datasets.MNIST('../data', train=True, download=True,
                   transform=transforms.Compose([
                       transforms.ToTensor()
                   ])), batch_size=batch_size, shuffle=True, **kwargs)
###
# Standard-normal prior p(z): mu = 0, log_var = 0 (i.e. sigma = 1).
(mu, log_var) = (0, 0)
p_param = (mu, log_var)
# 784 = 28*28 MNIST pixels; two hidden layers; 32-dim latent space.
dims=([784, [256, 128], 32])
model = vae(dims,p_param)
optimizer = torch.optim.Adam(model.parameters())
for epoch in range(50):
    model.train()
    total_loss = 0
    for (batch_x, _) in train_loader:
        # Flatten 28x28 images into 784-vectors.
        batch_x = batch_x.view(-1, 28 * 28)
        if cuda:
            batch_x = batch_x.cuda(device=0)

        reconstruction = model(batch_x)

        # ELBO = E[log p(x|z)] - KL(q(z|x) || p(z)); minimize -ELBO.
        likelihood = -binary_cross_entropy(reconstruction, batch_x)
        elbo = likelihood - model.kl_divergence

        L = -torch.mean(elbo)

        L.backward()
        optimizer.step()
        optimizer.zero_grad()

        # Bug fix: indexing a 0-dim tensor (L.data[0]) raises
        # IndexError on modern PyTorch; .item() extracts the float.
        total_loss += L.item()

    m = len(train_loader)

    if epoch % 1 == 0:  # report every epoch
        print("Epoch: {} \t L: {}".format(epoch, total_loss / m))
model.eval()
# Decode 16 latent codes drawn from the prior N(0, I) (latent dim 32)
# into images and show them side by side.
x_mu = model.sample(Variable(torch.randn(16, 32)))
f, axarr = plt.subplots(1, 16, figsize=(18, 12))
samples = x_mu.data.view(-1, 28, 28).numpy()
for i, ax in enumerate(axarr.flat):
    ax.imshow(samples[i])
    ax.axis("off")
| true |
19425ddf3cbbe6b81b78e196e7b8029c7a8eb3e3 | Python | jmulford-bw/email-sms-integration | /send_mailgun_email/views.py | UTF-8 | 2,469 | 3 | 3 | [] | no_license | """
Implementation of SMS to Email integration using the Bandwidth messaging API
and Mailgun API. This route allows an end user to text a Bandwidth number
that is set up to make a POST request on this route,
and have the contents of that text be received by an email through the Mailgun
API. The email received will be sent from <number>@<domain> where
<number> is the phone number of the end user sending the text.
"""
from django.http import HttpResponse
from django.views.decorators.csrf import csrf_exempt
import requests
import json
import os
# Configuration pulled from the environment at import time; a missing
# variable raises KeyError and aborts startup (fail fast).
MAILGUN_API_KEY = os.environ['MAILGUN_API_KEY']
MAILGUN_EMAIL_DOMAIN = os.environ['MAILGUN_EMAIL_DOMAIN']
DESTINATION_EMAIL = os.environ['DESTINATION_EMAIL']
def get_message(post_request):
    """
    Extract the SMS text from a Bandwidth webhook POST request.
    :post_request :HttpRequest A Django POST HttpRequest whose body is
                               the JSON payload
    :return :String The message text ("text" field)
    """
    payload = json.loads(post_request.body)
    return payload["text"]
def get_source(post_request):
    """
    Extract the sender phone number from a Bandwidth webhook POST.
    :post_request :HttpRequest A Django POST HttpRequest whose body is
                               the JSON payload
    :return :String The source phone number ("from" field)
    """
    payload = json.loads(post_request.body)
    return payload["from"]
def send_message(message, source_email, destination, url, api_key, request_function):
    """
    Send the message through the Mailgun HTTP API.
    :message :String The message to be sent
    :source_email :String The "from" address (<number>@<domain>)
    :destination :String The destination email address
    :url :String The Mailgun messages endpoint
    :api_key :String The Mailgun API key
    :request_function :Object A requests-compatible object exposing
                              post(url, auth=..., data=...)
    """
    payload = {
        "from": source_email,
        "to": destination,
        "text": message
    }
    request_function.post(url, auth=("api", api_key), data=payload)
@csrf_exempt
def send_mailgun_email(request):
    """
    SMS-to-email webhook: Bandwidth POSTs the inbound-text JSON here,
    and the text body is forwarded to DESTINATION_EMAIL through the
    Mailgun API, sent from <sender-number>@<MAILGUN_EMAIL_DOMAIN>.
    GET returns a trivial liveness response; other verbs get a 405.
    """
    if request.method == "GET":
        return HttpResponse('hello')
    elif request.method == "POST":
        # Get information to send message
        destination = DESTINATION_EMAIL
        message = get_message(request)
        source = get_source(request)

        # Send message
        mailgun_url = "https://api.mailgun.net/v3/{}/messages".format(
            MAILGUN_EMAIL_DOMAIN
        )
        source_email = "{}@{}".format(source, MAILGUN_EMAIL_DOMAIN)
        send_message(
            message,
            source_email,
            destination,
            mailgun_url,
            MAILGUN_API_KEY,
            requests
        )
        return HttpResponse("success")
    # Bug fix: other HTTP verbs previously fell through and returned
    # None, which makes Django raise ValueError; answer 405 instead.
    return HttpResponse("method not allowed", status=405)
| true |
68265b36e03e17728aac0fa8bdf3406e51f7774b | Python | zx576/tu-weibo | /aikeke/tofile.py | UTF-8 | 502 | 2.75 | 3 | [] | no_license | #coding=utf-8
import csv
from models import Aikeke
class ToCsv:
    """Export Aikeke records from the database into aikeke.csv."""

    def __init__(self):
        pass

    def _insertrow(self, lst):
        # Append one row.  The file is reopened in append mode for
        # every row, so repeated runs keep accumulating lines.
        with open('aikeke.csv', 'a+', encoding='utf-8', newline='')as f:
            writer = csv.writer(f)
            writer.writerow(lst)

    def _extract(self):
        # Export the first 500 records as a sample.
        # (The original comment said 100, but the slice takes 500.)
        query = Aikeke.select()[:500]
        for item in query:
            self._insertrow([item.content, item.url])

    def work(self):
        # Public entry point; delegates to _extract().
        return self._extract()
if __name__ == '__main__':
    # Script entry point: run the export once and exit.
    tc = ToCsv()
    tc.work()
| true |
f3c5c521026fb1824306c4033a89dae37c1a11b2 | Python | Kcpf/robot21.1 | /robot-aps3-v2-tuesday09/biblioteca_cow.py | UTF-8 | 5,979 | 3.109375 | 3 | [] | no_license | #!/usr/bin/python
# -*- coding: utf-8 -*-
import cv2
import numpy as np
import math
import matplotlib.pyplot as plt
import os
def load_mobilenet():
    """Do not change or rename this function (assignment requirement).
    Loads the MobileNet model and its parameters; returns the network.
    """
    # Network architecture description and trained weights, respectively.
    proto_path = "./mobilenet_detection/MobileNetSSD_deploy.prototxt.txt"
    weights_path = "./mobilenet_detection/MobileNetSSD_deploy.caffemodel"
    return cv2.dnn.readNetFromCaffe(proto_path, weights_path)
def detect(net, frame, CONFIDENCE, COLORS, CLASSES):
    """
    Run the MobileNet-SSD detector on a BGR frame.

    (Fix: the original body contained the same docstring twice -- the
    second copy was a dead no-op string statement and has been removed.)

    :param net: loaded cv2 dnn network (see load_mobilenet)
    :param frame: BGR image (numpy array)
    :param CONFIDENCE: minimum confidence for keeping a detection
    :param COLORS: per-class BGR colors used for drawing
    :param CLASSES: class-index -> label sequence
    :returns: (annotated copy of the frame, list of
              (label, confidence%, (startX, startY), (endX, endY)))
    """
    image = frame.copy()
    (h, w) = image.shape[:2]
    # 300x300 is the network's expected input size; 0.007843 = 1/127.5
    # rescales pixel values and 127.5 is the mean-subtraction value.
    blob = cv2.dnn.blobFromImage(cv2.resize(image, (300, 300)), 0.007843, (300, 300), 127.5)

    # pass the blob through the network and obtain the detections and
    # predictions
    print("[INFO] computing object detections...")
    net.setInput(blob)
    detections = net.forward()

    results = []

    # loop over the detections
    for i in np.arange(0, detections.shape[2]):
        # extract the confidence (i.e., probability) associated with the
        # prediction
        confidence = detections[0, 0, i, 2]

        # filter out weak detections by ensuring the `confidence` is
        # greater than the minimum confidence
        if confidence > CONFIDENCE:
            # extract the index of the class label from the `detections`,
            # then compute the (x, y)-coordinates of the bounding box for
            # the object
            idx = int(detections[0, 0, i, 1])
            box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])
            (startX, startY, endX, endY) = box.astype("int")

            # display the prediction
            label = "{}: {:.2f}%".format(CLASSES[idx], confidence * 100)
            print("[INFO] {}".format(label))
            cv2.rectangle(image, (startX, startY), (endX, endY),
                          COLORS[idx], 2)
            y = startY - 15 if startY - 15 > 15 else startY + 15
            cv2.putText(image, label, (startX, y),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.5, COLORS[idx], 2)

            results.append((CLASSES[idx], confidence * 100, (startX, startY), (endX, endY)))

    # show the output image
    return image, results
def separar_caixa_entre_animais(img, resultados):
    """Do not change or rename this function (assignment requirement).
    Receives the MobileNet results and returns a dict with two keys,
    'vaca' (cow) and 'lobo' (wolf).
    'vaca' holds one box per cow, as [min_X, min_Y, max_X, max_Y]
    entries; each cow gets a blue rectangle.
    'lobo' holds the wolf boxes; a single red rectangle is drawn
    enclosing all of them.
    """
    contorno = img.copy()

    animais = {'vaca': [], 'lobo': []}

    # Detector classes that stand in for a wolf in this exercise.
    classes_lobo = ('dog', 'sheep', 'horse')

    for deteccao in resultados:
        rotulo = deteccao[0]
        caixa = [deteccao[2][0], deteccao[2][1], deteccao[3][0], deteccao[3][1]]
        if rotulo == 'cow':
            animais['vaca'].append(caixa)
        # Bug fix: the original condition was
        #   rotulo == 'dog' or 'sheep' or 'horse'
        # which is always truthy ('sheep' is a non-empty string), so
        # EVERY non-cow detection was counted as a wolf.  Membership
        # test restricts it to the intended classes.
        elif rotulo in classes_lobo:
            animais['lobo'].append(caixa)

    # Blue rectangle around each cow.
    for vaca in animais['vaca']:
        cv2.rectangle(contorno, (vaca[0], vaca[1]), (vaca[2], vaca[3]), (255, 0, 0), 6)

    # Single red rectangle enclosing every wolf detection.
    minx, miny, maxx, maxy = 7000, 7000, 0, 0
    for lobo in animais['lobo']:
        minx = min(minx, lobo[0])
        miny = min(miny, lobo[1])
        maxx = max(maxx, lobo[2])
        maxy = max(maxy, lobo[3])
    cv2.rectangle(contorno, (minx, miny), (maxx, maxy), (0, 0, 255), 6)

    return contorno, animais
def calcula_iou(boxA, boxB):
    """
    Intersection-over-union of two boxes given as
    [min_x, min_y, max_x, max_y] in inclusive pixel coordinates.
    """
    # Corners of the intersection rectangle.
    left = max(boxA[0], boxB[0])
    top = max(boxA[1], boxB[1])
    right = min(boxA[2], boxB[2])
    bottom = min(boxA[3], boxB[3])

    # +1 because coordinates are inclusive pixel indices; max(0, ...)
    # clamps non-overlapping boxes to zero area.
    interArea = max(0, right - left + 1) * max(0, bottom - top + 1)

    # Areas of the prediction and ground-truth rectangles.
    areaA = (boxA[2] - boxA[0] + 1) * (boxA[3] - boxA[1] + 1)
    areaB = (boxB[2] - boxB[0] + 1) * (boxB[3] - boxB[1] + 1)

    # Union = sum of both areas minus the doubly-counted intersection.
    return interArea / float(areaA + areaB - interArea)
def checar_perigo(image, animais):
    """Do not change or rename this function (assignment requirement).
    Given the box coordinates, a cow whose box intersects the wolves'
    box is in danger.  A cow in danger is labeled on the image in red;
    otherwise in blue.  Repeated for every cow in the image.
    """
    vaca = animais['vaca']
    lobo = animais['lobo']
    # Recompute the single bounding box enclosing every wolf detection
    # (same reduction as in separar_caixa_entre_animais).  Note the
    # loop rebinds the name ``lobo`` to each box while iterating.
    minx, miny, maxx, maxy = 7000, 7000, 0, 0
    for lobo in lobo:
        if lobo[0] < minx:
            minx = lobo[0]
        if lobo[1] < miny:
            miny = lobo[1]
        if lobo[2] > maxx:
            maxx = lobo[2]
        if lobo[3] > maxy:
            maxy = lobo[3]
    cordLobo = [minx,miny,maxx,maxy]
    # An IoU above 0.05 between a cow box and the wolf box counts as
    # danger.
    for vaca in vaca:
        iou = calcula_iou(vaca,cordLobo)
        if iou > 0.05:
            cv2.putText(image, str('Vaca em perigo !!!'), (vaca[0],vaca[1]), cv2.FONT_HERSHEY_SIMPLEX,1,(0,0,255),5)
        else:
            cv2.putText(image, str('Vaca segura'), (vaca[0],vaca[1]), cv2.FONT_HERSHEY_SIMPLEX,1,(255,0,0),5)
    return image
852b921a97bec94ad72e9a6498d95f5d3cc1833b | Python | cocaer/simpleNMT | /get_vocab.py | UTF-8 | 334 | 2.703125 | 3 | [] | no_license | # cat /data4/bjji/data/ldc/train.bpe.ch | python get_vocab.py > vocab.ch
import sys
from collections import Counter

# Count word frequencies over stdin and print "word count" lines in
# descending frequency order (vocabulary extraction for NMT).
cnt = Counter()
# Improvement: stream stdin line by line instead of readlines(), so
# the whole corpus never has to fit in memory at once.
for line in sys.stdin:
    # split() on whitespace already discards the trailing newline.
    cnt.update(line.split())

# most_common() with no argument returns all entries, sorted by count.
for word, freq in cnt.most_common():
    print(f"{word} {freq}")
9c3da76a6b0ed236010dca23e85b941c7dd915ee | Python | bright-night-sky/algorithm_study | /백준/Bronze/Bronze 3/10833번 ; 사과.py | UTF-8 | 951 | 3.640625 | 4 | [] | no_license | # https://www.acmicpc.net/problem/10833
# Import stdin so we can use the fast readline-based input.
from sys import stdin

# First line: the number of schools N (1 <= N <= 100), as an integer.
N = int(stdin.readline())

# Running total of leftover apples across all schools, starting at 0.
remain_apples = 0

# Repeat once per school.
for school_idx in range(N):
    # Each line: student count and apple count, space separated.
    # Both values are between 1 and 100.
    students, apples = map(int, stdin.readline().split(' '))
    # Apples that cannot be divided evenly among the students are
    # left over; add them to the total.
    remain_apples += apples % students

# Print the total number of leftover apples.
print(remain_apples)
c6cc7f25fa32f18fd50ba5389759c0ad97791e4a | Python | jyotikush/SEM-1 | /Program1.py | UTF-8 | 278 | 3.890625 | 4 | [] | no_license | a = '''jyoti'''
b = 123     # int
c = 23.2    # float
d = True    # bool

# printing the variables
for value in (a, b, c, d):
    print(value)

e = 2
f = 3
# Demonstrate the four basic arithmetic operators on 2 and 3.
for symbol, result in (("+", 2 + 3), ("-", 2 - 3), ("*", 2 * 3), ("/", 2 / 3)):
    print("The value of 2" + symbol + "3 is", result)
| true |
fca58f5543fd3cf90167a057778dee52035b4e6d | Python | periakiva/Google-Interview-Dev-Guide-Sols | /findpower.py | UTF-8 | 367 | 3.0625 | 3 | [] | no_license | import math
def findPow(n):
    """Return True if n is a perfect power, i.e. n == base ** factor
    for some integers base >= 2 and factor >= 2; otherwise False.

    Bug fixes relative to the original:
      * bases started at 0 and stopped BEFORE int(sqrt(n)), so perfect
        squares such as 4 and 49 were reported as False;
      * after base ** factor overshot n the inner loop kept running
        ('continue' instead of breaking), wasting work;
      * debug printing and the Python-2-only xrange were removed.
    """
    for base in range(2, int(math.sqrt(n)) + 1):
        power = base * base  # start at factor == 2
        while power <= n:
            if power == n:
                return True
            power *= base
        # power exceeded n without matching; try the next base
    return False
# Quick manual check: 120 is not a perfect power.
# Fix: use the print function so the script runs on Python 3 as well.
print(findPow(120))
| true |
85d57f08fbc9eb65d5b5dbaeb05ab966e78adb60 | Python | parmita/Stats-Basics-in-Python | /kMeans_Irisdataset.py | UTF-8 | 2,164 | 3.046875 | 3 | [] | no_license | import numpy as np # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

# iris.txt is read as a headerless comma-separated file with four
# measurements plus the species label per row -- TODO confirm format.
iris = pd.read_table("iris.txt", sep=',', names=('SepalLengthCm', 'SepalWidthCm', 'PetalLengthCm','PetalWidthCm','Species'))
print (iris.head())

# Feature matrix as a numpy array; species encoded as integer codes.
X = iris.drop('Species',axis=1).values
y = pd.Categorical(iris['Species']).codes

from sklearn.cluster import KMeans

# Three runs: the "right" k=3, an overclustered k=8, and a
# deliberately poor single random initialization.
estimators = {'k_means_iris_3': KMeans(n_clusters=3),
              'k_means_iris_8': KMeans(n_clusters=8),
              'k_means_iris_bad_init': KMeans(n_clusters=3, n_init=1,
                                              init='random')}

import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D

fignum = 1
for name, est in estimators.items():
    # One 3-D scatter per estimator, colored by cluster assignment.
    fig = plt.figure(fignum, figsize=(8, 6))
    plt.clf()
    ax = Axes3D(fig, rect=[0, 0, .95, 1], elev=48, azim=134)

    plt.cla()
    est.fit(X)
    labels = est.labels_

    # Fix: np.float was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin float is the documented replacement.
    ax.scatter(X[:, 3], X[:, 0], X[:, 2], c=labels.astype(float))

    ax.w_xaxis.set_ticklabels([])
    ax.w_yaxis.set_ticklabels([])
    ax.w_zaxis.set_ticklabels([])
    ax.set_xlabel('Petal width')
    ax.set_ylabel('Sepal length')
    ax.set_zlabel('Petal length')
    ax.set_title(name, loc='left', fontsize=15)
    fignum = fignum + 1

# Plot the ground truth
fig = plt.figure(fignum, figsize=(8, 6))
plt.clf()
ax = Axes3D(fig, rect=[0, 0, .95, 1], elev=48, azim=134)

plt.cla()

# Label each species cluster at its centroid.
for name, label in [('Setosa', 0),
                    ('Versicolour', 1),
                    ('Virginica', 2)]:
    ax.text3D(X[y == label, 3].mean(),
              X[y == label, 0].mean() + 1.5,
              X[y == label, 2].mean(), name,
              horizontalalignment='center',
              bbox=dict(alpha=.5, edgecolor='w', facecolor='w'))

# Reorder the labels to have colors matching the cluster results
# (same np.float -> float fix as above).
y = np.choose(y, [1, 2, 0]).astype(float)
ax.scatter(X[:, 3], X[:, 0], X[:, 2], c=y)

ax.w_xaxis.set_ticklabels([])
ax.w_yaxis.set_ticklabels([])
ax.w_zaxis.set_ticklabels([])
ax.set_xlabel('Petal width')
ax.set_ylabel('Sepal length')
ax.set_zlabel('Petal length')
plt.show()
1079b0fd411602a48ffee6f71d10c0dc6f934915 | Python | modulexcite/opentls | /tests/c/test_rand.py | UTF-8 | 4,793 | 3.484375 | 3 | [
"Apache-2.0"
] | permissive | """Test RAND API
The objective is to test the API wrapper, not the underlying random number
generators. The tests implemented were derived from John D Cook's chapter
in 'Beautiful Testing' titled 'How to test a random number generator'.
http://www.johndcook.com/blog/2010/12/06/how-to-test-a-random-number-generator-2/
"""
from __future__ import absolute_import, division, print_function
import math
import random
try:
import unittest2 as unittest
except ImportError:
import unittest
from tls.c import api
def cumulative_average(average=0.0, samples=0.0):
    """Coroutine that tracks a cumulative moving average.

    Prime it with ``send(None)``, then feed it values; each ``send``
    yields the mean of everything seen so far:

    >>> average = cumulative_average()
    >>> average.send(None)
    0.0
    >>> for value in [1, 2, 3, 4]:
    ...     mean = average.send(value)
    >>> mean
    2.5

    The *average* and *samples* arguments seed the accumulator so a
    caller can resume from a previously computed state.

    http://en.wikipedia.org/wiki/Moving_average#Cumulative_moving_average
    """
    mean = average
    count = samples
    while True:
        value = yield mean
        count += 1.0
        mean += (value - mean) / count
class RandTests(object):
    # Mixin with statistical sanity checks for a byte-valued random source.
    # Concrete subclasses must provide two class attributes:
    #   samples -- number of random values generated (tests assume 10000)
    #   data    -- an indexable sequence of `samples` values in 0..255
    def test_range(self):
        """Test extremes of valid range for random values has been generated.
        The probability of failure is less than 0.005e-17 for 10000 samples.
        """
        low = min(self.data)
        high = max(self.data)
        self.assertEqual(high, 255)
        self.assertEqual(low, 0)
    def test_median(self):
        """Test that the median is "close" to the expected mean."""
        sorted_ = sorted(self.data)
        median = sorted_[self.samples // 2]
        self.assertAlmostEqual(median, 127.5, delta=5.0)
    def test_mean(self):
        """Test the actual mean is "close" to the expected mean."""
        # `mean` holds the cumulative average after the final sample.
        average = cumulative_average()
        average.send(None)
        for value in self.data:
            mean = average.send(value)
        self.assertAlmostEqual(mean, 127.5, delta=3.0)
    def test_variance(self):
        """Test the variance is "close" to the expected mean."""
        # The variance is estimated against the *expected* mean (127),
        # and the tolerance is deliberately loose (half the expectation).
        expected_mean = 255 // 2
        average = cumulative_average()
        average.send(None)
        for value in self.data:
            deviation_squared = (value - expected_mean) ** 2
            variance = average.send(deviation_squared)
        expected_variance = (expected_mean // 2) ** 2
        self.assertAlmostEqual(variance, expected_variance, delta=expected_variance // 2)
    def test_buckets(self):
        """Test the distribution of values across the range."""
        # Every observed value must occur at least once and no more than
        # twice the uniform expectation (samples / 255).
        counts = {}
        for value in self.data:
            counts[value] = 1 + counts.get(value, 0)
        for value, count in counts.items():
            self.assertGreater(count, 0)
            self.assertLess(count, 2.0 * (self.samples / 255.0))
    def test_kolmogorov_smirnov(self):
        """Apply the Kolmogorov-Smirnov goodness-of-fit function.
        Range values for K+ sourced from 'Beautiful Testing'
        """
        # Only the first 1000 samples feed the K-S statistic.
        samples = 1e3
        counts = {}
        # counts[x] ends up as the number of observed values >= x, built by
        # incrementing every bucket 0..value for each sample.
        for num, value in enumerate(self.data):
            if num >= samples:
                break
            for x in range(value + 1):
                counts[x] = 1 + counts.get(x, 0)
        empirical = [counts.get(i, 0) / samples for i in range(256)]
        theoretical = [1.0 - (x / 255.0) for x in range(256)]
        # K+ is the largest deviation of the empirical curve *above* the
        # theoretical one; the bounds are the published acceptance range.
        kplus = math.sqrt(samples) * max(empirical[i] - theoretical[i] for i in range(256))
        self.assertGreaterEqual(kplus, 0.07089)
        self.assertLessEqual(kplus, 1.5174)
        #kminus = math.sqrt(samples) * max(theoretical[i] - empirical[i] for i in range(256))
        #self.assertGreaterEqual(kminus, 0.07089)
        #self.assertLessEqual(kminus, 1.5174)
class TestPRNG(unittest.TestCase, RandTests):
    """Test OpenSSL's pseudo random number generator"""
    samples = int(1e4)
    # C buffer the PRNG output is written into (shared by all tests).
    data = api.new('unsigned char[]', samples)
    @classmethod
    def setUpClass(cls):
        # Seed the PRNG from /dev/urandom if it is not yet seeded, then
        # fill the buffer with pseudo-random bytes once for the class.
        if not api.RAND_status():
            api.RAND_load_file(b"/dev/urandom", 1024)
        api.RAND_pseudo_bytes(api.cast('unsigned char*', cls.data), cls.samples)
    def setUp(self):
        # Every test requires a properly seeded PRNG.
        self.assertTrue(api.RAND_status())
class TestCryptoRNG(unittest.TestCase, RandTests):
    """Test OpenSSL's cryptographically valid random data"""
    samples = int(1e4)
    # C buffer the RNG output is written into (shared by all tests).
    data = api.new('unsigned char[]', samples)
    @classmethod
    def setUpClass(cls):
        # Fill the buffer once with cryptographically strong random bytes.
        api.RAND_bytes(api.cast('unsigned char*', cls.data), cls.samples)
class TestPyRandom(unittest.TestCase, RandTests):
    """Test Python's Mersenne Twister implementation"""
    samples = int(1e4)
    @classmethod
    def setUpClass(cls):
        # Reference data set drawn from Python's own random module.
        cls.data = [random.randint(0, 255) for i in range(cls.samples)]
| true |
f9eaada99d6b9fae7dc360cc236e88de646443c0 | Python | FGtatsuro/myatcoder | /acl_beginner_contest/C.py | UTF-8 | 780 | 2.984375 | 3 | [
"MIT"
] | permissive | import sys
# Rebind input to the raw stdin reader for fast competitive-programming I/O.
input = sys.stdin.readline
sys.setrecursionlimit(10 ** 7)
# n vertices and m undirected edges.
n, m = map(int, input().split())
# 1-indexed adjacency list; slot 0 is an unused placeholder.
graph = [0] + [[] for _ in range(n)]
for _ in range(m):
    n1, n2 = map(int, input().split())
    graph[n1].append(n2)
    graph[n2].append(n1)
from collections import deque
def bfs(graph, queue, dist):
    """Breadth-first search from the node(s) already in *queue*.

    *dist* doubles as the visited marker: entries equal to -1 are
    unvisited and are filled in with their hop distance as they are
    discovered.  Both *queue* and *dist* are mutated in place.
    """
    while queue:
        node = queue.popleft()
        for neighbor in graph[node]:
            if dist[neighbor] == -1:
                dist[neighbor] = dist[node] + 1
                queue.append(neighbor)
# dist also serves as the visited marker for counting connected components.
dist = [0] + [-1] * n
part = 0
for i in range(1, n + 1):
    if dist[i] != -1:
        continue
    else:
        # Unvisited vertex: flood-fill its whole component via BFS.
        queue = deque([i])
        dist[i] = 0
        bfs(graph, queue, dist)
        part += 1
# Connecting `part` components requires part - 1 additional edges.
print(part - 1)
| true |
9f459f9d52cd4dd965f4dc7349d8419ee11bca18 | Python | ViktorKovinin/ds_python | /1.syntax/3.lists.py | UTF-8 | 2,312 | 4.40625 | 4 | [] | no_license | # **** Списки(lists) ****
# создание пустого списка
my_list = []
my_list_2 = list()
# метод списков append, который добавляет объект в конец списка
my_list_2.append(100)
my_list_2.append(100)
my_list_2.append(777)
my_list_2.append("hello")
my_list_2.append([1,2,3])
# обращение к элкментам списка
el = my_list_2[0] # извлечение значения по индексу
del my_list_2[1] # удаление элемента по индексу
my_list_2[1] = 200 # замена значения
# создание заполненного списка
my_list = [10, 20, 30,40,"A","B"]
s = "привет, Мир!"
my_list_3 = list(s)
# *** функция range() ***
# range(end),создается набор чисел от 0 до числа end (не включительно)
# range(start, end) создается набор чисел от числа start до числа end (не включительно)
# range(start, end, step) создается набор чисел от числа start до числа end (не включительно) c шагом step
numbers = list(range(5))
numbers = list(range(1,5))
numbers = list(range(1,10,2))
numbers = list(range(10,1,-1))
# *** методы списка ***
a = [10,20,30,40,50]
# append - метод добавления элемента
# объект.метод() метод идет всегда со скобочками(даже если они пустые), так как метод это функция и направлена к определнному типу данных
a.append (100)
# insert - метод, котрый добавляет элемент в списко по индексу
a.insert(0, 7)
# remove - удаляет элемент по значению
a.remove(30)
# clear - метод, который очищает список
# print(a)
# a.clear()
# sort - сортировка списка
b = [8,2,1,7,3,2,8,4,1]
# b.sort()
# b.sort(reverse=True)
# b.sort(reverse=False)
# reverse - метод "поворота" списка
c = [1,2,3]
c.reverse()
# pop(
# count() - считает сколько одинаковых элементов в списке
# index() -
print(c) | true |
ac0860d6acae923abf83e6c2a57c5006fafa9521 | Python | s-boardman/PyGlow | /examples/clock.py | UTF-8 | 2,398 | 3.234375 | 3 | [
"MIT"
] | permissive | #####
#
# PyGlow
#
#####
#
# Python module to control Pimoronis PiGlow
# [http://shop.pimoroni.com/products/piglow]
#
# * clock.py - binary clock by Jason (@Boeeerb) & Remi (rparpa)
# [https://github.com/Boeeerb/PiGlow]
#
#####
from PyGlow import PyGlow
from time import sleep
from datetime import datetime
pyglow = PyGlow()
##
# You can customise these settings:
##
# Show 12 or 24hr clock - 0= 24hr, 1= 12hr
show12hr = 0
# Set brightness of LED - 1-255
# (recommend 10-20, put 0 and you won't see it!)
led_brightness = 50
# Choose how to flash change of hour - 1= white leds, 2= all flash
hour_flash = 2
# arms: the PiGlow's 18 LEDs are numbered 1-18, six per arm; each dict
# maps an LED number to its current state (initially off).
arm_top = {i: 0 for i in range(1, 7)}
arm_right = {i: 0 for i in range(7, 13)}
arm_bottom = {i: 0 for i in range(13, 19)}
# link arm to a time value; the numeric key prefixes force the
# seconds/minutes/hours order when the keys are iterated sorted.
armConfig = {
    "1_seconds": arm_top,
    "2_minutes": arm_right,
    "3_hours": arm_bottom,
}
###
# End of customising
###
# Start with all LEDs off; the two counters below drive the hour flash
# (how many flashes remain, and which hour was last displayed).
pyglow.all(0)
hour_count = 0
hour_current = 0
def assign_binary_value_to_arm(binary_value, arm):
    """Map the characters of *binary_value* onto an arm's LEDs.

    The string is read least-significant character first, so its last
    character lands on the arm's lowest LED number.  Returns a new dict
    keyed by LED number with the corresponding bit character as value.
    """
    led_numbers = sorted(arm)
    flipped_bits = reversed(list(binary_value))
    return dict((led_numbers[position], bit) for position, bit in enumerate(flipped_bits))
def turn_on_off_led(hour, minute, second):
    # Render the current time onto the three arms in binary.
    # NOTE: "%06s" pads with *spaces*, not zeros; unset high bits therefore
    # compare as ' ' rather than '0', which still counts as "off" below.
    bin_hour = "%06s" % bin(hour)[2:]
    bin_min = "%06s" % bin(minute)[2:]
    bin_sec = "%06s" % bin(second)[2:]
    armConfig["1_seconds"] = assign_binary_value_to_arm(bin_sec, armConfig["1_seconds"])
    armConfig["2_minutes"] = assign_binary_value_to_arm(bin_min, armConfig["2_minutes"])
    armConfig["3_hours"] = assign_binary_value_to_arm(bin_hour, armConfig["3_hours"])
    # Push every LED's state to the hardware: on at the configured
    # brightness for a '1' bit, off otherwise.  (iterkeys is Python 2.)
    for key in sorted(armConfig.iterkeys()):
        for led_number in sorted(armConfig[key].iterkeys()):
            pyglow.led(led_number, led_brightness if armConfig[key][led_number] == "1" else 0)
# Main loop: redraw the binary time roughly ten times a second and flash
# the display when the hour changes.
while True:
    now = datetime.now()
    hour = now.hour
    if show12hr == 1 and now.hour > 12:
        hour -= 12
    # Check if current hour is different and set ready to flash hour:
    # the display is flashed `hour` times (e.g. 3 flashes at 3 o'clock).
    if hour_current != hour:
        hour_count, hour_current = hour, hour
    turn_on_off_led(hour, now.minute, now.second)
    # Flash the white leds for the hour
    if hour_count != 0:
        sleep(0.5)
        if hour_flash == 1:
            pyglow.color("white", led_brightness)
        if hour_flash == 2:
            pyglow.all(led_brightness)
        sleep(0.5)
        hour_count -= 1
    else:
        sleep(0.1)
| true |
a0a22a65cde15a91ea48386b1512e66fbc64f72d | Python | Lana-Pa/Codility | /binary_gap.py | UTF-8 | 718 | 3.21875 | 3 | [
"Apache-2.0"
] | permissive | #binary gap ex
def solution(N):
    """Return the length of the longest 'binary gap' in N.

    A binary gap is a maximal run of zeros bounded by ones on both sides
    in N's binary representation, so trailing zeros never count.

    Fixes over the previous version:
     * `count` was only initialised when a gap opened, so inputs without
       a '10' transition (e.g. N=3, binary '11') crashed with NameError;
     * the trailing-zero exclusion (`i+1 <> len(b_num)-1`) only skipped
       the final bit, so an unclosed trailing run of two or more zeros
       was wrongly counted (e.g. N=288, '100100000', reported 4 instead
       of 2);
     * the debug print of the bit string is dropped and the result is
       now returned as well as printed.
    """
    bits = bin(N)[2:]
    maxgap = 0
    count = 0
    for bit in bits:
        if bit == '1':
            # A '1' closes any open run of zeros; keep the longest.
            if count > maxgap:
                maxgap = count
            count = 0
        else:
            count += 1
    # `count` is deliberately not flushed here, so a trailing (unclosed)
    # run of zeros is ignored.
    print("maxpgap: %d" % maxgap)
    return maxgap
# Demo: 20 is 0b10100, so its longest closed zero-gap has length 1.
solution(20)
a99972ec7faa5a81db589e861b675cd0b6e0681c | Python | myloveapollo/Make_excel_tool | /MakeExcelTool/TextCtrl.py | UTF-8 | 1,484 | 2.546875 | 3 | [] | no_license | import wx
import done1
class MyDialog(wx.Dialog):
    """Version/about dialog: shows a caller-supplied title, the version
    and author lines, a clickable contact URL and an OK button."""
    def __init__(self, parent, text):
        wx.Dialog.__init__(self, parent, -1, u'版本信息', pos = wx.DefaultPosition, size=(500, 300))
        sizer = wx.GridSizer(rows=5, cols=1)
        # Title line (the caller-supplied text), shown in bold.
        label_1 = wx.StaticText(self, -1, text)
        label_1.SetFont(wx.Font(14, wx.DEFAULT, wx.NORMAL, wx.BOLD))
        label_2 = wx.StaticText(self, -1, u'软件版本:V1.0(2018.03.01')
        label_2.SetFont(wx.Font(12, wx.DEFAULT, wx.NORMAL, wx.NORMAL))
        label_3 = wx.StaticText(self, -1, u'版权所有:maydolly')
        label_3.SetFont(wx.Font(12, wx.DEFAULT, wx.NORMAL, wx.NORMAL))
        # A read-only TextCtrl (rather than StaticText) is used so that
        # TE_AUTO_URL can turn the contact address into a clickable link.
        label_4 = wx.TextCtrl(self, -1, u'联系作者:www.classnotes.cn', size=(300, -1), style=wx.TE_READONLY | wx.TE_AUTO_URL | wx.TE_RICH | wx.BORDER_NONE)
        label_4.SetFont(wx.Font(12, wx.DEFAULT, wx.NORMAL, wx.NORMAL))
        label_4.SetBackgroundColour(self.BackgroundColour)
        label_4.Bind(wx.EVT_TEXT_URL, self.OnButton)
        self.number = 1
        okbtn = wx.Button(self, wx.ID_OK,'OK')
        okbtn.SetDefault()
        sizer.Add(label_1, flag=wx.ALIGN_CENTER)
        sizer.Add(label_2, flag=wx.ALIGN_CENTER)
        sizer.Add(label_3, flag=wx.ALIGN_CENTER)
        sizer.Add(label_4, flag=wx.ALIGN_CENTER)
        sizer.Add(okbtn, flag=wx.ALIGN_CENTER)
        self.SetSizer(sizer)
    def OnButton(self, evt):
        # Open the contact page when the URL text is left-clicked.
        if evt.GetMouseEvent().LeftIsDown():
            # Fix: webbrowser was referenced without ever being imported,
            # so clicking the link raised NameError; import it here.
            import webbrowser
            webbrowser.open('http://www.classnotes.cn')
if __name__ == '__main__':
    app = wx.App()
    SiteFrame = done1.SiteLog()
    # NOTE(review): MyDialog.__init__ requires (parent, text), so this
    # bare call raises TypeError -- and it also rebinds the class name.
    MyDialog = MyDialog()
    # NOTE(review): Frame.Show expects a bool, not a window; presumably
    # the intent was SiteFrame.Show() followed by MyDialog.ShowModal().
    SiteFrame.Show(MyDialog)
    app.MainLoop()
| true |
b2000e0ad6e7454cadf8fe4e390acaaefce3a585 | Python | amitsaha/python-web-app-recipes | /middleware/flask/app4.py | UTF-8 | 1,845 | 3.140625 | 3 | [
"MIT"
] | permissive | # This is an example of implementing middleware via
# decorators that Flask framework exposes namely:
#
# app.before_first_request
# app.before_request
# app.after_request
# There are blueprint specific decorators as well, such as:
# http://flask.pocoo.org/docs/0.12/api/#flask.Blueprint.before_request
# which are useful when you want to execute code which is blueprint
# specific rather than app specific
from flask import Flask, request
import logging
app = Flask(__name__)
# From https://github.com/benoitc/gunicorn/issues/379
# This gets called only *once* before first request to the
# app
# NOTE(review): before_first_request was deprecated/removed in newer
# Flask releases -- confirm against the pinned Flask version.
@app.before_first_request
def setup_logging():
    # Configure app.logger to log to stderr
    app.logger.addHandler(logging.StreamHandler())
    app.logger.setLevel(logging.INFO)
# Functions registered with before_request()
# are called in order they are registered
@app.before_request
def before_request1():
    # We can access the request object to access
    # any relevant information from the request
    app.logger.debug(request.headers)
    app.logger.info('before_request1 called')
@app.before_request
def before_request2():
    app.logger.info('before_request2 called')
# Functions registered with after_request get called
# in a "LIFO" order i.e. the function registered last
# gets called first and vice-versa
@app.after_request
def after_request1(resp):
    app.logger.info('after_request1 called')
    return resp
@app.after_request
def after_request2(resp):
    app.logger.info('after_request2 called')
    return resp
# Custom response for HTTP 500, triggered by the /test route below.
@app.errorhandler(500)
def handle_500(error):
    print("500 error handler called")
    app.logger.error(error)
    return str(error), 500
@app.route('/test')
def test():
    # NOTE(review): this try/except immediately re-raises, so it is
    # equivalent to a bare `1/0`; it only marks where the demo error is
    # deliberately produced.
    try:
        1/0
    except Exception:
        raise
    return 'test successful'
if __name__ == '__main__':
    app.run()
| true |
33bf7df48abdc38126b7bac3a3409303a60b072e | Python | rabits/tracker | /src/DataModule.py | UTF-8 | 4,481 | 2.6875 | 3 | [] | no_license | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
import threading
import Log as log
from Module import Module
from Library import mapValue, mergeDicts, evaluateMath
class DataModule(Module):
    """DataModule - common data sensors functions.

    Owns a background thread that reads raw sensor values, mirrors them
    into _raw_data, optionally remaps them via the configured 'map' into
    _map_data, and emits 'changes' signals.  Written for Python 2
    (iteritems / has_key).
    """
    def __init__(self, **kwargs):
        Module.__init__(self, **kwargs)
        # Raw readings keyed by sensor index; guarded by _raw_data_lock.
        self._raw_data = {}
        self._raw_data_lock = threading.Lock()
        # Mapped/derived readings; guarded by _map_data_lock.
        self._map_data = {}
        self._map_data_lock = threading.Lock()
        self._thread_control = threading.Thread(target=self._readRawDataThreadWrapper)
    def start(self):
        # Reload the map config and launch the reader thread once.
        Module.start(self)
        self._reinitMap()
        if not self._thread_control.is_alive():
            self._thread_control.start()
    def _reinitMap(self):
        self._map = self._cfg.get('map', {})
        # Converting list to dict to use the same interface
        if isinstance(self._map, list):
            self._map = { i:v for i,v in enumerate(self._map) }
        elif not isinstance(self._map, dict):
            self._map = {}
            log.error('Wrong map format in %s(%s)' % (self.__class__.__name__, self.name()))
    def _readRawDataThreadWrapper(self):
        '''This wrapper is required to run prepare & cleaning operations for the read thread'''
        try:
            log.debug('%s %s reading thread started' % (self.__class__.__name__, self.name()))
            self._readRawDataThread()
        finally:
            # Always reset state, even if the reader raised.
            log.debug('%s %s reading thread completed' % (self.__class__.__name__, self.name()))
            self._cleanData()
    def _readRawDataThread(self):
        '''Thread function to get & update values of the collected data'''
        # Base-class placeholder: subclasses must override this.
        log.error('Issue with %s(%s) DataModule implementation, it should override the _readRawDataThread function' % (self.__class__.__name__, self.name()))
    def _processData(self, data):
        '''Helper to update _raw_data and _map_data and trigger required signals'''
        # Determining changes
        to_update = []
        for i, val in data.iteritems():
            if self._raw_data.get(i, None) != val:
                to_update.append(i)
                if not self._map:
                    log.debug('Raw data value %d changed: %s to %s' % (i, self._raw_data.get(i), val))
        # Atomic update the raw data
        with self._raw_data_lock:
            for i in to_update:
                self._raw_data[i] = data[i]
        # Mapping raw data by provided map
        if self._map:
            mdata = {}
            for i in to_update:
                mdata.update(mapValue(i, data, self._map, self._map_data))
            changes = {}
            with self._map_data_lock:
                changes = mergeDicts(self._map_data, mdata)
            changes = self._processChanged(changes, self._map, self._map_data)
            if changes:
                self.signal('changes', sender=self.name(), data=changes)
        elif to_update:
            # No map configured: emit the changed raw values directly.
            self.signal('changes', sender=self.name(), data={k:v for k,v in data.iteritems() if k in to_update})
    def _processChanged(self, changes, data_map, data):
        '''Function will execute required signals based on changes data'''
        # Recursively walks the nested changes dict, skipping disabled
        # entries, and fires any configured signals for changed leaves.
        out = {}
        for key in changes:
            if not data_map.get(key, {}).get('enabled', True):
                continue
            if isinstance(changes[key], dict):
                log.debug('- %s(%s) changes: %s:' % (self.__class__.__name__, self.name(), key))
                out[key] = self._processChanged(changes[key], data_map.get(key, {}), data.get(key, {}))
                continue
            log.debug('- %s(%s) changes: %s = %s (%s)' % (self.__class__.__name__, self.name(), key, data.get(key), changes.get(key)))
            out[key] = changes[key]
            signals = data_map.get(key, {}).get('signals', [])
            for s in signals:
                # NOTE(review): `name` is undefined in this scope, so this
                # line raises NameError as soon as a signal carries a
                # 'when' condition; presumably `key` was intended.
                if s.has_key('when') and not evaluateMath(s.get('when'), data.get(key), changes.get(name, s.get('default_value', 0))):
                    continue
                self.signal(s)
        return out
    def _cleanData(self):
        '''Will clean all the data when module is inactive'''
        log.debug('Deactivation - cleaning data')
        self._active = False
        with self._raw_data_lock:
            self._raw_data = {}
        with self._map_data_lock:
            self._map_data = {}
    def getRawDataValue(self, index):
        # Thread-safe point lookup of a raw reading (None if absent).
        with self._raw_data_lock:
            return self._raw_data.get(index, None)
| true |
ed648577588924cf682839060426b805bd0d5608 | Python | diegoami/data_science_course_udacity | /t-tests/grad_desc_new_5.py | UTF-8 | 2,040 | 3.28125 | 3 | [] | no_license | import numpy
import pandas
import numpy
import pandas
def normalize_features(array):
    """Z-score normalise *array* (a pandas Series/DataFrame or numpy array).

    Returns ``(normalised, mu, sigma)`` so the caller can apply the same
    scaling to new data.  The mean and standard deviation are now computed
    once each (the previous version recomputed both of them twice).
    """
    mu = array.mean()
    sigma = array.std()
    array_normalized = (array - mu) / sigma
    return array_normalized, mu, sigma
def compute_cost(features, values, theta):
    """Half mean-squared-error cost for linear regression.

    *theta* arrives as a row vector and is flipped into a column vector
    so that ``features . theta`` lines up with *values*.
    """
    m = len(values)
    theta_column = numpy.array(theta).reshape(numpy.size(theta, 1), numpy.size(theta, 0))
    residuals = numpy.dot(features, theta_column) - values
    return numpy.square(residuals).sum() / (2 * m)
def gradient_descent(features, values, theta, alpha, num_iterations):
    """
    Perform gradient descent given a data set with an arbitrary number of features.
    Returns the final theta and a pandas Series of the cost after each
    iteration.  Pass theta=None to let the first iteration derive a
    starting point from the data.
    """
    count = 0
    cost_history = []
    m = len(values)
    # Values arrive as an (m, 1) column; flipped here into a (1, m) row.
    values_flipped = numpy.array(values).reshape(numpy.size(values,1),numpy.size(values,0))
    while count < num_iterations:
        if theta is not None:
            theta_flipped = numpy.array(theta).reshape(numpy.size(theta,1),numpy.size(theta,0))
            predicted_values = numpy.dot(features,theta_flipped)
            # NOTE(review): predicted_values is (m, 1) while values_flipped
            # is (1, m), so the subtraction broadcasts to an (m, m) matrix.
            # That looks unintended (the conventional update subtracts the
            # (m, 1) values directly) -- verify before relying on this.
            theta = theta - (alpha / m ) * numpy.dot((predicted_values - values_flipped),features)
        else:
            # No starting theta: equivalent to one update step from
            # theta == 0, where every prediction is zero.
            ndot = numpy.dot(-values_flipped, features.astype(float))
            theta = - (alpha / m) * ndot
        cost = compute_cost(features, values, theta)
        cost_history.append(cost)
        count = count+1
    return theta, pandas.Series(cost_history)
# Fit on the first MAX_SIZE + 1 rows of the baseball data set.
MAX_SIZE = 100
MAX_ITER = 50
data = pandas.read_csv('baseball_stats.csv')
features_s = data.loc[0:MAX_SIZE,['height','weight']].fillna(0)
# Coerce any non-numeric entries to NaN, then zero them out.
features = features_s.apply(pandas.to_numeric,errors='coerce').fillna(0)
values = data.loc[0:MAX_SIZE,['avg']].fillna(0)
values = numpy.array(values)
print(features)
print(values)
# theta=None lets gradient_descent derive its own starting point.
result = gradient_descent(features, values,None, +0.00000001, MAX_ITER)
print(result[1])
print(result[0])
| true |
6d6c6aaea71a3166bf1e18ee7956d18195834dc8 | Python | sambd86/alzheimers-cnn-study | /scripts/preprocessing/default_preprocessing_generator.py | UTF-8 | 4,679 | 2.65625 | 3 | [] | no_license | import pandas as pd
from utils import safe_listdir
MRI_LIST_PATH = "../../data/MRILIST.csv"
VISITS_PATH = "../../data/VISITS.csv"
def default_preprocessing_generator(dataset_path, adni_merge):
    """This function provides a generator to iterate through all of the images from the ADNI dataset with the
    "MPR", "GradWarp", "B1 Correction", "N3 Scaled" pre-processing steps applied. This generator assumes the following
    data file structure.
    dataset_path
    | - subset 0
        | - patient_id_1
            |- MPR__GradWarp__B1_Correction__N3
                |- visit_date_1
                    |- image_id
                        |- *.nii files
                |- visit_date_2
                    |- visit_code
                        |- *.nii files
        | - patient_id_2
            |- MPR__GradWarp__B1_Correction__N3
                |- ...
    ...
    | - subset 0
        |- ...
    | - subset 0
        |- ...
    | - subset 0
    ...
    Yields ``(patient_id, visit_code, nii_file, dx)`` tuples, where dx is
    the diagnosis from the ADNIMERGE table.
    """
    sub_folders = safe_listdir(dataset_path)
    # Reference tables: scan metadata and visit-name -> visit-code mapping.
    mri_list = pd.read_csv(MRI_LIST_PATH,
                           dtype={"STUDYID": "Int64", "SERIESID": "Int64", "IMAGEUID": "Int64"})
    visit_metadata = pd.read_csv(VISITS_PATH, dtype={"VISORDER": "Int64"})
    for folder in sub_folders:
        patient_folder = f"{dataset_path}/{folder}"
        patient_ids = safe_listdir(patient_folder)
        for patient_id in patient_ids:
            visit_dates_folder = f"{patient_folder}/{patient_id}/MPR__GradWarp__B1_Correction__N3"
            visit_dates = safe_listdir(visit_dates_folder)
            for visit_date in visit_dates:
                image_folder = f"{visit_dates_folder}/{visit_date}"
                series_ids = safe_listdir(image_folder)
                for series_id in series_ids:
                    data_files_folder = f"{image_folder}/{series_id}"
                    files = safe_listdir(data_files_folder)
                    # Exactly one .nii image is expected per series folder.
                    files_with_nii_extension = list(filter(lambda x: x[-3:] == "nii", files))
                    if len(files_with_nii_extension) != 1:
                        print(f"There are {len(files_with_nii_extension)} files with .nii extension, expecting 1.")
                        continue
                    else:
                        nii_file = f"{data_files_folder}/{files_with_nii_extension[0]}"
                    # Matching the image metadata with MRILIST.csv
                    # (series folder names look like "S<number>"; the split
                    # assumes that prefix -- verify against the data set).
                    subject_match = mri_list["SUBJECT"] == patient_id
                    series_id_match = mri_list["SERIESID"] == int(series_id.split("S")[1])
                    mri_record = mri_list[subject_match & series_id_match]
                    if mri_record.empty or len(mri_record) > 1:
                        print(f"There are {len(mri_record)} records in MRILIST.csv for {patient_id} {series_id}, "
                              f"skipped...")
                        continue
                    visit_name = mri_record["VISIT"].iloc[0]
                    # "ADNI *" => "*"
                    # Strip the study prefix so the name matches VISITS.csv.
                    if visit_name[:5] == "ADNI ":
                        visname = visit_name.split(" ")[1]
                    elif visit_name[:9] == "ADNI1/GO ":
                        visname = " ".join(visit_name.split(" ")[1:])
                    else:
                        visname = visit_name
                    visit_code = visit_metadata[visit_metadata["VISNAME"] == visname]["VISCODE"].iloc[0]
                    # https://adni.loni.usc.edu/wp-content/uploads/2008/07/adni2-procedures-manual.pdf page 37
                    # The window from Screening to Baseline is 28 days
                    # so screening visits are looked up as baseline ("bl").
                    is_screening = visit_code == "sc"
                    ptid_match = adni_merge["PTID"] == patient_id
                    viscode_match = adni_merge["VISCODE"] == (visit_code if not is_screening else "bl")
                    adni_merge_record = adni_merge[ptid_match & viscode_match]
                    if adni_merge_record.empty:
                        print(f"There are no {patient_id} with {visit_code} ({visit_name}, {series_id}) in "
                              f"ADNIMERGE, skipping...")
                        continue
                    # NOTE(review): this branch prints that the record is
                    # being skipped but does NOT `continue`; the first of
                    # the duplicate records is yielded anyway.
                    if len(adni_merge_record) > 1:
                        print(f"There are more than 1 records ({len(adni_merge_record)}) for {patient_id} with "
                              f"{visit_code} in ADNIMERGE, skipping...")
                    dx = adni_merge_record["DX"].iloc[0]
                    yield patient_id, visit_code, nii_file, dx
| true |
0533728813b19c3d1a05a060f0b04b134a313815 | Python | wangtao4256/Python_Automation | /gen/bfd_futureland/encryption/encrypt.py | UTF-8 | 863 | 2.578125 | 3 | [] | no_license | __author__ = 'KD'
# -*- coding:utf-8 -*-
#!/usr/bin/python
from cryptography.fernet import Fernet
def getcipher():
    # Build the shared symmetric cipher used by encrypt()/decrypt().
    # NOTE(review): the Fernet key is hard-coded in source, so anyone who
    # can read this file can decrypt everything it protects; load it from
    # an environment variable or secret store instead.
    cipher_key = b'PabcRCgkFXUu5aT1il_VFA9kpUCq8VwnebWf9fCbqvQ='
    cipher = Fernet(cipher_key)
    return cipher
def encrypt(content):
    """Encrypt *content* (a bytes-like value) and return the Fernet token."""
    fernet = getcipher()
    return fernet.encrypt(bytes(content))
def decrypt(content):
    """Decrypt a Fernet token produced by encrypt(); return the plaintext bytes."""
    fernet = getcipher()
    return fernet.decrypt(content)
# if __name__ == '__main__':
#     content = b"test"
#     ency = encrypt(content)
#     decy = decrypt(ency)
# Manual check: encrypt a sample value and decrypt a token that was
# captured from a previous run with the same hard-coded key.
if __name__ == '__main__':
    content = b'gpadmin'
    ency = encrypt(content)
    print(ency)
    decy = decrypt(b'gAAAAABZAtzsYOPoxL2KyCESsj7et-CMRt7oSpSp7gymbJ13k4bLbPJBlaDYBQfCToLrML14XqzazxKr8Pamu0mhftrY0CPISg==')
    print(decy)
f65b8c56cc74a9318991f22b32946d87d4b4bb59 | Python | jlucasoliveira/redes-trabalhos | /_WebServerTCPMT.py | UTF-8 | 2,289 | 2.9375 | 3 | [] | no_license | #import socket module
from socket import *
import sys
import threading
# Pool of handler threads, one per accepted client connection.
threads = []
server_socket = socket(AF_INET, SOCK_STREAM)
# Prepare the server socket: listen on all interfaces, port 9999.
server_socket.bind(('', 9999))
server_socket.listen(5)
# Reuse addresses from earlier runs whose sockets are not fully closed.
# NOTE(review): SO_REUSEADDR only has effect when set *before* bind();
# setting it here, after bind/listen, does nothing.
server_socket.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1)
class ThreadWebServer(threading.Thread):
    """One thread per client: serves static files over minimal HTTP/1.1."""
    def __init__(self, conn_socket, addr):
        self.conn_socket = conn_socket
        self.addr = addr
        threading.Thread.__init__(self)
    def run(self):
        while True:
            # Serve requests on the established connection.
            print('Ready to serve {}:{}...'.format(self.addr[0], self.addr[1]))
            try:
                message = self.conn_socket.recv(1024).decode()
                print(message)
                if not message:
                    break
                # Split the received request on spaces; the second token
                # is the requested path.
                arquivo_requisitado = message.split(' ')[1]
                if arquivo_requisitado == '/':
                    arquivo_requisitado = arquivo_requisitado + 'index.html'
                # The first character of the path is '/', dropped via [1:].
                arquivo = open(arquivo_requisitado[1:])
                outputdata = arquivo.read()
                arquivo.close()
                # Send the HTTP status line to the socket.
                cabecalho = b'HTTP/1.1 200 OK\r\n'
                self.conn_socket.send(cabecalho)
                # Send the requested file's contents to the client
                # (character by character -- correct, but very slow).
                for i in range(0, len(outputdata)):
                    self.conn_socket.send(outputdata[i].encode())
                self.conn_socket.send('\r\n\r\n'.encode())
                # NOTE(review): the loop keeps running after this close(),
                # so the next recv()/send() on the closed socket raises and
                # eventually kills the thread via the except branch.
                self.conn_socket.close()
            except Exception as e:
                # Send a "File not Found" response.
                self.conn_socket.send(b'HTTP/1.1 404 Not Found\r\n\r\n')
                # Close the client socket.
                self.conn_socket.close()
# Accept clients forever, spawning one handler thread per connection.
while True:
    con, addr = server_socket.accept()
    sock_thread = ThreadWebServer(con, addr)
    sock_thread.start()
    threads.append(sock_thread)
# NOTE(review): everything below is unreachable -- the accept loop above
# never breaks.
for thread in threads:
    thread.join()
server_socket.close()
sys.exit()# Terminate the program after the data has been sent
50335212647c61ea6f7fb3ae597f9bd85e0cc340 | Python | shinmao/Pyspark-NYC-taxi-trip-duration-analysis | /visual.py | UTF-8 | 3,081 | 3.09375 | 3 | [] | no_license | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import warnings
import datetime
from datetime import datetime
from math import sin, radians, cos, asin, sqrt
def distance(lon1, lat1, lon2, lat2):
    """Great-circle (haversine) distance in kilometres between two
    (longitude, latitude) points given in degrees."""
    lon1, lat1, lon2, lat2 = map(radians, [lon1, lat1, lon2, lat2])
    half_dlat = (lat2 - lat1) / 2
    half_dlon = (lon2 - lon1) / 2
    haversine = sin(half_dlat) ** 2 + cos(lat1) * cos(lat2) * sin(half_dlon) ** 2
    earth_radius_km = 6371
    return 2 * asin(sqrt(haversine)) * earth_radius_km
# NYC taxi trip data; parse pickup timestamps and derive calendar fields.
df = pd.read_csv("/Users/rafaelchen/Desktop/train.csv")
df.pickup_datetime = df.pickup_datetime.apply(lambda x:datetime.strptime(x, '%Y-%m-%d %H:%M:%S'))
df["pickup_month"] = df["pickup_datetime"].map(lambda x: x.month)
df["pickup_week"]=df.pickup_datetime.apply(lambda x: x.weekday())
df["pickup_day"]=df.pickup_datetime.apply(lambda x: x.day)
df["pickup_hour"] = df.pickup_datetime.apply(lambda x: x.hour)
#df["trip_distance"] = df["pickup_longitude", "pickup_latitude", "dropoff_longitude", "dropoff_latitude"].apply(lambda x: distance(x))
# Aggregate trip duration per month / weekday / hour / passenger count,
# renaming the generated columns to descriptive names.
month_trip=df.groupby(['pickup_month'])["trip_duration"].agg(["sum","mean","count"])
month_trip=month_trip.reset_index()
month_trip.rename(columns={'sum':'month_sum_trip_dur','mean':'month_avg_trip_dur','count':"month_trip_times"}, inplace = True)
day_trip=df.groupby(['pickup_week'])["trip_duration"].agg(["sum","mean","count"])
day_trip=day_trip.reset_index()
day_trip.rename(columns={'sum':'day_sum_trip_duration','mean':'day_avg_trip_dur','count':"day_trip_times"}, inplace = True)
hour_trip = df.groupby(['pickup_hour'])["trip_duration"].agg(["mean"])
hour_trip = hour_trip.reset_index()
hour_trip.rename(columns={'mean':'hour_avg_trip_duration'}, inplace = True)
pass_trip = df.groupby(['passenger_count'])["trip_duration"].agg(["mean"])
pass_trip = pass_trip.reset_index()
pass_trip.rename(columns={'mean':'people_avg_trip_duration'}, inplace = True)
#dist_trip = df.groupby(['trip_distance'])["trip_duration"].agg(["mean"])
#dist_trip = dist_trip.reset_index()
#dist_trip.rename(columns={'mean':'dist_avg_trip_duration'}, inplace = True)
# One blocking line plot per aggregation.
plt.figure(figsize=(8,9))
ax1= plt.plot(month_trip.pickup_month, month_trip.month_avg_trip_dur, color="green",alpha=0.8,marker='o')
plt.title("Monthly Trip Duration")
plt.ylabel('Avg of trip druration', fontsize=12)
plt.xlabel('Pickup_Month', fontsize=12)
plt.show()
ax1= plt.plot(day_trip.pickup_week, day_trip.day_trip_times, color="red", alpha=0.8,marker='*')
plt.title("Day Trip Time")
plt.ylabel('Day trip time', fontsize=12)
plt.xlabel('Pickup_Day', fontsize=12)
plt.show()
ax1= plt.plot(hour_trip.pickup_hour, hour_trip.hour_avg_trip_duration, color="green",alpha=0.8,marker='o')
plt.title("Hour Trip Duration")
plt.ylabel('Avg of trip druration', fontsize=12)
plt.xlabel('Pickup_Hour', fontsize=12)
plt.show()
ax1= plt.plot(pass_trip.passenger_count, pass_trip.people_avg_trip_duration, color="green",alpha=0.8,marker='o')
plt.title("Passenger - Trip Duration")
plt.ylabel('Avg of trip druration', fontsize=12)
plt.xlabel('Passenger Count', fontsize=12)
plt.show()
68b9b83a5a64bc23cf8762188151d50484370828 | Python | yvonnewanjiru/dbitlmr405520-DBT1303PythonProjects | /traffic.py | UTF-8 | 1,451 | 3.234375 | 3 | [] | no_license | import json
import requests
import pandas as pd
import time
import matplotlib.pyplot as plt
from datetime import datetime
# Road selection menu.
roads={
1:"Road A",
2:"Road B",
3:"Road C"
}
# NOTE(review): the menu below offers option 4 (Road D), but `roads` has
# no key 4, so choosing it raises KeyError on the print that follows.
print(
"1 For Road A\n"
"2 For Road B\n"
"3 For Road C\n"
"4 For Road D\n"
)
road_in=int(input("Please Select Road To Monitor: "))
print(roads[road_in])
# Map ISO weekday numbers (Monday == 1 ... Sunday == 7) to
# (abbreviation, full name) pairs.
weekDay={
1:["Mon","Monday"],
2:["Tue","Tuesday"],
3:["Wed","Wednesday"],
4:["Thu","Thursday"],
5:["Fri","Friday"],
6:["Sat","Saturday"],
7:["Sun","Sunday"]
}
# Fix: datetime.weekday() is zero-based (Monday == 0), so the original
# lookup raised KeyError every Monday and named the wrong day otherwise;
# isoweekday() is one-based and matches the keys of weekDay above (it
# also makes the day[:week_no] slice below include today's sample).
week_no=(datetime.today().isoweekday())
current_day=weekDay[week_no][1]
day_abb=weekDay[week_no][0]
print("Please Wait As We generate A Graph\n")
print(f"Road: {roads[road_in]} \nDay Of Week: {current_day}")
time.sleep(4.5)
# Parallel per-day samples pulled from the TomTom traffic index API.
day=[]
congestion=[]
date=[]
week=[]
congestion_url="https://api.midway.tomtom.com/ranking/dailyStats/ITA_milan"
jams_url="https://api.midway.tomtom.com/ranking/liveHourly/ITA_milan"
limit=15
data_request=requests.get(congestion_url)
feedback=data_request.json()
for data in feedback:
    date.append(data["date"])
    day.append(data["weekday"])
    week.append(data["week"])
    congestion.append(data["congestion"])
# Bar chart of congestion for the days up to and including today.
fig_shape=plt.figure(figsize=(5,7))
plt.bar(day[:week_no],congestion[:week_no])
plt.show()
# Report the first sample matching today's abbreviation.  (The loop
# variable `day` deliberately shadows the list of the same name here.)
for conges,day in zip(congestion,day):
    if(day==day_abb):
        print(f"The Congestion is: {conges}% For {day}")
        break
| true |
8582dc2994f5016522c286fbffd7aa06d56b1d1b | Python | Akavall/Profit-Calculation-and-Simulation | /plot_generation.py | UTF-8 | 3,064 | 2.875 | 3 | [] | no_license | from mpl_toolkits.mplot3d import Axes3D
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import logging
import json
from calc_profit import calc_profit
from general_simulations import calc_profit_sim_wrapper
logging.getLogger().setLevel(logging.INFO)
def generate_plot_data():
    """Sweep days-before-break-up (1..125, step 5) against 25 values of
    the downward probability, returning three parallel lists
    (days, prob, profit) suitable for a 3D scatter plot.
    Note: uses xrange, so this module targets Python 2."""
    days_before_break_up_list = []
    prob_down_list = []
    profits_list = []
    for days_before_break in xrange(1, 126, 5):
        logging.info("On day : {}".format(days_before_break))
        for prob_down in np.linspace(0.5, 1, 25):
            days_before_break_up_list.append(days_before_break)
            prob_down_list.append(prob_down)
            profits_list.append(calc_profit(125, days_before_break, prob_down)['profit'])
    return days_before_break_up_list, prob_down_list, profits_list
def make_3d_scatter_plot(x, y, z, x_label, y_label, z_label):
    """Render a 3D scatter plot of (x, y, z) with the given axis labels."""
    figure = plt.figure()
    axes = figure.add_subplot(111, projection='3d')
    axes.scatter(xs=x, ys=y, zs=z, zdir='z', label='ys=0, zdir=z')
    # Apply the three axis labels.
    for setter, text in ((axes.set_xlabel, x_label),
                         (axes.set_ylabel, y_label),
                         (axes.set_zlabel, z_label)):
        setter(text)
    plt.show()
def generate_plot_data_general():
    """Sweep convergence probability x break-up probability over a 40x40
    grid, collecting simulated profits.

    Returns three parallel lists: conv probabilities, break-up
    probabilities, profits from calc_profit_sim_wrapper.
    """
    break_up_prob_list = []
    conv_prob_list = []
    profits_list = []
    # coords = [0.0, 0.001, ..., 0.02]  (previous hand-picked grid, kept for reference)
    conv_prob_coords = np.linspace(0, 1, 40)
    break_up_coords = np.linspace(0, 0.04, 40)
    for conv_prob in conv_prob_coords:
        #debug
        #conv_prob = 1
        logging.info("conv_prob : {}".format(conv_prob))
        for break_up_prob in break_up_coords:
            # break_up_prob = 0
            break_up_prob_list.append(break_up_prob)
            conv_prob_list.append(conv_prob)
            # Variance and threshold are both tied to the break-up probability.
            variance = break_up_prob * 100
            thresh = break_up_prob
            temp = calc_profit_sim_wrapper(250, conv_prob, break_up_prob, variance, thresh, 5000)
            # logging.info("profit: {}".format(temp))
            profits_list.append(temp['profit'])
    return conv_prob_list, break_up_prob_list, profits_list
def make_3d_scatter_plot_general(x, y, z, x_label, y_label, z_label):
    """Render a 3D scatter plot; kept as an alias for existing callers.

    The original body duplicated make_3d_scatter_plot line for line, so it
    now delegates instead of maintaining two copies.
    """
    make_3d_scatter_plot(x, y, z, x_label, y_label, z_label)
if __name__ == "__main__":
    # x, y, z = generate_plot_data()
    # make_3d_scatter_plot(x, y, z, "Days Before Break Up", "Probability to Converge", "Profit")
    x, y, z = generate_plot_data_general()
    try:
        with open("/home/kirill/Research/Profit-Calculation-and-Simulation/plot_back_up/plot_2.json", "w") as f:
            json.dump({"conv": x, "break_up": y, "profits": z}, f)
    except Exception:
        # Bug fix: the original called logging.INFO(...), which is the integer
        # level constant, not a function — the handler itself raised
        # TypeError.  Keep the save best-effort but log the real traceback.
        logging.exception("Saving to json failed")
    make_3d_scatter_plot(x, y, z, "Probability to Converge", "Probability of Break Up", "Profit")
# break_up_prob => zero profit because
# variance is also zero at that point
| true |
9177afbd2a5903377aa4041ebbb1093f9d2e86d8 | Python | jedzej/tietopythontraining-basic | /students/mielczarek_szymon/lesson_09_reading_and_writing_files/mad_libs.py | UTF-8 | 1,263 | 3.484375 | 3 | [] | no_license | #! python3
import argparse
"""
Mad Libs program reads in text files and lets the user add their own text
anywhere the word ADJECTIVE, NOUN, ADVERB, or VERB appears in the text file.
"""
mad_words = ['ADJECTIVE', 'NOUN', 'ADVERB', 'VERB']
def find_mad_word(word):
    """Return the first Mad Libs placeholder contained in *word*, else None."""
    return next((placeholder for placeholder in mad_words if placeholder in word), None)
def main():
    """Read the input file, prompt the user for each placeholder word, and
    write the filled-in text to the output file (also echoed to stdout)."""
    parser = argparse.ArgumentParser(description='Mad Libs program')
    parser.add_argument('-i', metavar='in-file',
                        type=argparse.FileType('r'),
                        default='input_file.txt')
    parser.add_argument('-o', metavar='out-file',
                        type=argparse.FileType('w'),
                        default='output_file.txt')
    args = parser.parse_args()
    modified_text = ""
    for line in args.i.readlines():
        words = []
        for word in line.split():
            mad_word = find_mad_word(word)
            if mad_word:
                # Ask the user for a replacement and splice it in, keeping
                # any punctuation attached to the original token.
                word = word.replace(mad_word, input("Enter {}:\n".format(
                    mad_word.lower())))
            words.append(word)
        modified_text += ' '.join(words) + '\n'
    print(modified_text)
    args.o.write(modified_text)
    args.o.close()
if __name__ == "__main__":
main()
| true |
db8b4f5ea972958a9c1ec74df7fba698bb350041 | Python | roger-mayer/python-practice | /crash_course/lists/toppings_list.py | UTF-8 | 890 | 3.890625 | 4 | [] | no_license |
requested_toppings = ['mushrooms', 'extra cheese', 'pepperoni']
print("\nIf statement:")
if 'mushrooms' in requested_toppings:
print(f"adding {requested_toppings[0]}")
if 'extra cheese' in requested_toppings:
print(f"adding {requested_toppings[1]}")
print("Finished making your pizza")
print("\nFor/if-else list:")
for requested_topping in requested_toppings:
if requested_topping == 'pepperoni':
print(f"Sorry we are out of {requested_topping}")
else:
print(f"adding {requested_topping} to your pizza")
print("\nUsing multiple lists:")
available_toppings = ['mushrooms', 'pepperoni', 'sausage', 'olives']
for requested_topping in requested_toppings:
if requested_topping in available_toppings:
print(f"adding {requested_topping}")
else:
print(f"we are out of {requested_topping}")
print("we are finished making your pizza")
| true |
4d5320576d9c3b0fc67aa5c1220d85d3c09d72f0 | Python | TheMadDuck/neural_neophyte | /Python/AI_Environment/eloRanking.py | UTF-8 | 4,595 | 2.65625 | 3 | [
"BSD-3-Clause"
] | permissive | import numpy as np
import math as mt
import random as rd
import pickle
import os
import shutil
import AIenvironment as AIEnv
import gameFlowClass as gFC
#########################################
#import own random distrib
import nRandomDistrib.nRand as nRand
def naiveElo(leftValue, rightValue, decition):
    """Naively redistribute rating points between two players.

    decition: 0 = draw (ratings unchanged); 1 transfers points from the
    left rating to the right one; any other value transfers from right to
    left.  Returns the (left, right) pair of updated ratings.
    """
    if decition == 0:
        # Draw: leave both ratings untouched.
        return (leftValue, rightValue)
    delta = abs(leftValue - rightValue) / 5 + 0.1
    if decition == 1:
        delta = -delta
    return (leftValue + delta, rightValue - delta)
#########################################
#get elo-numbers from file
def loadEloFile(gameName):
    """Load the pickled Elo ranking for *gameName*, creating the model
    directories on first use.  Returns None when no ranking file exists.

    NOTE(review): pickle.load executes code from the file — only use with
    trusted files.
    """
    if (not os.path.isdir("./best_models")):
        os.makedirs("./best_models") # maybe in a try catch block
        print("best models is created")
    # gameName = subPr.getName()
    if (not os.path.isdir("./best_models/" + str(gameName))):
        os.makedirs("./best_models/" + str(gameName))
        print(str(gameName) + "-folder is created")
    eloFileName = None
    # Pick up the last *.txt file found in the game folder.
    for file in os.listdir("./best_models/" + str(gameName)):
        if file.endswith(".txt"):
            eloFileName = ("./best_models/" + str(gameName) + "/" + str(file))
    print (eloFileName)
    if (eloFileName):
        eloFile = pickle.load(open(eloFileName, 'rb'))
    else:
        eloFile = None
    return eloFile
#########################################
#get elo-numbers from file
def saveEloFile(eloRanking, gameName):
    """Pickle *eloRanking* to best_models/<gameName>/elo_ranking.txt,
    creating the directories when missing.  Always returns 0."""
    if (not os.path.isdir("./best_models")):
        os.makedirs("./best_models") # maybe in a try catch block
        print("best models is created")
    # gameName = subPr.getName()
    if (not os.path.isdir("./best_models/" + str(gameName))):
        os.makedirs("./best_models/" + str(gameName))
        print(str(gameName) + "-folder is created")
    eloFileName = ("./best_models/" + str(gameName) + "/" + "elo_ranking.txt")
    print (eloFileName)
    if (eloFileName):
        pickle.dump(eloRanking, open(eloFileName, 'wb'))
    return 0
################################################
def sortModels(eloRanking, gameName):
    """Rename the best_model_<i>.pkl files so their indices follow the
    order implied by *eloRanking*.  Returns the index permutation."""
    # argsort: newEloRanking[i] is the index of the i-th lowest rating.
    newEloRanking = sorted(range(len(eloRanking)), key=lambda k: eloRanking[k])
    #to prevent that we overwrite existing files we have to make temp-files:
    for i in range(len(eloRanking)):
        shutil.copy2("./best_models/" + str(gameName) + "/best_model_" + str(i) + ".pkl", "./best_models/" + str(gameName) + "/temp_best_model_" + str(i) + ".pkl")
    #overwrite the old model-files with the sorted temp-files:
    for i in range(len(eloRanking)):
        shutil.move("./best_models/" + str(gameName) + "/temp_best_model_" + str(i) + ".pkl", "./best_models/" + str(gameName) + "/best_model_" + str(newEloRanking[i]) + ".pkl") # the move removes the temp file, and the old best_model file has been overwritten !!?!
    return newEloRanking
#################################################
def turnier(gameFlow, amountGames, amountModels, gameName):
    """Play *amountGames* matches between randomly drawn models, update a
    naive Elo ranking, reorder the model files, and persist the ranking."""
    #eloRanking = loadEloFile(gameName)  # do we really need the results of earlier tournaments??
    #if (eloRanking == None):
    eloRanking = [1000] * amountModels
    print(eloRanking)
    for gameNumber in range(amountGames):
        # Draw two (not necessarily distinct) model indices at random.
        KI_One = nRand.nRand(amountModels) #TODO check: with amountModels > 1, is a different random model really used each time, and does the random model make it into the eloRanking at all?
        KI_Two = nRand.nRand(amountModels)
        #AIEnv.gameFlow([KI_One, KI_Two])
        gameFlow.resetgame()
        gameFlow.gameFlow([KI_One, KI_Two])
        print(eloRanking[KI_One])
        print(eloRanking[KI_Two])
        print(gameFlow.getWinner())
        newRanking = naiveElo(eloRanking[KI_One], eloRanking[KI_Two], gameFlow.getWinner())
        eloRanking[KI_One] = newRanking[0]
        eloRanking[KI_Two] = newRanking[1]
    # first sortModels to rename the pkl files
    sortModels(eloRanking, gameName)
    eloRanking = sorted(eloRanking)
    # then sort again for the txt file
    saveEloFile(eloRanking, gameName)
    print(eloRanking)
    return eloRanking
#test = [2,3,1,4,5]
#print(sortModels(test))
#TODO sort ranking worst to best. change names of (best-)model files. (from name to temp_name to new_name)
#TODO the whole file is a little ugly. housekeeping!
#bMap = {"synn": 1, "maij": 1}
#updatedElos = naiveElo(1.0, 2.0, 1)
#print(updatedElos)
| true |
92ae052c5890a65a15825ec42d3142b99c72b92d | Python | ddarkclay/programming-cookbook | /Hello_Python/returning_two_values.py | UTF-8 | 151 | 3.8125 | 4 | [] | no_license | def add_mul(x,y):
c = x+y
d = x*y
return c,d
result1,result2 = add_mul(4,7)
print("Addition is : ",result1,"Multiplication is : ",result2) | true |
6d6cf6f0312877bd441709df0d48a7b9aeb2d1db | Python | pietjan12/pacman_deeplearning | /gameinput.py | UTF-8 | 2,164 | 2.640625 | 3 | [] | no_license | # gameinput Module
import pacman
JS_XAXIS=0 # axis 0 for left/right (default for most joysticks)
JS_YAXIS=1 # axis 1 for up/down (default for most joysticks)
JS_STARTBUTTON=0 # button number to start the game.
# Use the first joystick when one is attached; keyboard-only otherwise.
pacman.pygame.joystick.init()
joystick_count = pacman.pygame.joystick.get_count()
if(joystick_count > 0):
    joyin = pacman.pygame.joystick.Joystick(0)
    joyin.init()
else:
    joyin = None
def CheckIfCloseButton(events):
    """Terminate the program when a window-close (QUIT) event is queued."""
    quit_type = pacman.pygame.QUIT
    for pending_event in events:
        if pending_event.type == quit_type:
            pacman.pygame.sys.exit(0)
def CheckInputs(game, player):
    """Poll keyboard/joystick and steer the player (game mode 1) or start a
    new game (mode 3).  A direction is only applied when the next cell in
    that direction is not a wall."""
    if game.mode == 1:
        if pacman.pygame.key.get_pressed()[pacman.pygame.K_RIGHT] or (joyin != None and joyin.get_axis(JS_XAXIS) > 0):
            if not pacman.thisLevel.CheckIfHitWall(player.x + player.speed, player.y, player.nearestRow, player.nearestCol):
                player.velX = player.speed
                player.velY = 0
        elif pacman.pygame.key.get_pressed()[pacman.pygame.K_LEFT] or (joyin != None and joyin.get_axis(JS_XAXIS) < 0):
            if not pacman.thisLevel.CheckIfHitWall(player.x - player.speed, player.y, player.nearestRow, player.nearestCol):
                player.velX = -player.speed
                player.velY = 0
        elif pacman.pygame.key.get_pressed()[pacman.pygame.K_DOWN] or (joyin != None and joyin.get_axis(JS_YAXIS) > 0):
            if not pacman.thisLevel.CheckIfHitWall(player.x, player.y + player.speed, player.nearestRow, player.nearestCol):
                player.velX = 0
                player.velY = player.speed
        elif pacman.pygame.key.get_pressed()[pacman.pygame.K_UP] or (joyin != None and joyin.get_axis(JS_YAXIS) < 0):
            if not pacman.thisLevel.CheckIfHitWall(player.x, player.y - player.speed, player.nearestRow, player.nearestCol):
                player.velX = 0
                player.velY = -player.speed
        # Escape quits immediately from gameplay.
        if pacman.pygame.key.get_pressed()[pacman.pygame.K_ESCAPE]:
            pacman.pygame.sys.exit(0)
    elif game.mode == 3:
        # Mode 3 is presumably the game-over/attract screen — TODO confirm.
        if pacman.pygame.key.get_pressed()[pacman.pygame.K_RETURN] or (joyin != None and joyin.get_button(JS_STARTBUTTON)):
game.StartNewGame() | true |
871d51b99c1c8d4ede1d8a344403c14a021dccd3 | Python | callmead/ComputerSecurity | /Part5B.py | UTF-8 | 402 | 2.859375 | 3 | [] | no_license | import tools, sys
def extract_message(ciphertext_path):
    # Python 2 script.  Read the ciphertext, drop the first character and
    # the last three ([1:-3] -- presumably formatting/newline residue, TODO
    # confirm against the file format), take the integer cube root via
    # tools.find_root and decode the result back to text.
    cipher_message = open(ciphertext_path).read().strip()
    print 'Message in ciphertext ->', tools.int_to_text(tools.find_root(int(cipher_message[1:-3]), 3))
if __name__ == "__main__":
    # Require the ciphertext path as the first CLI argument.
    try:
        sys.argv[1]
    except IndexError:
        print 'Please provide the ciphertext file path'
        quit()
extract_message(sys.argv[1])
| true |
bca27255ada364f24ab606b606d35b1d321adf3c | Python | WinrichSy/Codewars_Solutions | /Python/7kyu/SeriesOfIntegersFromMToN.py | UTF-8 | 155 | 3.15625 | 3 | [] | no_license | #Series of integers from m to n
#https://www.codewars.com/kata/5841f680c5c9b092950001ae
def generate_integers(m, n):
    """Return the list of consecutive integers from m to n inclusive."""
    return list(range(m, n + 1))
| true |
4c8e2493551d2b75a3f595387ab59ea7a5654e30 | Python | swinxxx/ana_project17 | /ana/add_disease.py | UTF-8 | 4,947 | 2.53125 | 3 | [] | no_license | import random
import os
import re
import pandas as pd
def fun():
    """Tag positive/neutral/negative tweet CSVs with disease/place columns
    and append all three frames to ana/final_file.csv.

    NOTE(review): assigning into `row` from DataFrame.iterrows() edits a
    copy in current pandas — confirm the edits actually land in the frames.
    """
    try:
        path = os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), 'ana/positive.csv')
        path1 = os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), 'ana/all.csv')
    except:
        print('error')
    csv_input = pd.read_csv(path)
    # Default labels; refined per-row below from the tweet text (column 0).
    csv_input['disease']='flu'
    csv_input['place']='florida'
    for index, row in csv_input.iterrows():
        if re.search('vaccine', row[0], re.IGNORECASE) != None :
            row[2]='vaccine'
        if re.search('cough', row[0], re.IGNORECASE) != None :
            row[2] = 'cough'
    # Look up each tweet's place in all.csv; fall back to a random place.
    all_input = pd.read_csv(path1)
    li=[]
    li=all_input['place']
    for index, row in csv_input.iterrows():
        flag=0
        for ind, rowi in all_input.iterrows():
            if(str(row[0])==str(rowi[2])):
                row[3] = rowi[1]
                flag=1
                break
        if flag == 0 :
            row[3] = random.choice(li)
    # ---- neutral tweets: same tagging ----
    try:
        path_neu = os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), 'ana/neutral.csv')
    except:
        print('error')
    csv_neu = pd.read_csv(path_neu)
    csv_neu['disease']='flu'
    csv_neu['place']='florida'
    for index, row in csv_neu.iterrows():
        if re.search('vaccine', row[0], re.IGNORECASE) != None :
            row[2]='vaccine'
        if re.search('cough', row[0], re.IGNORECASE) != None :
            row[2] = 'cough'
    # Place lookup for neutral rows.
    for index, row in csv_neu.iterrows():
        flag=0
        for ind, rowi in all_input.iterrows():
            if(str(row[0])==str(rowi[2])):
                flag=1
                row[3] = rowi[1]
                break
        if flag == 0:
            row[3] = random.choice(li)
    # ---- negative tweets: same tagging (note the different encoding) ----
    try:
        path_neg = os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), 'ana/negative.csv')
    except:
        print('error')
    csv_neg = pd.read_csv(path_neg, encoding = "ISO-8859-1")
    csv_neg['disease']='flu'
    csv_neg['place']='florida'
    for index, row in csv_neg.iterrows():
        if re.search('vaccine', row[0], re.IGNORECASE) != None :
            row[2]='vaccine'
        if re.search('cough', row[0], re.IGNORECASE) != None :
            #print("{} \t {}".format(str(row[0]), 'cough'))
            row[2] = 'cough'
    all_input.values.T.tolist()  # NOTE(review): result discarded — this line has no effect.
    for index, row in csv_neg.iterrows():
        flag=0
        for ind, rowi in all_input.iterrows():
            if(str(row[0])==str(rowi[2])):
                flag=1
                row[3] = rowi[1]
                break
        if flag == 0 :
            row[3] = random.choice(li)
    print(csv_input)
    print("*****************************************************")
    print(csv_neg)
    print("*****************************************************")
    print(csv_neu)
    print("*****************************************************")
    # ---- append all three frames to the output file ----
    try:
        path_out = os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), 'ana/final_file.csv')
    except:
        print('error')
    with open(path_out, 'a') as out:
        csv_input.to_csv(out)
        csv_neg.to_csv(out)
        csv_neu.to_csv(out)
fun()
| true |
36105b2509774d012c06cb94154c905ef514b147 | Python | jacobbieker/Numerical-Recipes-In-Astrophysics | /tutorial_one.py | UTF-8 | 3,640 | 3.6875 | 4 | [
"MIT"
] | permissive | import math
import matplotlib.pyplot as plt
nums = [i for i in range(100)]
def avg(array):
    """Return the arithmetic mean of a non-empty numeric sequence."""
    return sum(array) / len(array)
def std(array):
    """Return the population standard deviation of a non-empty sequence."""
    mean = sum(array) / len(array)
    variance = sum(abs(sample - mean) ** 2 for sample in array) / len(array)
    return math.sqrt(variance)
even_nums = [i for i in range(0, 100, 2)]
odd_nums = []
for element in nums:
    if element not in even_nums:
        odd_nums.append(element)
# NOTE(review): the avg/std results below are computed and discarded.
avg(even_nums)
std(even_nums)
avg(odd_nums)
std(odd_nums)
# Part C/D: split nums by membership in the two excluded ranges.
excluded_nums = [i for i in range(10,20,1)]
other_excluded = [i for i in range(45,57,1)]
part_c_array = []
for element in nums:
    if element not in excluded_nums and element not in other_excluded:
        part_c_array.append(element)
avg(part_c_array)
std(part_c_array)
part_d_array = []
for element in nums:
    if element in excluded_nums or element in other_excluded:
        part_d_array.append(element)
avg(part_d_array)
std(part_d_array)
# Exercise 1 Part E
# Sample A*e^x - B*x at 100 evenly spaced points on [0, 3).
x = []
step = 3/100
end = 3
start = 0
for i in range(100):
    x.append(start+(step*i))
print(x)
A = 0.8
B = 2
y = []
for element in x:
    y.append(A*math.exp(element) - B*element)
plt.scatter(x, y)
plt.title("Part E")
#plt.show()
print("Mean of Y: {}".format(avg(y)))
# Part F
def factorial(number):
    """Return number! for a non-negative integer.

    Bug fix: the original base case was ``number == 1``, so factorial(0)
    recursed until RecursionError; 0! == 1 is now handled as well.
    """
    if number <= 1:
        return 1
    return number * factorial(number - 1)
# Re-sample x with only 10 points for the series-approximation comparison.
step = 3/10
x = []
for i in range(10):
    x.append(start+(step*i))
def estimate_ex(x, k):
    """Approximate e**x by the Taylor series sum of x**i / i! for i = 0..k.

    Bug fixes: the original computed only the single k-th term and had no
    return statement (callers received None).  math.factorial is used so the
    i == 0 term does not depend on the recursive factorial() above.
    """
    final_solution = 0.0
    for i in range(k + 1):
        final_solution += (x ** i) / math.factorial(i)
    return final_solution
# Evaluate the degree-5 series estimate at each sample point.
y_est = []
for element in x:
    y_est.append(estimate_ex(element, 5))
print(len(x))
print(len(y))
# NOTE(review): estimate_ex as written above has no return statement, so
# y_est holds None values and this scatter call will fail at runtime.
plt.scatter(x, y_est, c='red')
plt.show()
# Seems to compare the same. Built-in should be faster.
math.factorial(5)
# Number 3
import numpy as np
def func_a(x):
    """Evaluate f(x) = 2 * x * sin(0.8 * x)."""
    sine_term = np.sin(0.8 * x)
    return 2 * x * sine_term
# Seven samples of func_a on [1, 20], stored as (x, f(x)) pairs.
seven_points = np.linspace(1,20, 7)
all_points = [(x, func_a(x)) for x in seven_points]
def determine_closest_points(points, x_point):
    """Return the pair of consecutive points whose x-range brackets x_point.

    points: (x, y) pairs sorted by ascending x.
    Returns [left, right] when x_point lies inside the sampled range;
    outside the range a single nearest endpoint is returned (callers treat
    a one-element result as the extrapolation case).

    Bug fixes: the original comparison used an undefined name ``x`` and the
    out-of-range branch indexed past the end of the list.
    """
    for left, right in zip(points, points[1:]):
        if left[0] <= x_point <= right[0]:
            return [left, right]
    # Out of range: report only the closest endpoint.
    if not points:
        return []
    return [points[-1]] if x_point > points[-1][0] else [points[0]]
def linear_interp(points, new_x_points):
    """Linearly interpolate (x, y) samples at each requested x.

    points: (x, y) pairs sorted by ascending x.
    new_x_points: x locations to evaluate; each must lie inside the sampled
    range (extrapolation raises NotImplementedError).
    Returns a list of (x, y) pairs.

    Bug fixes: the interpolation formula dropped the y0 offset (it returned
    only slope * (x - x0)), and the extrapolation case returned the
    NotImplementedError class instead of raising it.
    """
    new_points = []
    for x_point in new_x_points:
        for (x0, y0), (x1, y1) in zip(points, points[1:]):
            if x0 <= x_point <= x1:
                slope = (y1 - y0) / (x1 - x0)
                new_points.append((x_point, y0 + slope * (x_point - x0)))
                break
        else:
            raise NotImplementedError("extrapolation is not supported")
    return new_points
def polynomial_interp(points, new_x_points):
    """Evaluate the Lagrange interpolating polynomial through *points* at
    each x in *new_x_points*; returns a list of (x, y) pairs.

    The original body was a stub with a syntax error (``if len(...) == :``)
    and always produced y = 0; this implements the intended polynomial
    interpolation using every sample point.
    """
    new_points = []
    for x_point in new_x_points:
        y = 0.0
        for i, (xi, yi) in enumerate(points):
            # Lagrange basis polynomial L_i evaluated at x_point.
            term = yi
            for j, (xj, _) in enumerate(points):
                if j != i:
                    term *= (x_point - xj) / (xi - xj)
            y += term
        new_points.append((x_point, y))
    return new_points
def neville_interp(points, new_x_points):
return NotImplementedError
| true |
bd6740da418381bf693348c3200e1d945d8a5142 | Python | ASHWIGUPTA/CodeForces-Ladders-1300 | /python/segment.py | UTF-8 | 1,081 | 2.734375 | 3 | [] | no_license | import math
t=[]
w=[]
def const(a,l,h,k):
    # Build the sum segment tree `t` (module-level global) over a[l..h];
    # k is the node index in the implicit binary-heap layout.
    if(l==h):
        t[k]=a[l]
        return
    mid =int((l+h)//2)
    const(a,l,mid,int(2*k)+1)
    const(a,mid+1,h,int(2*k)+2)
    t[k]=t[int(k*2)+1]+t[int(k*2)+2]
    return
def constSort(a,l,h,k):
    # Same construction as const(), but fills the tree `w` — callers pass
    # the sorted copy of the input array here.
    if(l==h):
        w[k]=a[l]
        return
    mid =int((l+h)//2)
    constSort(a,l,mid,int(2*k)+1)
    constSort(a,mid+1,h,int(2*k)+2)
    w[k]=w[int(k*2)+1]+w[int(k*2)+2]
    return
def call(a,l,h,ql,qh,p):
    """Segment-tree range query: sum of the leaves of tree *a* in [ql, qh].

    [l, h] is the range covered by node index *p*.
    """
    if ql <= l and qh >= h:
        # Node range fully inside the query range.
        return a[p]
    if ql > h or l > qh:
        # Disjoint ranges contribute nothing.
        return 0
    mid = (l + h) // 2
    left_sum = call(a, l, mid, ql, qh, 2 * p + 1)
    right_sum = call(a, mid + 1, h, ql, qh, 2 * p + 2)
    return left_sum + right_sum
# Read n, the array, and build two segment trees: `t` over the raw array
# and `w` over its sorted copy.
n=[0]*int(input())
a = list(map(int, input().split()))
s=a.copy();
s=sorted(s);
# Tree size: next power of two above n, doubled (heap layout).
r=math.ceil(math.log(len(n))/math.log(2))
r=math.pow(2,r)
t=[0]*(int(r*2)-1)
w=[0]*(int(r*2)-1)
const(a,0,len(n)-1,0)
constSort(s,0,len(n)-1,0)
q=int(input())
for i in range(q):
b = list(map(int, input().split()))
if(b[0]==1):
print(call(t,0,len(n)-1,b[1]-1,b[2]-1,0))
else:
print(call(w,0,len(n)-1,b[1]-1,b[2]-1,0))
| true |
06683a492ccf4fc6a7aeab346333ac836d0ffe25 | Python | csky6688/flaskr | /app/main/__init__.py | UTF-8 | 464 | 2.53125 | 3 | [
"MIT"
] | permissive | from flask import render_template
from flask import Blueprint
from .. import db
from ..model import Bank
main_bp = Blueprint('main', __name__, url_prefix='/')
@main_bp.route('/')
def index():
    # Local import keeps `re` out of the module namespace.
    import re
    # Only "challenge" (挑战题) questions that already have an answer.
    data = [x for x in Bank.query.all() if "挑战题" == x.category and x.answer]
    for item in data:
        # Blank out the gaps: runs of whitespace, empty (half/full-width)
        # parentheses, and empty 【】 brackets become "____".
        item.content = re.sub(r'(\s\s+)|((\(|()\s*(\)|)))|(【\s*】)', "____", item.content)
return render_template('index.html', banks=data) | true |
b4255d4879338a6785b572aef695cfe8ac9639e3 | Python | httran13/ArduinoPythonProject | /ArduinoBookProject/DCMotor.py | UTF-8 | 608 | 3.140625 | 3 | [] | no_license | def dcMotorControl(r, deltaT):
pwmPin.write(r/100.00)
sleep(deltaT)
pwmPin.write(0)
from pyfirmata import Arduino
from time import sleep
import os
# Python 2 script driving a DC motor through a Firmata board.
port = '/dev/cu.usbmodem621'
board = Arduino(port)
sleep(5)  # give the board time to finish its reset/handshake
# set mode of pin 3 as PWM
pwmPin = board.get_pin('d:3:p')
try:
    while True:
        # Python 2 input() evaluates the typed expression to a number.
        r = input("Enter value to set motor speed: ")
        if ( r > 100) or ( r <= 0 ) :
            print "betwee 0 to 100 only"
            board.exit()
            break
        t = input("How long? in seconds")
        dcMotorControl(r, t)
except KeyboardInterrupt:
    board.exit()
    # NOTE(review): os._exit is referenced but never called — presumably
    # os._exit(0) was intended.
os._exit
| true |
53a7c0257ab6047a3345fc0c1d261a9037d65f24 | Python | zjicmDarkWing/PythonGrayHat | /chapter1-unions.py | UTF-8 | 499 | 3.046875 | 3 | [] | no_license | __author__ = 'DarkWing'
from ctypes import *
class barley_amount(Union):
    # ctypes Union: all three fields overlay the same storage, so one value
    # can be viewed as a long, an int, or raw chars.
    _fields_ = [
    ("barley_long",c_long),
    ("barley_int",c_int),
    ("barley_char",c_char*8),
    ]
# Python 2: raw_input returns a string; the union is built from its int value.
value = raw_input("Enter the amount of barley to put into the beer vat:")
my_barley = barley_amount(int(value))
# The same bytes reinterpreted through different union members.
print "Barley amount as a long: %ld" %my_barley.barley_long
print "Barley amount as an long: %d" %my_barley.barley_int
print "Barley amount as a char: %s" %my_barley.barley_char | true |
a6af3755875302f84b32cdac2771e10a15b3b214 | Python | elisiojsj/Japanese_nlp_scripts | /get_char_type.py | UTF-8 | 2,375 | 3.34375 | 3 | [
"BSD-3-Clause"
] | permissive | # Function to determine the character class of a single character in a Japanes text.
# Distinguishes between 6 classes, OTHER, ROMAJI, HIRAGANA, KATAKANA, DIGIT, KANJI
#
# These classes can be useful as features in a machine learning classifier.
#
#
# * Copyright (c) 2016, Mads Sørensen Ølsgaard
# * All rights reserved.
#
# * Adapted from KyTea, https://github.com/neubig/kytea/blob/master/src/lib/string-util.cpp
# * Copyright 2009, KyTea Development Team
# *
# * Licensed under the Apache License, Version 2.0 (the "License");
# * you may not use this file except in compliance with the License.
# * You may obtain a copy of the License at
# *
# * http://www.apache.org/licenses/LICENSE-2.0
def get_char_type(c):
# find the type of a unicode character
# Adapted from KyTea
OTHER, ROMAJI, HIRAGANA, KATAKANA, DIGIT, KANJI = 'O', 'R', 'H', 'K', 'D', 'k'
if(len(c) == 0):
return OTHER;
val = ord(c)
# Basic latin uppercase, basic latin lowercase
# Full width uppercase, full width lowercase
if (0x41 <= val <= 0x5A) or (0x61 <= val <= 0x7A) or (0xFF21 <= val <= 0xFF3A) or (0xFF41 < val < 0xFF5A):
return ROMAJI;
# hiragana (exclude repetition characters)
if (0x3040 <= val <= 0x3096):
return HIRAGANA
# full width (exclude center dot), half width
if (0x30A0 <= val <= 0x30FF and val != 0x30FB) or (0xFF66 <= val <= 0xFF9F):
return KATAKANA
# basic latin digits
if (0x30 <= val <= 0x39) or (0xFF10 <= val <= 0xFF19):
return DIGIT;
# CJK Unified Ideographs
if ((0x3400 <= val <= 0x4DBF) # CJK Unified Ideographs Extension A
or (0x4E00 <= val <= 0x9FFF) # CJK Unified Ideographs
or (0xF900 <= val <= 0xFAFF) # CJK Compatibility Ideographs
#|| (0x1F200 <= val <= 0x1F2FF) # Enclosed Ideographic Supplement
or (0x20000 <= val <= 0x2A6DF) # CJK Unified Ideographs Extension B
or (0x2A700 <= val <= 0x2B73F) # CJK Unified Ideographs Extension C
or (0x2B740 <= val <= 0x2B81F) # CJK Unified Ideographs Extension D
or (0x2F800 <= val <= 0x2FA1F)): # CJK Compatibility Ideographs Supplement
return KANJI
return OTHER
if __name__=="__main__":
text = '初めての駅 自由が丘の駅で、大井町線から降りると、ママは、トットちゃんの手を引っ張って、改札口を出ようとした。'
print('original text:\t', text)
print('character classes:\t', ''.join([get_char_type(c) for c in text])) | true |
f4cad1d26bc353b7eaf1e71bc01fe9ec79277015 | Python | HankBrownGit/tetris | /Game.py | UTF-8 | 3,426 | 2.859375 | 3 | [
"MIT"
] | permissive | import sys
import BrickPattern
from Gfx import *
from Timer import *
class Game():
    """Tetris-style game: owns the board state, the falling brick pattern,
    and the pygame main loop."""

    def __init__(self, **kwargs):
        """Accepts size=[cols, rows], brickSize (pixels), speed (cells/sec)."""
        self._size = kwargs.get('size', [20, 30])
        self._brick_size = kwargs.get('brickSize', 10)
        # Extra 50 px at the bottom for UI space.
        self._windowSize = [self._size[0] * self._brick_size, self._size[1] * self._brick_size + 50]
        self._cps = kwargs.get('speed', 1)
        self._screen = pygame.display.set_mode(self._windowSize)
        self._timer = Timer()
        self._petrifiedBricks = []
        self._brickPattern = BrickPattern.BrickPattern([5, 0])
        self._gfx = Gfx(surface=self._screen, brickSize=self._brick_size, size=self._size, timer=self._timer,
                        brickPattern=self._brickPattern, petrifiedBricks=self._petrifiedBricks)
        # Seconds between automatic downward steps.
        self._delay = 1 / self._cps

    def _checkLines(self):
        # TODO: completed-line detection/removal is not implemented yet.
        pass

    def _petrifyBricks(self):
        """Freeze the current pattern's cells into the board and respawn."""
        anchor = self._brickPattern.getAnchor()
        for brick in self._brickPattern.getPattern():
            self._petrifiedBricks.append([anchor[0] + brick[0], anchor[1] + brick[1]])
        self._brickPattern.respawn()

    def _checkBricks(self):
        """Petrify the pattern when its next downward step would collide
        with frozen bricks or the board bottom."""
        collide = False
        anchor = self._brickPattern.getAnchor()
        anchor = [anchor[0], anchor[1] + 1]
        # Cell positions the pattern would occupy after moving down once.
        futureBrick = [[anchor[0] + b[0], anchor[1] + b[1]] for b in self._brickPattern.getPattern()]
        for brick in futureBrick:
            for petriBrick in self._petrifiedBricks:
                if brick == petriBrick:
                    collide = True
        if collide:
            self._petrifyBricks()
            return 0
        if self._brickPattern.getMaxY() >= self._size[1] - 1:
            self._petrifyBricks()
            return 0

    def run(self):
        """
        Game main loop
        """
        self._timer.start()
        exitGame = False
        while not exitGame:
            # Automatic fall once per delay interval.
            if self._timer.getTotalTime() >= self._delay:
                self._brickPattern.moveDown()
                self._timer.reset()
            self._checkBricks()
            keys = pygame.key.get_pressed()
            if keys[pygame.K_RIGHT]:
                print("Right key pressed")
            for event in pygame.event.get():
                if event.type == pygame.QUIT:
                    sys.exit()
                if event.type is pygame.KEYDOWN:
                    key = pygame.key.name(event.key)
                    if key == "escape":
                        exitGame = True
                    if key == "up":
                        print("next max: {}, nextMin {}".format(self._brickPattern.getMaxX(True),
                                                                self._brickPattern.getMinX(True)))
                        # Rotate only when the rotated shape stays in bounds.
                        if self._brickPattern.getMaxX(True) < self._size[1] and self._brickPattern.getMinX(True) >= 0:
                            self._brickPattern.rotate()
                    if key == "right":
                        if self._brickPattern.getMaxX() < self._size[1]:
                            self._brickPattern.moveRight()
                    if key == "down":
                        self._brickPattern.moveDown()
                    if key == "left":
                        if self._brickPattern.getMinY() > 0:
                            self._brickPattern.moveLeft()
self._gfx.draw() | true |
8dfaf9b81f8edeec4f401cb94cf17b90e5151f6d | Python | Silviron/Python | /ex2_5_5.py | UTF-8 | 116 | 2.734375 | 3 | [] | no_license | jusik=1000000
# Stock price after three consecutive 30% daily drops (jusik is set above).
jusik_Mon=jusik*(1-0.3)
jusik_Thu=jusik_Mon*(1-0.3)
jusic_Wed=jusik_Thu*(1-0.3)
print(jusic_Wed)
| true |
4d734f0f3528a7d52904ee5e25472581a48dfc18 | Python | mrbazzan/flaskAPI | /binary_search_tree.py | UTF-8 | 2,174 | 3.8125 | 4 | [] | no_license |
class Node:
    """One binary-search-tree node: a payload plus two child links."""

    def __init__(self, data):
        self.data = data
        # Children start empty; the tree wires them up on insert.
        self.left = None
        self.right = None
class BinarySearchTree:
    """Binary search tree keyed on record["id"]; duplicates are ignored."""

    def __init__(self):
        self.root = None

    def _insert_recursive(self, data_, node):
        """Descend from *node* and attach data_ at the proper empty slot."""
        if data_["id"] < node.data["id"]:
            if node.left is None:
                node.left = Node(data_)
            else:
                self._insert_recursive(data_, node.left)
        elif data_["id"] > node.data["id"]:
            if node.right is None:
                node.right = Node(data_)
            else:
                self._insert_recursive(data_, node.right)
        else:
            return  # binary search tree already has value and it doesn't support duplicates.

    def insert(self, data_):
        """Insert record data_ (a mapping with an "id" key)."""
        if self.root is None:
            self.root = Node(data_)
        else:
            self._insert_recursive(data_, self.root)  # only be used by insert method(the `_` in front)

    def _search_recursive(self, id, node):
        """Return the record below *node* whose id matches, else False.

        Bug fix: the original returned None for some misses (falling off a
        one-child node) and False for others; misses are now uniformly
        False.  Both are falsy, so truthiness-based callers see no change.
        """
        if node is None:
            return False
        if id == node.data['id']:
            return node.data
        if id < node.data['id']:
            # check the left node
            return self._search_recursive(id, node.left)
        return self._search_recursive(id, node.right)

    def search(self, id):
        """Return the record whose id equals int(id), or False when absent."""
        id = int(id)
        if self.root is None:
            return False
        return self._search_recursive(id, self.root)

    def _recursive_tree(self, node):
        """Print *node* with its direct children, then recurse into them."""
        the_list = (node.left, node.right)
        if the_list == (None, None):
            return
        else:
            print('Node: ', node.data)
            print('  Sub-node:', list(map(lambda x: x.data if x is not None else x, the_list)))
            for each_node in the_list:
                if each_node:
                    self._recursive_tree(each_node)

    def print_tree(self):
        """Dump the tree.  NOTE(review): a tree whose root is a leaf prints
        nothing because _recursive_tree skips childless nodes — confirm
        that is intended."""
        if self.root is None:
            print(None)
        else:
            node = self.root
            self._recursive_tree(node)
| true |
47ab587f443cd24fec6346916aa4709f196ef400 | Python | cakepower/MapMakingKorea | /day5/day5_twitter.py | UTF-8 | 1,274 | 2.84375 | 3 | [] | no_license | #!__*__coding:utf-8__*__
import tweepy
import os
TOKEN = "44498241-6O6QZbeJ13v3oS6OmVXLnkU7gHzfyDctQ1jIlCk"
TOKEN_KEY = "lPyhLTPIacEamNtouEXAOaZOaZviIruLjDDWdWcyhrI"
CON_SECRET = "sy9SrErwMFoJRv1M0Ywug"
CON_SECRET_KEY = "uxHZxm56KfI6TPwpRepRqn4Kvna920Hb3jpNoZ5NrF4"
def get_twit():
    """Search tweets around South Korea for the keyword and dump their text
    to ./twitter.txt, one tweet per line (also echoed to stdout)."""
    auth = tweepy.OAuthHandler(CON_SECRET, CON_SECRET_KEY)
    auth.set_access_token(TOKEN, TOKEN_KEY)
    api = tweepy.API(auth)
    location = "%s,%s,%s" % ("35.95", "128.25", "1000km") # search center (roughly the middle of South Korea) and radius
    keyword = "#임시공휴일" # search terms can be OR-joined (OR must be uppercase), up to 5 terms
    wfile = open(os.getcwd() + "/twitter.txt", mode='w', encoding="utf-8") # write mode
    # Declare the Twitter search cursor.
    cursor = tweepy.Cursor(api.search,
                           q=keyword,
                           since='2016-01-01', # only tweets written after this date
                           count=100, # tweets per page, max 100
                           geocode=location, # search-radius constraint
                           include_entities=True)
    for i, tweet in enumerate(cursor.items()):
        print("{}: {}".format(i, tweet.text))
        wfile.write(tweet.text + '\n')
    wfile.close()
get_twit() | true |
91f3fd78df874c6b2dda1152f2a17fbb8789a50b | Python | SimpleIcy/crossin_practice | /medals_ranking.py | UTF-8 | 1,456 | 3.609375 | 4 | [] | no_license |
class MedalsInfo:
    """Per-country medal tally (the 'sliver'/'modal' spellings are kept for
    compatibility with existing callers)."""

    _MEDAL_NAMES = ('gold', 'sliver', 'bronze')

    def __init__(self, country, gold, sliver, bronze):
        self.country = country
        self.gold = gold
        self.sliver = sliver
        self.bronze = bronze

    def add_modal(self, modal):
        """Increment one medal counter; print usage help on a bad name."""
        if modal in self._MEDAL_NAMES:
            setattr(self, modal, getattr(self, modal) + 1)
        else:
            print('Type in data should be like this: MODAL_TYPE\n\
MODAL_TYPE include gold,sliver, bronze.or the first letter in lowercase')

    def count(self):
        """Total number of medals won."""
        return sum((self.gold, self.sliver, self.bronze))

    def __str__(self):
        return f" Country: {self.country}\tGold: {self.gold}\t Sliver: {self.sliver}\t Bronze: {self.bronze}"
# initialize data
china = MedalsInfo('china', 29, 20, 13)
usa = MedalsInfo('USA', 17, 24, 10)
india = MedalsInfo('India', 18, 14, 9)
# add medals won after the initial tally
china.add_modal('gold')
china.add_modal('sliver')
usa.add_modal('sliver')
india.add_modal('bronze')
all_list = [china, usa, india]
# rank by total medal count
order_by_sum = sorted(all_list, key=lambda x: x.count(), reverse=True)
# rank by gold medals only
order_by_gold = sorted(all_list, key=lambda x: x.gold, reverse=True)
# print both rankings (Chinese headers: total-medal table / gold-medal table)
print('\t\t\t奖牌总数榜\t\t\t')
for i in order_by_sum:
    print(i)
print('\t\t\t金牌榜\t\t\t')
for i in order_by_gold:
    print(i)
| true |
3f93ff10962653d86c2301f1c0d4ee324e2c6e8d | Python | VineethSendilraj/RosalindBioinformaticsCourse | /11_Genome_Assembly_as_Shortest_Superstring.py | UTF-8 | 4,555 | 2.875 | 3 | [] | no_license | import math
from collections import namedtuple
class loadGenes:
    """Accumulates FASTA records as [name, sequence] pairs.

    Bug fix: splitFile/name/Gsequence were class attributes, so every
    instance (and therefore every loadFile call) shared one growing list;
    they are now per-instance state.  The public interface is unchanged —
    loadGenes.isNewSequence(line) still works as before.
    """

    def __init__(self):
        self.splitFile = []   # committed records: [name, sequence]
        self.name = ''        # header of the record currently being read
        self.Gsequence = ''   # sequence text accumulated so far

    @staticmethod
    def isNewSequence(line):
        """True when *line* is a FASTA header ('>name')."""
        return line[0] == '>'

    def newSequenceStart(self, line):
        """Remember the header name (the leading '>' is stripped)."""
        self.name = line[1:]

    def addNewSequenceInfo(self):
        """Commit the record in progress, if any, and reset the buffers."""
        if len(self.Gsequence) > 0:
            self.splitFile.append([self.name, self.Gsequence])
            self.name, self.Gsequence = '', ''

    def appendSequence(self, line):
        """Append one line of sequence data to the current record."""
        self.Gsequence += line

    def numberOfSequences(self):
        return len(self.splitFile)

    def getSequence(self, index):
        """Return (name, sequence) at *index*, or None when out of range."""
        if index >= self.numberOfSequences():
            return None
        gene = self.splitFile[index]
        return (gene[0], gene[1])

    def findGeneByName(self, name):
        """Return the sequence stored under *name* (None when absent)."""
        for seq in self.splitFile:
            if seq[0] == name:
                return seq[1]
#<-- START: Splits the file and appends it to the list in the format [Name, Gene Sequence] -->
def loadFile(fileName):
    """Parse the FASTA file at *fileName* and return a populated loadGenes container."""
    assembly = loadGenes()
    with open(fileName, 'r') as handle:
        for raw in handle:
            line = raw.rstrip()
            if loadGenes.isNewSequence(line):
                # A new header closes out whatever record was in progress.
                assembly.addNewSequenceInfo()
                assembly.newSequenceStart(line)
            else:
                assembly.appendSequence(line)
    # Flush the final record, which has no header after it.
    assembly.addNewSequenceInfo()
    return assembly
#<-- END: Splits the file and appends it to the list in the format [Name, Gene Sequence] -->
class checkOverlap:
    """Greedy shortest-superstring assembly over the reads held in a loadGenes-style container."""

    # (s1, s1gene): a read's name and sequence; (s2, s2gene): the read it
    # chains into; maxlen: length of the suffix/prefix overlap between them.
    geneRel = namedtuple('geneRel', "s1 s1gene s2 s2gene maxlen")

    @staticmethod
    def setMinVal(s):
        """Minimum overlap required: strictly more than half of *s*."""
        return math.floor(len(s) / 2) + 1

    def __init__(self, genes) -> None:
        # *genes* must provide numberOfSequences() and getSequence(i).
        self.genes = genes

    # <-- START: Checks if the two strings overlap -->
    @staticmethod
    def oCheck(s1, s2, minLength):
        """Return (True, overlap_len) when a suffix of *s1* of at least
        *minLength* characters equals a prefix of *s2*, else (False, -1)."""
        start = 0
        while True:
            index = s1.find(s2[:minLength], start)
            if index == -1:
                return (False, -1)
            if s2.startswith(s1[index:]):
                return (True, len(s1[index:]))
            # Resume after the failed candidate (the original advanced by one
            # and re-found the same index repeatedly; same result, less work).
            start = index + 1

    def findMaxMatch(self, s1):
        """Find the other read whose prefix overlaps *s1*'s suffix the most.

        Returns (overlap_len, name, sequence); overlap_len is 0 when no read
        overlaps by at least half of *s1*.
        """
        maxlen, lastMatch, lastmatchname = 0, "", ""
        required = checkOverlap.setMinVal(s1)  # loop-invariant, hoisted
        for i in range(self.genes.numberOfSequences()):
            name, gene = self.genes.getSequence(i)
            if gene != s1:
                matched, olen = checkOverlap.oCheck(s1, gene, required)
                # (the original bound this to a local named `len`, shadowing
                # the builtin — renamed to `olen`)
                if matched and maxlen < olen:
                    maxlen = olen
                    lastMatch = gene
                    lastmatchname = name
        return (maxlen, lastmatchname, lastMatch)

    def buildSeq(self):
        """Build one geneRel per read that overlaps some other read."""
        finalSeq = []
        for i in range(self.genes.numberOfSequences()):
            name, gene = self.genes.getSequence(i)
            maxlen, matchname, matchgene = self.findMaxMatch(gene)
            if maxlen > 0:
                finalSeq.append(self.geneRel(name, gene, matchname, matchgene, maxlen))
        return finalSeq

    def getFirstSeq(self, lst):
        """Return the relation whose read is nobody's successor (the chain head).

        Returns None when every read is some other read's successor (a cycle);
        NOTE(review): buildChromosome does not guard against that case.
        """
        for elem in lst:
            if sum(1 for cur in lst if cur.s2 == elem.s1) == 0:
                return elem

    def buildChromosome(self):
        """Walk the overlap relations from the chain head, in assembly order."""
        result = self.buildSeq()
        currentgene = self.getFirstSeq(result)
        finalOutput = []
        while len(result) > 0:
            finalOutput.append(currentgene)
            result.remove(currentgene)
            if len(result) == 0:
                break
            # Find the relation that continues the chain.
            nextSeq = None
            for elem in result:
                if elem.s1 == currentgene.s2:
                    nextSeq = elem
                    break
            # (replaces the original's opaque `(nextSeq, None)[nextSeq == None]`)
            currentgene = nextSeq
            if currentgene is None:
                # The chain ends before all relations are consumed.
                break
        return finalOutput

    def buildSequence(self):
        """Concatenate the ordered reads, dropping each overlap once; return the superstring."""
        result = self.buildChromosome()
        start = result[0]
        strChromoSome = start.s1gene + start.s2gene[start.maxlen:]
        result.remove(start)
        for grel in result:
            strChromoSome += grel.s2gene[grel.maxlen:]
        return strChromoSome
def genomeShortestString(fileName):
    """Load the FASTA reads in *fileName*, assemble them greedily and print the superstring."""
    overlap = checkOverlap(loadFile(fileName))
    overlap.buildChromosome()  # kept from the original; buildSequence re-runs the walk itself
    print(overlap.buildSequence())
# Demo entry point, guarded so importing this module no longer triggers the
# file I/O and print on import. (A stray dataset token was also removed from
# the call line.)
if __name__ == '__main__':
    fileName = '11_Genome_AssemblyAs_Shortest_Superstring_Test_File.txt'
    genomeShortestString(fileName)
b03c098f9e250594598a26f370ca53315204c385 | Python | kideveloper612/Phone-Search-Python-Scripts | /Filtered.py | UTF-8 | 37,225 | 2.546875 | 3 | [] | no_license | import threading
import json
import requests
import time
from requests.exceptions import ConnectTimeout, ConnectionError
from datetime import datetime
from bs4 import BeautifulSoup
import os
import csv
import random
import string
import sys
import re
from datetime import datetime
# Header row written once at the top of every newly created result CSV.
csv_header = [['NAME', 'ADDRESS', 'PHONE NUMBER', 'AGE', 'DATE']]
# Directory receiving all CSV output (results, pass/fail/not-found lists).
output_directory = '/home/ubuntu/laptop/output'
class myThread(threading.Thread):
    """Worker thread: processes one slice of the phone list through one dedicated proxy host."""

    def __init__(self, threadID, phone_list, proxy):
        super().__init__()
        self.threadID = threadID      # 1-based worker id, used only for logging
        self.phone_list = phone_list  # slice of phone numbers this worker owns
        self.myproxy = proxy          # proxy host:port string for this worker

    def run(self):
        """Announce the worker and hand its slice to rotate()."""
        print("Starting to working on {} thread ".format(self.threadID))
        return rotate(self.phone_list, self.myproxy)
def write_direct_csv(lines, filename):
    """Append the given rows to *filename* inside the shared output directory."""
    global output_directory
    target = os.path.join(output_directory, filename)
    # `with` already closes the file; the original's explicit close() inside
    # the with-block was redundant.
    with open(target, 'a', encoding="utf-8", newline='') as csv_file:
        csv.writer(csv_file, delimiter=',').writerows(lines)
def write_csv(lines, filename):
    """Append result rows to *filename*, creating the directory and the
    header row on first use.

    lines: iterable of row lists matching csv_header's columns.
    """
    global output_directory
    # makedirs(exist_ok=True) replaces the original isdir()/mkdir() pair,
    # which raced between worker threads doing the same check and failed
    # outright when the parent directory was missing.
    os.makedirs(output_directory, exist_ok=True)
    if not os.path.isfile(os.path.join(output_directory, filename)):
        write_direct_csv(lines=csv_header, filename=filename)
    write_direct_csv(lines=lines, filename=filename)
def write_phones(lines, file_name):
    """Append phone-number rows to *file_name* in the shared output directory."""
    global output_directory
    # Race-free directory creation: the original isdir()+mkdir() pair could
    # collide between worker threads creating the directory simultaneously.
    os.makedirs(output_directory, exist_ok=True)
    with open(os.path.join(output_directory, file_name), 'a', encoding="utf-8", newline='') as csv_file:
        csv.writer(csv_file, delimiter=',').writerows(lines)
def read_phones():
    """Return the list of already-processed phone numbers (one per line,
    trailing newlines kept), or [] when the pass-file does not exist yet."""
    global output_directory
    os.makedirs(output_directory, exist_ok=True)
    path = os.path.join(output_directory, pass_file)
    if not os.path.isfile(path):
        return []
    # `with` guarantees the handle is closed even if readlines() raises;
    # the original leaked the handle on error.
    with open(path, encoding="utf-8") as src:
        return src.readlines()
def rotate(phone_list, myproxy):
    """Worker body: look up every phone in *phone_list* through proxy host *myproxy*.

    Skips numbers already listed in the module-global pass_phones; writes
    scraped rows to fileName via write_csv and classification markers to
    pass_file / Not_Found / failed_file. Exits the thread on an empty slice.
    """
    # requests-style proxies dict for this worker's dedicated exit node.
    # NOTE(review): proxy credentials are hard-coded here — move to config.
    proxy = {
        'http': 'http://jfisch01:m4BMkZgP@{}'.format(myproxy),
        'https': 'https://jfisch01:m4BMkZgP@{}'.format(myproxy)
    }
    if not phone_list:
        sys.exit()
    for phone in phone_list:
        if phone in pass_phones:
            # Already handled in a previous run.
            print('!')
            continue
        flag = True
        while flag:  # retry the same number until it is classified
            that_res = thread_run(phone, proxy)
            if that_res.status_code == 404:
                # No page for this number: mark it processed and move on.
                write_phones(lines=[[phone.strip()]], file_name=pass_file)
                flag = False
            # NOTE(review): `that_res is not None` is a dead guard —
            # .status_code was already dereferenced just above.
            if that_res is not None and that_res.status_code == 200:
                write_phones(lines=[[phone.strip()]], file_name=pass_file)
                try:
                    soup = BeautifulSoup(that_res.text, 'html5lib')
                    table_soup = soup.select('.people-list .card-block')
                    if table_soup:
                        owner_soup = table_soup[0].find(attrs={'class': 'card-title'})
                        if owner_soup:
                            # Owner name, age and current address scraped
                            # from the first people-list card.
                            name = owner_soup.find(attrs={'class': 'larger'}).text.strip()
                            age_soup = table_soup[0].find('h3', text=re.compile('Age:'))
                            if age_soup:
                                age = age_soup.next_element.next_element.strip()
                            else:
                                age = ''
                            address_soup = table_soup[0].find('h3', text=re.compile('Current Home Address:'))
                            if address_soup:
                                address = address_soup.find_next('a').text.replace('\n', ' ').strip()
                            else:
                                address = ''
                            line = [name, address, phone.strip(), age, date]
                            print('ok')
                            write_csv(lines=[line], filename=fileName)
                    else:
                        # Page fetched but carries no people-list card.
                        print('~')
                        write_phones(lines=[[phone.strip()]], file_name=Not_Found)
                    flag = False
                except Exception as e:
                    # NOTE(review): on a parse error `flag` stays True, so the
                    # number is re-fetched indefinitely and failed_file gets
                    # one row per attempt — confirm this retry is intentional.
                    print(e)
                    write_phones(lines=[[phone.strip()]], file_name=failed_file)
def thread_run(number, proxy):
    """Fetch the fastpeoplesearch page for a phone number, retrying until 200 or 404.

    number: raw phone string (e.g. '+13235551234'); digits [1:4]/[4:7]/[7:11]
            form the URL slug.
    proxy:  requests-style proxies dict used for every attempt.
    Returns the final requests.Response; exits the thread on HTTP 429.
    """
    req_number = '{}-{}-{}'.format(number[1:4], number[4:7], number[7:11])
    url = 'https://www.fastpeoplesearch.com/{}'.format(req_number)
    header = {
        'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.138 Safari/537.36',
    }
    # Iterative retry loop: the original recursed into itself on every retry,
    # growing the call stack without bound when the target keeps failing.
    while True:
        try:
            s = requests.Session()
            time.sleep(10)  # coarse politeness delay before each attempt
            res = s.get(url=url, headers=header, proxies=proxy, timeout=10)
            print(res.status_code)
            if res.status_code == 429:
                # Rate-limited: kill this worker (SystemExit is not caught below).
                sys.exit()
            if res.status_code != 200 and res.status_code != 404:
                time.sleep(5)
                continue
            return res
        except ConnectionError:
            time.sleep(2)
        except Exception as e:
            print('Error: ', e)
            time.sleep(3)
def source_txt(path, num_chunks=252):
    """Read the phone-number file and split its lines into *num_chunks* slices.

    path:       text file with one phone number per line (newlines are kept).
    num_chunks: number of slices to produce — generalized from the original's
                hard-coded 252 (one slice per worker thread); the default
                preserves the old behavior.
    Returns a list of num_chunks lists; trailing slices may be empty.
    """
    # `with` closes the handle even on a read error (the original left it open).
    with open(path, encoding='utf-8') as src:
        lines = src.readlines()
    # Chunk size matches the original formula: len // n + 1.
    chunk_size = len(lines) // num_chunks + 1
    return [lines[j * chunk_size:(j + 1) * chunk_size] for j in range(num_chunks)]
def get_proxy(file, port='29842'):
    """Read proxy hosts (one per line) from *file*; return them as 'host:port' strings.

    port: proxy port appended to every host — generalized from the original's
          hard-coded ':29842'; the default preserves the old behavior.
    """
    # `with` closes the handle even on error (the original relied on manual close).
    with open(file, 'r') as src:
        rows = src.readlines()
    return [r.strip() + ':' + port for r in rows]
def main():
    """Spawn one worker thread per (phone slice, proxy) pair and wait for them all.

    Replaces the original's ~760 hand-written lines (threads 1..252 spelled
    out one by one) with equivalent loops: same construction order, same
    start order, same join-all barrier and final message.
    """
    # Thread IDs are 1-based; the data and proxy lists are 0-based.
    threads = [myThread(idx + 1, thread_number_list[idx], proxy_list[idx])
               for idx in range(252)]
    # Start every worker (all were constructed first, as in the original).
    for worker in threads:
        worker.start()
    # Wait for every worker to finish before reporting completion.
    for worker in threads:
        worker.join()
    print("Exiting Main Thread")
def test():
    """Smoke-test helper: run a single worker thread on the first number."""
    worker = myThread(1, thread_number_list[0])
    worker.start()
    worker.join()
if __name__ == '__main__':
    # Run configuration: today's date plus the input/output file locations.
    date = datetime.today().strftime('%m/%d/%Y')
    proxy_file = '/home/ubuntu/laptop/proxy.txt'
    source_file = '/home/ubuntu/laptop/Filtered.txt'
    # Phone numbers to process, one per worker thread (loaded by source_txt).
    thread_number_list = source_txt(path=source_file)
    # Output CSV files: all results, passing numbers, failures, not-found.
    fileName = 'Filtered.csv'
    pass_file = 'pass_phones_Filtered.csv'
    failed_file = 'failed_Filtered.csv'
    Not_Found = 'Not_Found_Filtered.csv'
    # Previously processed numbers (skipped by the workers, presumably).
    pass_phones = read_phones()
    proxy_list = get_proxy(proxy_file)
    start_time = time.time()
    main()
| true |
49303486220a5ff051e9823a4004577ae9171a67 | Python | lanxic/reverse-tunnel | /reverse-tunnel.py | UTF-8 | 2,501 | 2.625 | 3 | [] | no_license | #!/usr/bin/python
import os
import sys, getopt
import json
from subprocess import check_output
version = '1.0'
# Module-level defaults for the CLI options.
# NOTE(review): main() assigns local variables of the same names, so these
# globals are never actually updated by the argument parsing — confirm intent.
filename = ''
develop = ''
def info_main():
    # Print CLI usage help to stdout (Python 2 print statements).
    print "Usage: %s [OPTIONS]" % os.path.basename(sys.argv[0])
    print "example: %s -i config.json -u alex" % os.path.basename(sys.argv[0])
    print ""
    print('Where OPTIONS:')
    print('-i config.json Config file for register ex: config.json')
    print('-u developer Developer name ex: alex')
    print ""
def read_config(arg):
filename = arg
try:
if os.path.exists(filename) == True:
txt = open(filename)
resp = json.loads(txt.read())
# print resp
else:
print "bonk give correct config_file"
except Exception as e:
print "Bonk...got error in read_config function"
def registry_dotunnel(filename,develop):
    # Read the tunnel config, look up the developer, and open a blocking
    # reverse SSH tunnel (ssh -N -R) from the server to the developer's bind.
    # NOTE(review): the config file handle is never closed, and the broad
    # except hides the real error behind a generic message.
    try:
        txt = open(filename)
        resp = json.loads(txt.read())
        # Server connection settings (expects keys: user, host).
        get_server = (resp['server'])
        server = {}
        server.update(get_server)
        # Per-developer settings (expects keys: name, remote_bind, dev_bind).
        get_developer = (resp['developers'])
        developers = {}
        developers.update(get_developer)
        # Exit if the developer name does not exist (has_key is Python 2 only).
        if not developers.has_key(develop):
            print "Developer name '%s' not found." % develop
            exit(1)
        currentDev = developers[develop]
        currentServer = server['user'] + '@' + server['host']
        print 'Creating tunnel for %s at %s (%s) to %s...' % (currentDev['name'], server['host'], currentDev['remote_bind'], currentDev['dev_bind'])
        # Blocks until the ssh process exits.
        output = check_output(['ssh', '-N', '-R', currentDev['remote_bind'] + ':' + currentDev['dev_bind'], currentServer])
        print output
        exit(0)
    except Exception as e:
        print "Bonk...got error in registry_dotunnel function"
def main(argv):
    # CLI dispatcher: show usage when run without arguments, otherwise
    # handle -h/-v/-i/-u via getopt.
    if len(sys.argv) == 1:
        info_main()
    try:
        opts, args = getopt.getopt(argv,"h:i:u:v")
    except getopt.GetoptError:
        info_main()
        sys.exit(2)
    for opt, arg in opts:
        if opt == '-h':
            print "usage: %s -i <config_file.ini>" % os.path.basename(sys.argv[0])
            sys.exit()
        elif opt in ("-v"):
            # NOTE(review): ("-v") is a string, not a tuple; `opt in "-v"` only
            # works here by substring coincidence.
            print 'Version:', version
        elif opt in ("-i"):
            filename = arg
            read_config(filename)
        elif opt in ("-u"):
            # NOTE(review): if -u is given without a preceding -i, `filename`
            # is a local variable that was never assigned -> UnboundLocalError.
            develop = arg
            registry_dotunnel(filename,develop)
# Script entry point: forward all CLI arguments except the program name.
if __name__ == "__main__":
    main(sys.argv[1:])
| true |
2a2227fddffedaabf5caf1c14e8719a4a322af44 | Python | mpoyraz/Udacity-Data-Engineering-Nanodegree | /L3-Data-Lakes-With-Spark/P4-Data-Lake-With-Spark/etl.py | UTF-8 | 14,059 | 2.75 | 3 | [] | no_license | import argparse
import configparser
import os
from datetime import datetime
from pyspark import StorageLevel
from pyspark.sql import SparkSession, Window
from pyspark.sql.functions import udf, col, from_unixtime, row_number
from pyspark.sql.functions import year, month, dayofmonth, hour, weekofyear, dayofweek
from pyspark.sql.types import StructType, StructField, StringType, LongType, DoubleType
# Parse the configuration file
config = configparser.ConfigParser()
config.read('dl.cfg')

# Set the environment variables for AWS access (read by Spark's S3A connector
# and by boto3 in create_s3_bucket).
os.environ['AWS_ACCESS_KEY_ID']=config['AWS']['AWS_ACCESS_KEY_ID']
os.environ['AWS_SECRET_ACCESS_KEY']=config['AWS']['AWS_SECRET_ACCESS_KEY']

# Log prefix for ETL job
log_prefix = "ETL SPARKIFY"

# Estimated number of partitions based on ~6 CPU cores and dataset size
# (rule of thumb: ~2 partitions per core).
num_cpu = 6
num_partitions = 2 * num_cpu
def create_spark_session():
    """ Creates Spark Session object with appropriate configurations
        (hadoop-aws package for S3A, fast output committer, INFO logging,
        shuffle partitions sized to the estimated core count).

    Returns:
        spark: Spark Session object
    """
    print("{}: creating Spark Session...".format(log_prefix))
    spark = SparkSession \
        .builder \
        .appName("ETL Sparkify") \
        .config("spark.jars.packages", "org.apache.hadoop:hadoop-aws:2.7.0") \
        .getOrCreate()

    # Speed up the file writing into S3 (output committer algorithm v2)
    spark.conf.set("mapreduce.fileoutputcommitter.algorithm.version", "2")
    # Set the log level
    spark.sparkContext.setLogLevel('INFO')
    # Set dataframe shuffle partitions (see num_partitions at module level)
    spark.conf.set("spark.sql.shuffle.partitions", num_partitions)

    print("{}: Spark Session is created and ready for use".format(log_prefix))
    return spark
def process_song_data(spark, input_data, output_data):
    """ Reads the song dataset from S3 and transforms it into the songs and
        artists dimension tables; both tables are written back to S3 as parquet.

    Args:
        spark : Spark Session object
        input_data (str): input S3 bucket path
        output_data (str): output S3 bucket path
    """
    # Get filepath to song data file
    song_data_path = os.path.join(input_data, 'song_data/*/*/*/*.json')
    print("{}: start processing {}".format(log_prefix, song_data_path))

    # Define the schema for songs data files
    schema_song = StructType([StructField('artist_id', StringType(), True),
                              StructField('artist_latitude', DoubleType(), True),
                              StructField('artist_longitude', DoubleType(), True),
                              StructField('artist_location', StringType(), True),
                              StructField('artist_name', StringType(), True),
                              StructField('duration', DoubleType(), True),
                              StructField('num_songs', LongType(), True),
                              StructField('song_id', StringType(), True),
                              StructField('title', StringType(), True),
                              StructField('year', LongType(), True)])

    # Read song data file
    df_song = spark.read.json(song_data_path, schema = schema_song)

    # Fix: this diagnostic used to run inside `for i in range(100):`, printing
    # the identical partition count 100 times (leftover debug loop).
    print("df_song before: ", df_song.rdd.getNumPartitions())

    # Repartition to ~2 partitions per core (num_partitions)
    df_song = df_song.repartition(num_partitions)

    # Persist song dataframe: it is reused for both the songs and artists tables
    df_song.persist(StorageLevel.MEMORY_AND_DISK)

    # Extract columns to create songs table
    songs_table = df_song.select('song_id','title','artist_id','year','duration').dropDuplicates(['song_id'])

    # Write songs table to parquet files partitioned by year and artist
    songs_table.write.partitionBy('year','artist_id').parquet(os.path.join(output_data, 'songs'), mode='overwrite')
    print("{}: songs table is written into S3".format(log_prefix))

    # Extract columns to create artists table
    artists_table = df_song.select('artist_id',
                                   col('artist_name').alias('name'), \
                                   col('artist_location').alias('location'), \
                                   col('artist_latitude').alias('latitude'), \
                                   col('artist_longitude').alias('longitude') \
                                  ).dropDuplicates(['artist_id'])

    # Write artists table to parquet files
    # NOTE(review): unlike songs above, no mode='overwrite' — this write fails
    # if the path already exists; fine for fresh buckets, confirm for reruns.
    artists_table.write.parquet(os.path.join(output_data, 'artists'))
    print("{}: artists table is written into S3".format(log_prefix))

    # Unpersist song dataframe
    df_song.unpersist()
    print("{}: song dataset processing is finished".format(log_prefix))
def process_log_data(spark, input_data, output_data):
    """ Reads the log dataset from S3 and transforms it into the users, time
        and songplays tables; finally these tables are written back to S3.

    Args:
        spark : Spark Session object
        input_data (str): input S3 bucket path
        output_data (str): output S3 bucket path
    """
    # Get filepath to log data file
    log_data_path = os.path.join(input_data, 'log-data/*/*/*.json')
    print("{}: start processing {}".format(log_prefix, log_data_path))

    # Define the schema for log data files
    schema_log = StructType([StructField('artist', StringType(), True),
                             StructField('auth', StringType(), True),
                             StructField('firstName', StringType(), True),
                             StructField('gender', StringType(), True),
                             StructField('itemInSession', LongType(), True),
                             StructField('lastName', StringType(), True),
                             StructField('length', DoubleType(), True),
                             StructField('level', StringType(), True),
                             StructField('location', StringType(), True),
                             StructField('method', StringType(), True),
                             StructField('page', StringType(), True),
                             StructField('registration', StringType(), True),
                             StructField('sessionId', LongType(), True),
                             StructField('song', StringType(), True),
                             StructField('status', LongType(), True),
                             StructField('ts', LongType(), True),
                             StructField('userAgent', StringType(), True),
                             StructField('userId', StringType(), True)])

    # Read log data file
    df_log = spark.read.json(log_data_path, schema = schema_log)
    # Repartition
    df_log = df_log.repartition(num_partitions)
    # Persist logs dataframe: reused for users, time and songplays below
    df_log.persist(StorageLevel.MEMORY_AND_DISK)

    # Filter by actions for song plays (drop anonymous rows and non-plays)
    df_log_nextSong = df_log.filter(df_log.userId.isNotNull()).filter(df_log.page == 'NextSong')

    # Extract columns for users table: keep only each user's most recent row
    # (max ts) so `level` reflects their latest subscription state
    users_latest_state = df_log_nextSong.groupBy('userId').max('ts') \
                                        .select("userId", col("max(ts)").alias("ts"))
    users_table = df_log_nextSong.join(users_latest_state, on = ['userId','ts']) \
                                 .select(col('userId').alias('user_id'), \
                                         col('firstName').alias('first_name'), \
                                         col('lastName').alias('last_name'), \
                                         'gender', 'level')

    # Write users table to parquet files
    # NOTE(review): no mode='overwrite' here (unlike the songs table) — fails
    # if the path already exists.
    users_table.write.parquet(os.path.join(output_data, 'users'))
    print("{}: users table is written into S3".format(log_prefix))

    # Create datetime column from original timestamp column (ts is epoch ms)
    convert_ms_to_s = udf(lambda x: x//1000, LongType())
    df_timestamp = df_log_nextSong.select(col('ts').alias('start_time')).dropDuplicates()
    df_timestamp = df_timestamp.withColumn("datetime", from_unixtime(convert_ms_to_s(df_timestamp.start_time)))

    # Extract columns to create time table
    time_table = df_timestamp.withColumn("hour", hour("datetime")) \
                             .withColumn("day", dayofmonth("datetime")) \
                             .withColumn("week", weekofyear("datetime")) \
                             .withColumn("month", month("datetime")) \
                             .withColumn("year", year("datetime")) \
                             .withColumn("weekday", dayofweek("datetime")) \
                             .drop('datetime')

    # Write time table to parquet files partitioned by year and month
    time_table.write.partitionBy('year','month').parquet(os.path.join(output_data, 'time'))
    print("{}: time table is written into S3".format(log_prefix))

    # Read in songs & artists tables (written by process_song_data) to use for
    # the songplays fact table
    songs_table = spark.read.parquet(os.path.join(output_data, 'songs'))
    artists_table = spark.read.parquet(os.path.join(output_data, 'artists'))

    # Extract columns from joined song and log datasets to create songplays table.
    # NOTE(review): Window().orderBy without partitionBy pulls every row into a
    # single partition just to number the rows — acceptable for small data.
    songplays_table = df_log_nextSong.join(songs_table, df_log_nextSong.song == songs_table.title) \
                                     .join(artists_table, df_log_nextSong.artist == artists_table.name) \
                                     .join(time_table, df_log_nextSong.ts == time_table.start_time) \
                                     .select(df_log_nextSong.ts.alias('start_time'), \
                                             df_log_nextSong.userId.alias('user_id'), \
                                             df_log_nextSong.level, \
                                             songs_table.song_id, \
                                             artists_table.artist_id, \
                                             df_log_nextSong.sessionId.alias('session_id'), \
                                             df_log_nextSong.location, \
                                             df_log_nextSong.userAgent.alias('user_agent'), \
                                             time_table.year, \
                                             time_table.month ) \
                                     .withColumn('songplay_id', row_number().over(Window().orderBy('song_id')))

    # Write songplays table to parquet files partitioned by year and month
    songplays_table.write.partitionBy('year','month').parquet(os.path.join(output_data, 'songplays'))

    # Unpersist log dataframe
    df_log.unpersist()
    print("{}: logs dataset processing is finished".format(log_prefix))
def generate_s3_bucket_name(prefix = "sparkify-analytics"):
    """ Creates a unique S3 bucket name with given prefix and current timestamp.

    Args:
        prefix (str) : bucket name prefix
    Returns:
        bucket_name (str): S3 compatible bucket name,
            formatted "<prefix>-YYYY-MM-DD-HH-MM-SS"
    """
    # strftime replaces the previous hand-rolled zero-padded format string;
    # the output is byte-for-byte identical.
    dt_formatted = datetime.now().strftime("%Y-%m-%d-%H-%M-%S")
    bucket_name = "{}-{}".format(prefix, dt_formatted)
    print("The output bucket name for S3: {}".format(bucket_name))
    return bucket_name
def create_s3_bucket(bucket_name):
    """ Creates S3 bucket with the given name using boto3 library.
        The bucket is created on the AWS region provided in the config file.

    Args:
        bucket_name (str) : S3 bucket name to create
    Raises:
        Exception: re-raised for any boto3 failure other than the benign
            "bucket already exists" variants, which are only reported.
    """
    # Imported lazily so the module can be used without boto3 installed
    # unless --local bucket creation is actually requested.
    import boto3
    s3 = boto3.resource('s3',
                        region_name = config['AWS']['REGION'],
                        aws_access_key_id = os.environ["AWS_ACCESS_KEY_ID"],
                        aws_secret_access_key = os.environ["AWS_SECRET_ACCESS_KEY"]
                       )
    try:
        response = s3.create_bucket(
            Bucket=bucket_name,
            CreateBucketConfiguration={
                'LocationConstraint': config['AWS']['REGION'],
            },
        )
    except Exception as e:
        if 'BucketAlreadyOwnedByYou' in str(e):
            print("The bucket '{}' is already own by you".format(bucket_name))
        elif 'BucketAlreadyExists' in str(e):
            # NOTE(review): missing .format(bucket_name) — this prints the
            # literal braces instead of the bucket name.
            print("The bucket '{}' already exists")
        else:
            raise(e)
    else:
        print("The bucket name '{}' is successfully created on S3".format(bucket_name))
def main(args):
    """ Performs ETL on song and user logs datasets to create analytics tables on S3.

    The output bucket is resolved in priority order: --bucket argument,
    freshly created bucket (--local), then the config file entry.

    Args:
        args.bucket (str) : Existing S3 bucket name to store analytics tables
        args.local (boolean) : Local testing, creates a S3 bucket if not specified
    """
    # S3 bucket name for input data
    input_data = "s3a://udacity-dend/"
    # S3 bucket name for output data
    if args.bucket is None:
        print("{}: S3 bucket name is not specified as command-line arguments".format(log_prefix))
        if args.local:
            print("{}: creating an S3 bucket using boto3 library".format(log_prefix))
            output_bucket = generate_s3_bucket_name()
            # create the S3 bucket to store fact/dimensional tables
            try:
                create_s3_bucket(bucket_name=output_bucket)
            except Exception as e:
                # Without an output bucket the ETL cannot proceed.
                print(e)
                return
        else:
            print("{}: will try to read S3 bucket name from config file".format(log_prefix))
            output_bucket = config['AWS']['S3_BUCKET_NAME']
    else:
        output_bucket = args.bucket
    print("{}: S3 bucket name for output tables: {}".format(log_prefix, output_bucket))
    output_data = "s3a://{}/".format(output_bucket)

    # create the spark session
    spark = create_spark_session()

    # process the song and user log files on S3 (song data first: the
    # songplays table reads the songs/artists parquet it produces)
    process_song_data(spark, input_data, output_data)
    process_log_data(spark, input_data, output_data)

    print("{}: the ETL job is finished".format(log_prefix))
    spark.stop()
if __name__ == "__main__":
    # Parse command-line arguments and hand off to the ETL driver.
    parser = argparse.ArgumentParser(description="Sparkify ETL")
    parser.add_argument("--bucket", type=str, help="S3 bucket name to store output tables")
    parser.add_argument("--local", help="Local testing, creates a S3 bucket if not specified", action="store_true")
    args = parser.parse_args()
    main(args)
| true |
4b67f94271df2810c6fa7f8bd4715228cd9fc974 | Python | SunghyunChoi/Algorithm | /정글/WEEK07/14425.py | UTF-8 | 1,151 | 3.65625 | 4 | [] | no_license |
import sys
from collections import defaultdict
r = sys.stdin.readline
class Node(object):
    """Single trie node: one character key, children keyed by character,
    and `data` holding the full word on word-terminal nodes only."""
    def __init__(self, key, data=None):
        self.key = key
        self.data = data        # full string, set only on terminal nodes
        self.children = {}      # char -> Node


class Trie(object):
    """Prefix tree supporting exact-word insertion and membership search."""
    def __init__(self):
        self.head = Node(None)  # sentinel root; its key is unused

    def insert(self, string):
        """Insert `string`, marking its terminal node with the full word."""
        curr_node = self.head
        for char in string:
            if char not in curr_node.children:
                curr_node.children[char] = Node(char)
            curr_node = curr_node.children[char]
        curr_node.data = string
        return True

    def search(self, string):
        """Return True iff `string` was inserted as a complete word.

        Fix: previously fell through and returned None (not False) when
        `string` was only a prefix of an inserted word; callers relying on
        truthiness still behave identically.
        """
        curr_node = self.head
        for char in string:
            if char not in curr_node.children:
                return False
            curr_node = curr_node.children[char]
        return bool(curr_node.data)
# Driver: read N (words to insert) and M (queries) from stdin, build the trie
# from the N words, then count how many of the M queries are exact members.
in_string, search_string = map(int, r().split())
newTrie = Trie()
answer = 0
for _ in range(in_string):
    string = r().strip()
    newTrie.insert(string)
for _ in range(search_string):
    string = r().strip()
    if (newTrie.search(string)):
        answer += 1
print(answer) | true |
722ee2cfc3b0876123616242006b96fbb192ddbc | Python | IvanDanyliv/python | /lab7_1.py | UTF-8 | 218 | 3.84375 | 4 | [] | no_license | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Rotate a string left by `num` characters: the first `num` characters are
# moved to the end (e.g. 'abcdef', 2 -> 'cdefab').
string = input('Input string: ')
num = int(input('Input number: '))
first = string[0 : num]   # characters to move to the tail
second = string[num:]     # remainder, becomes the new head
shifted_string = second + first
print(shifted_string)
| true |
225bbe55be912aa1f3cc851c6ad3c8944542a9bd | Python | vyashub/coronaIndia | /relationship_server.py | UTF-8 | 6,043 | 2.640625 | 3 | [
"MIT"
] | permissive | from flask import Flask, request, jsonify, abort
import spacy
from spacy.tokens import Span
from spacy.tokens import Token
import functools
import re
import json
import urllib.request
import logging
nlp = spacy.load("en_core_web_lg")
logger = logging.getLogger(__name__)
def make_dict_lowercase(d):
    """
    Utility method to convert keys and values in a dictionary `d` to lowercase.

    Args:
        `d` (:obj:`dict`): dictionary whose keys and values (both strings)
            have to be converted into lowercase

    Returns:
        dict: a copy of `d` with every key and value lowercased
    """
    # Dict comprehension replaces the manual loop (same semantics, including
    # later duplicates winning when two keys collide after lowercasing).
    return {key.lower(): value.lower() for key, value in d.items()}
def load_country_acryonym_json(
    download_url: str = "https://raw.githubusercontent.com/rohanrmallya/coronaIndia/master/data/countries_acronym_aliases_flattened.json",
) -> dict:
    """
    Loading JSON that has alias / acronym to country name mapping.

    Args:
        download_url (:obj:`str`, optional): The URL from where the .json containing the alias-to-country mapping can be fetched.

    Returns:
        json converted to :obj:`dict` if the `download_url` could be fetched and read,
        an empty dict otherwise (the body returns ``{}``, not ``None``).
    """
    with urllib.request.urlopen(download_url) as url:
        return json.loads(url.read().decode()) if url.getcode() == 200 else {}


# Module-level lookup built once at import time (note: network access on import).
country_acronym_lookup = make_dict_lowercase(load_country_acryonym_json())
def acronym_to_country(acronym):
    """
    Retrieve country name from `acronym` using `country_acronym_lookup` as reference.

    Args:
        acronym (:obj:`str`): acronym for which a country has to be searched

    Returns:
        str: the mapped country (title-cased) when a mapping is found,
        otherwise the input `acronym` title-cased.
    """
    country = country_acronym_lookup.get(acronym.lower())
    # Fix: identity comparison with None (`is not None`) instead of `!= None`.
    return country.title() if country is not None else acronym.title()
# Build `l`: a flat list of known Indian state and city names, consulted by
# extract_foreign() to decide whether a travelled place is foreign.
with urllib.request.urlopen(
    "https://raw.githubusercontent.com/bhanuc/indian-list/master/state-city.json"
) as url:
    # NOTE(review): unlike the loader above, no HTTP status check here.
    state_city = json.loads(url.read().decode())
l = ["India", "Mumbai"]
for k, v in state_city.items():
    l.append(k)   # state name
    l = l + v     # all cities of that state
l = [ele.replace("*", "") for ele in l]  # strip '*' markers from the source data
def get_travel_status(span):
    """Getter for Span._.travel_status: classify a GPE entity as travelled
    'from' or 'to' based on the single token preceding the span.

    Returns "from", "to", or None (non-GPE spans and unrecognised prepositions).
    """
    if span.label_ != "GPE":
        return None
    preceding = span.doc[span.start - 1].text
    if preceding in ("from", "through", "via", "Via"):
        return "from"
    if preceding in ("to", "and"):
        return "to"
    return None
def get_nat(span):
    """Getter for Span._.nationality: the span text for NORP entities, else None."""
    return span.text if span.label_ == "NORP" else None
def get_rel(token):
    """Getter for Token._.relationship: when `token` is the word "of", derive
    the relationship phrase from the tokens immediately before it
    (e.g. "Son of P1" -> "Son", "family members of P2" -> "Family Member")."""
    if token.text == "of":
        prev_token = token.doc[token.i - 1]
        prev2 = None
        if token.i > 2:
            prev2 = token.doc[token.i - 2]
            # "X and Y of ..." -> return the three preceding tokens joined,
            # unless the token 3 back starts with "P" (presumably a patient
            # id like P12 — TODO confirm intent of the "P" check).
            if prev2.text.lower() == "and" and str(token.doc[token.i - 3])[0] != "P":
                return f"{token.doc[token.i - 3]} {token.doc[token.i - 2]} {token.doc[token.i - 1]}"
        if prev_token.text.lower() in ("members", "member"):
            return "Family Member"
        else:
            return prev_token.text
def extract_relationship(doc):
    """Pair each relationship word in `doc` with the patient ids (P<digits>)
    that appear between it and the next relationship word.

    Returns a list of {"link": <relationship>, "with": [patient ids]} dicts.
    """
    ids = []
    output = []
    # Collect the position *after* each token that carries a relationship.
    for tok in doc:
        if tok._.relationship:
            ids.append(tok.i + 1)
    # Sentinel: end of the document closes the final segment.
    ids.append(doc.__len__())
    for i in range(len(ids) - 1):
        # Patient ids mentioned in the segment following this relationship word.
        w = re.findall("P[0-9]+", str(doc[ids[i] : ids[i + 1]]))
        output.append({"link": doc[ids[i] - 1]._.relationship, "with": w})
    return output
def extract_travel_place(doc):
    """Collect travelled place names from `doc`, normalised via
    acronym_to_country (acronyms/aliases expanded, title-cased)."""
    places = [ent.text for ent in doc.ents if ent._.travel_status]
    return list(map(acronym_to_country, places))
def extract_nationality(doc):
    """Collect the nationality string of every entity that carries one."""
    return [ent._.nationality for ent in doc.ents if ent._.nationality]
def extract_foreign(doc):
    """For each travelled place in `doc`, record the normalised place name and
    whether it lies outside the known Indian state/city list `l`."""
    return [
        {
            "place": acronym_to_country(ent.text),
            "is_foreign": ent.text not in l,
        }
        for ent in doc.ents
        if ent._.travel_status
    ]
# Register the computed attributes on spaCy objects (force=True permits
# re-registration when the module is reloaded).
Span.set_extension("travel_status", getter=get_travel_status, force=True)
Span.set_extension("nationality", getter=get_nat, force=True)
Token.set_extension("relationship", getter=get_rel, force=True)

app = Flask(__name__)

# Fallback payload for records without notes.
# NOTE(review): this dict is shared and mutable — callers must not modify it.
default_result = {
    "nationality": [],
    "travel": [],
    "relationship": [],
    "place_attributes": [],
}
@functools.lru_cache(30000)
def record_processor(sent):
    """Run the spaCy pipeline over one free-text note and extract all fields.

    LRU-cached (30k entries) since identical notes recur across requests;
    empty/None input short-circuits to the shared default_result.
    """
    logger.info(f"Travel Input: {sent}")
    if not sent:
        return default_result
    # Strip punctuation before parsing.
    s = re.sub(r"[^\w\s]", " ", sent)
    doc = nlp(s)
    return {
        "nationality": extract_nationality(doc),
        "travel": extract_travel_place(doc),
        "relationship": extract_relationship(doc),
        "place_attributes": extract_foreign(doc),
    }
def process_records(records):
    """Run NLP extraction over every patient record.

    Args:
        records: dict with a "patients" list; each patient dict may carry a
            "patientId" and a free-text "notes" field.

    Returns:
        dict: {"patients": [...]} where each entry is the shared
        `default_result` (no notes) or {patientId: extraction_result}.
    """
    history = []
    for r in records["patients"]:
        if "notes" not in r:  # idiomatic form of: not ("notes" in r.keys())
            history.append(default_result)
            logger.info(f"ಥ_ಥ Missing Notes")
        else:
            # Fix: call record_processor once and reuse the result, instead of
            # invoking it twice (once for the payload, once for the log line).
            result = record_processor(r["notes"])
            history.append({r["patientId"]: result})
            logger.info(f"Travel Output : {r['patientId']}: {result}")
    return {"patients": history}
@app.route("/", methods=["POST"])
def single():
    """Flask endpoint: accept a JSON body of patient records and return the
    extracted travel/relationship information.

    TypeError (malformed body) -> HTTP 400; KeyError (wrong shape) -> JSON error.
    """
    try:
        req_data = request.get_json()
        results = process_records(req_data)
    except TypeError:
        logger.info(f"ಠ~ಠ TypeError Aborting")
        logger.info(f"Error Data : {req_data}")
        abort(400)
    except KeyError:
        logger.info(f"ಠ╭╮ಠ KeyError Aborting")
        logger.info(f"Error Data : {req_data}")
        return jsonify(error="Not the correct request format!")
    return results
#if __name__ == "__main__":
# app.run()
# app.run() | true |
4eea41627e24a09a3defe62166496afe08f3436d | Python | iamashu/Data-Camp-exercise-PythonTrack | /part8-pandas-review/No09-Bachelors-degrees-awarded-to-women.py | UTF-8 | 4,417 | 4.09375 | 4 | [] | no_license | #Bachelor's degrees awarded to women
'''
In this exercise, you will investigate statistics of the percentage of Bachelor's degrees awarded to women from 1970 to 2011. Data is recorded every year for 17 different fields. This data set was obtained from the Digest of Education Statistics.
Your job is to compute the minimum and maximum values of the 'Engineering' column and generate a line plot of the mean value of all 17 academic fields per year. To perform this step, you'll use the .mean() method with the keyword argument axis='columns'. This computes the mean across all columns per row.
The DataFrame has been pre-loaded for you as df with the index set to 'Year'.
#Instructions
100 XP
Print the minimum value of the 'Engineering' column.
Print the maximum value of the 'Engineering' column.
Construct the mean percentage per year with .mean(axis='columns'). Assign the result to mean.
Plot the average percentage per year. Since 'Year' is the index of df, it will appear on the x-axis of the plot. No keyword arguments are needed in your call to .plot().
'''
# Code
# NOTE(review): `df` (the degrees DataFrame indexed by Year) and `plt`
# (matplotlib.pyplot) are pre-loaded by the DataCamp exercise environment —
# this snippet is not runnable standalone.
# Print the minimum value of the Engineering column
print(df['Engineering'].min())
# Print the maximum value of the Engineering column
print(df['Engineering'].max())
# Construct the mean percentage per year: mean (row-wise mean across fields)
mean = df.mean(axis='columns')
# Plot the average percentage per year ('Year' index becomes the x-axis)
mean.plot()
#df.plot()
# Display the plot
plt.show()
'''result
0.8
19.0
'''
'''
In [6]: df.head()
Out[6]:
Agriculture Architecture Art and Performance Biology Business ... Math and Statistics Physical Sciences Psychology Public Administration Social Sciences and History
Year ...
1970 4.229798 11.921005 59.7 29.088363 9.064439 ... 38.0 13.8 44.4 68.4 36.8
1971 5.452797 12.003106 59.9 29.394403 9.503187 ... 39.0 14.9 46.2 65.5 36.2
1972 7.420710 13.214594 60.4 29.810221 10.558962 ... 40.2 14.8 47.6 62.6 36.1
1973 9.653602 14.791613 60.2 31.147915 12.804602 ... 40.9 16.5 50.4 64.3 36.4
1974 14.074623 17.444688 61.9 32.996183 16.204850 ... 41.8 18.2 52.6 66.1 37.3
[5 rows x 17 columns]
In [7]: df.mean()
Out[7]:
Agriculture 33.848165
Architecture 33.685540
Art and Performance 61.100000
Biology 49.429864
Business 40.653471
Communications and Journalism 56.216667
Computer Science 25.809524
Education 76.356236
Engineering 12.892857
English 66.186680
Foreign Languages 71.723810
Health Professions 82.983333
Math and Statistics 44.478571
Physical Sciences 31.304762
Psychology 68.776190
Public Administration 76.085714
Social Sciences and History 45.407143
dtype: float64
In [9]: df.mean(axis='columns') #same as df.mean(axis=1)
Out[9]:
Year
1970 38.594697
1971 38.603481
1972 39.066075
1973 40.131826
1974 41.715916
1975 42.373672
1976 44.015581
1977 45.673823
1978 47.308670
1979 48.811798
1980 49.980583
1981 50.974090
1982 52.009448
1983 52.187399
1984 52.474007
1985 52.399548
1986 52.752830
1987 53.169798
1988 53.130635
1989 53.305542
1990 53.737364
1991 53.471622
1992 53.262399
1993 53.199202
1994 53.238427
1995 53.508401
1996 53.941559
1997 54.446953
1998 55.227195
1999 55.971538
2000 56.501939
2001 56.946913
2002 57.181722
2003 57.367542
2004 57.019094
2005 56.723782
2006 56.262194
2007 56.053781
2008 55.903924
2009 56.026406
2010 55.883043
2011 55.999587
dtype: float64
''' | true |
28c3fe43b249f17df282cd61b1988a91b143464f | Python | Nordlxnder/Beispiele | /Tkinter/Beispiel_canbus_init_und_Filter.py | ISO-8859-1 | 3,395 | 2.703125 | 3 | [] | no_license | #! /usr/bin/env python3
# -*- coding: utf-8 -*-
# coding: utf8
# damit es keine Fehlermeldungen durch Sonderzeichen gibt
import socket
import struct
import threading ,sys
import binascii
'''
!!!!! HINWEIS fuer CANSEND!!!!
CAN Send Beispiel fuer Standard und Extended
cansend can1 -i 0x1F 0x11 0x22 0x33 0x44 0x55 0x55 0x77 0x88
cansend can1 -e -i 0x1F334455 0x11 0x22 0x33 0x44 0x55 0x55 0x77 0x88
cansend can1 1F#1122334455667788
cansend can1 1F334455#1122334455667788
'''
def start_canbus():
    """Open a raw CAN socket and bind it to the 'vcan0' interface.

    Returns:
        The bound socket on success; implicitly None when binding fails
        (a German warning is printed in that case).
    """
    can_interface = socket.socket(socket.AF_CAN, socket.SOCK_RAW, socket.CAN_RAW)
    # Name of the interface
    interface = "vcan0"
    try:
        # Connect to the interface. bind() takes exactly one argument,
        # hence the interface name is wrapped in a 1-tuple.
        can_interface.bind((interface,))
        print("Verbindung mit vcan0 wurde hergestellt!")
        return (can_interface)
    except OSError:
        # User-facing warning kept in German (runtime string).
        Warnmeldung="\n\
can0 konnte mit der Schnittstelle \n\
nicht verbunden werden! \n\
Bitte pruefen Sie ob die Treiber\n\
geladen sind!"
        print(Warnmeldung)
def canbus_Botschaft_lesen(can_interface):
    """Blocking read of one CAN frame (16 bytes) from the raw socket.

    Note: 16 bytes only covers standard-format frames; extended-format
    frames would need a larger receive buffer.
    """
    # .recv reads up to 16 bytes from the socket buffer.
    can_pkt = can_interface.recv(16)
    return(can_pkt)
def can_Botschaft_aufteilen(can_pkt):
    """Unpack a raw 16-byte CAN frame into its parts.

    Frame layout (little-endian, struct format "<IB3x2s2s2s2s"):
        4 bytes  CAN id (unsigned int)
        1 byte   payload length in bytes (unsigned char)
        3 bytes  padding
        4 x 2    payload as four 2-byte chunks

    Returns:
        tuple: (can_id, number_of_16bit_values, v1, v2, v3, v4) where the
        value count is the payload byte length halved.
    """
    frame_layout = "<IB3x2s2s2s2s"
    can_id, length, wert1, wert2, wert3, wert4 = struct.unpack(frame_layout, can_pkt)
    # Each value occupies two bytes, so the value count is half the byte length.
    Werteanzahl = length // 2
    return (can_id, Werteanzahl, wert1, wert2, wert3, wert4)
def can_Filter():
    """Install a receive filter on the module-global CAN socket.

    A frame passes when:  <received_can_id> & mask == can_id & mask
    With mask 0x7FF (all 11 id bits set) only the exact id 0x64 passes;
    a coarser mask such as 0x700 would accept the whole 0x000-0x0FF range.
    """
    # 11-bit CAN identifiers span 0x000-0x7FF (three hex digits).
    id = 0b000001100100
    mask = 0b011111111111
    # NOTE(review): relies on the global `can_interface` created in __main__;
    # calling this before start_canbus() succeeds raises NameError/AttributeError.
    can_interface.setsockopt(socket.SOL_CAN_RAW, socket.CAN_RAW_FILTER,struct.pack("II", id, mask))
if __name__=='__main__':
    # NOTE(review): `global` at module level is a no-op; can_interface is a
    # module global simply by being assigned here.
    global can_interface
    can_interface=start_canbus()
    can_Filter()
    # Read a single filtered frame, split it, and show the result.
    Botschaft=canbus_Botschaft_lesen(can_interface)
    B=can_Botschaft_aufteilen(Botschaft)
    print(B)
    print("Beispiel_canbus_init.py wurde ausgefuehrt", Botschaft)
| true |
31ede9e5677451d01d9369e23bb1b158dff83a20 | Python | kuangyl0212/PAAA | /src/test/resources/py-source-a/003py_source_file2.py | UTF-8 | 980 | 4.28125 | 4 | [] | no_license | """
2.(10分)在一个农场中,养了一群鸡和兔。一只鸡有两条腿,一只兔有四条腿。请编写程序,完成以下任务:
(1) 编写函数calculate(head, leg),其中形参head和leg分别表示农场中所有动物的头和脚的个数。通过该函数计算,在屏幕上显示鸡和兔的个数。
例如:向该函数传入实参10和32,屏幕显示结果为:
4 6
(注:4和6中间没有空格;表示鸡和兔的个数分别为4和6)
(2) 假设所有动物的头和脚的个数分别为12和32,调用(1)中的函数,打印鸡和兔的个数。
注:(1)题中动物个数需为整数。(2)编程题中的过程代码要同时写在纸质试卷上。
"""
def calculate(head,leg):
for headchik in range(0,head+1):
headrabbit=head - headchik
if headrabbit>=0:
elif 2*headchik+4*headrabbit==leg:
print(headchik,headrabbit)
print(calculate(12,32)
| true |
eaf24a125bb635f3d4755ea3789027b1bf25c50e | Python | mitta64/Group-A28-SVV-assignment | /Code/tools/readdata.py | UTF-8 | 415 | 2.640625 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Fri Feb 28 14:16:34 2020
@author: chang
"""
import re
import numpy as np
# Parse tokens out of the grid coordinate file.
# NOTE(review): the split pattern r'\W|\d' treats digits as separators, so
# `correct` ends up holding only non-digit word tokens — confirm that this is
# intended before reviving the commented-out x/y/z extraction that was here.
correct = []
x, y, z = [], [], []
# Fix: the file handle was previously opened without ever being closed;
# iterating the file directly also avoids materialising all lines at once.
with open("gridxyz.txt", "r") as xyzcoord:
    for line in xyzcoord:
        correct += filter(None, re.split(r'\W|\d', line))
7dc6e96fc4bec1f8336b24406a8f8d0bfc9937c6 | Python | Apocilyptica/Python_notes_school | /Variables/Collections/Lists/query_processes.py | UTF-8 | 211 | 3.3125 | 3 | [] | no_license | tags = ['python', 'development', 'tutorials', 'code']
# Basic list interrogation: size, final element, and that element's index.
number_of_tags = len(tags)
last_item = tags[-1]
index_of_last_item = tags.index(last_item)

# Report each derived value on its own line (same output order as before).
for value in (number_of_tags, last_item, index_of_last_item):
    print(value)
8a7fe9dfc5985b32ef67ef8294126eb6e5cdd963 | Python | wwgrabowski/DP_currency_exchange | /tests/test_accounts.py | UTF-8 | 4,436 | 2.859375 | 3 | [] | no_license |
from unittest import TestCase
from unittest.mock import patch, Mock, MagicMock
from python.accounts import BasicAccount, CreditAccount, DebetAccount, InterestZero
class TestAccounts(TestCase):
def setUp(self):
class MockInterest:
def __init__(self):
self.interest = 0.1
def get_interest(self, value):
return value * self.interest
self.basic = BasicAccount('id_1', 1000, 'Wowo', MockInterest)
self.credit = CreditAccount(self.basic)
self.debet = DebetAccount(self.basic, max_debet=1000)
def test_get_id(self):
basic_id = self.basic.get_id()
self.assertEqual('id_1', basic_id)
def test_get_saldo(self):
saldo = self.basic.get_saldo()
self.assertEqual(saldo, 1000)
def test_change_saldo(self):
self.basic.change_saldo(-200)
current_saldo = self.basic.saldo
self.assertEqual(current_saldo, 800)
def test_change_saldo_to_zero(self):
self.basic.change_saldo(-1000)
current_saldo = self.basic.saldo
self.assertEqual(current_saldo, 0)
def test_change_saldo_under_zero(self):
self.assertRaises(ValueError, self.basic.change_saldo, -1200)
@patch.object(InterestZero, 'get_interest')
def test_add_interest_call(self, mock_get_interest):
mock_get_interest.return_value = 100
self.basic.add_interest()
saldo = self.basic.saldo
self.assertEqual(saldo, 1100)
def test_add_interest(self):
self.basic.add_interest()
saldo = self.basic.saldo
self.assertEqual(saldo, 1100)
def test_add_interest_to_zero(self):
self.basic.saldo = 0
self.basic.add_interest()
saldo = self.basic.saldo
self.assertEqual(saldo, 0)
def test_add_interest_on_debet(self):
self.debet.saldo = -200
self.assertRaises(ValueError, self.debet.add_interest())
saldo = self.debet.saldo
self.assertEqual(saldo, -200)
def test_get_owner(self):
owner = self.basic.get_owner()
self.assertEqual(owner, 'Wowo')
def test_debet_account(self):
id_before = self.basic.get_id()
debet = DebetAccount(self.basic, 1000)
id_after = debet.get_id()
self.assertEqual(id_before, id_after)
def test_credit_account(self):
id_before = self.basic.get_id()
credit = CreditAccount(self.basic)
id_after = credit.get_id()
self.assertEqual(id_before, id_after)
def test_multiple_decorators(self):
id_before = self.basic.get_id()
credit_debet = CreditAccount(DebetAccount(self.basic, 1000))
id_after = credit_debet.get_id()
self.assertEqual(id_before, id_after)
def test_get_credit_debt(self):
debt = self.credit.get_credit_debt()
self.assertEqual(debt, 0)
def test_take_credit(self):
self.credit.take_credit(1000)
debt = self.credit.get_credit_debt()
saldo = self.credit.get_saldo()
self.assertEqual(debt, 1000)
self.assertEqual(saldo, 2000)
def test_take_zero_credit(self):
self.assertRaises(ValueError, self.credit.take_credit(0))
debt = self.credit.get_credit_debt()
saldo = self.credit.get_saldo()
self.assertEqual(debt, 0)
self.assertEqual(saldo, 1000)
def test_take_negative_credit(self):
self.assertRaises(ValueError, self.credit.take_credit(-200))
debt = self.credit.get_credit_debt()
saldo = self.credit.get_saldo()
self.assertEqual(debt, 0)
self.assertEqual(saldo, 1000)
def test_debet_account_change_saldo(self):
self.debet.change_saldo(-1500)
saldo = self.debet.get_saldo()
self.assertEqual(saldo, -500)
def test_debet_account_max_debt(self):
self.debet.change_saldo(-2000)
saldo = self.debet.get_saldo()
self.assertEqual(saldo, -1000)
self.assertRaises(ValueError, self.credit.change_saldo, -1)
    def test_debet_account_overdebt(self):
        # Exceeding the credit limit in a single step must be rejected.
        # NOTE(review): asserts on self.credit although the test name says
        # "debet" -- confirm which account was intended.
        self.assertRaises(ValueError, self.credit.change_saldo, -2500)
def test_accept(self):
id = '1'
visitor = Mock()
visitor.visit_product_id = MagicMock(return_value=id)
product_id = self.basic.accept(visitor)
visitor.visit_product_id.assert_called()
self.assertEqual(id, product_id)
| true |
2745bdc4f1fab7cec897417a0c8e4a906139cff1 | Python | shiyanboxer/Web-Scraper-Deal-Sourcing | /Test/test.py | UTF-8 | 2,604 | 3.921875 | 4 | [] | no_license | # *** IMPORT LIBRARIES ***
from bs4 import BeautifulSoup
import requests
import csv
"""
OPEN HTML FILE - read in and pass in html file using lxml parser
with open('sample.html') as html_file:
# soup variable which is a BeautifulSoup object of our parsed html
soup = BeautifulSoup(html_file, 'lxml')
# EXAMPLE OF RETRIEVING TEXT
article = soup.find('div', class_='article')
headline = article.h2.a.text
summary = article.p.text
# PRINT ENTIRE FORMATED HTML PAGE
print(soup.prettify())
"""
# OPEN FILE FROM WEB USING REQUEST
# this will return a response object, and with the .text it will return an html equivalent
source = requests.get('http://coreyms.com').text
# soup variable which is a BeautifulSoup object of our parsed html
soup = BeautifulSoup(source, 'lxml')
# CSV FILE
# create a file and 'w', write to it
csv_file = open('test.csv', 'w')
# create an object, csv_writter
csv_writer = csv.writer(csv_file)
# use writerow method to write row and pass in headers
csv_writer.writerow(['headline', 'summary', 'video_link'])
# *** SOURCE INFO ***
"""
for loop that iterates over all articles in the page
article in this case contains the heading, description, and video link
use for loop and find_all method to iterate get all articles content in the page
"""
for article in soup.find_all('article'):
headline = article.h2.a.text
print(headline)
# find method finds searches for a div of specific class
# by adding p.text we get the content, if we didn't we would get the html
summary = article.find('div', class_='entry-content').p.text
print(summary)
# geting the link, the link is in an iframe under source
try:
# returns video link
vid_src = article.find('iframe', class_='youtube-player')['src'] # access source attribute
# use the split method to split the url based on values
vid_id = vid_src.split('/')[4] # get the 4th index
vid_id = vid_id.split('?')[0]
# f string to format the string
yt_link = f'https://youtube.com/watch?v={vid_id}'
# *** ERRORS ***
# in case there is missing content, use try except block
except Exception as e:
# copy paste everything in the youtube link block
yt_link = None # could not get youtube link so set to none
print(yt_link)
print() # black line
# *** WRITE AND CLOSE CSV ***
# write the data to CSV for every iteration
csv_writer.writerow([headline, summary, yt_link])
# outside of the loop, close the file
csv_file.close()
| true |
18b4df62239e551c4647ebede3516866443521d2 | Python | hoangtuyenblogger/nhap-mon-khoa-hoc-du-lieu | /15092020_String_list_bai_tap.py | UTF-8 | 753 | 3.96875 | 4 | [] | no_license | '''
#Bài 1
name = input("Nhap vao ho va ten: ")
print("Ten cua ban la: " + name[0:])
print("In nguoc ten cua ban: " + name[::-1])
print("-----------------------------------------")
#Bài 2
long = input("Nhập vào tên của Long :")
tuyen = input("Nhập vào tên của Tuyến :")
if len(long) < len(tuyen):
print(tuyen +" là best ok!")
print("-----------------------------------------")
#Bài 3
chuoi = input("Nhập vào dãy số( cách nhau bởi dấu phẩy) :")
list = chuoi.split(',');
sum =0
for i in list:
sum = sum + int(i)
print("Tổng s = ", sum)
print("-----------------------------------------")
'''
# Exercise 4 (Bài 4): read a string from the user and print it upper-cased.
my_str = input("Nhập vào chuỗi của bạn :")
print("Chuỗi của bạn sau khi in hoa: " + my_str.upper())
a267175e20df3bcc421b041fc49317a75f474e87 | Python | sankalp0210/BPlusTree-LinearHashing | /linear_hash.py | UTF-8 | 1,698 | 3.28125 | 3 | [] | no_license | import sys
class LinearHash:
    """Linear-hashing table of integers with 250-entry blocks per bucket.

    Buckets are split one at a time (tracked by ``split_idx``) whenever the
    load factor keys / (total_blocks * 250) exceeds 0.75.  ``idx_hash`` is
    the current hash level: buckets at or above the split pointer use
    ``val % 2**idx_hash``; already-split buckets use the finer hash
    ``val % 2**(idx_hash + 1)``.
    """

    def __init__(self):
        '''Initialising the class variables.'''
        self.keys = 0
        self.total_blocks = 2
        self.split_idx = 0
        self.idx_hash = 1
        # BUG FIX: the original wrote `self.bucks[0] = self.bucks[1] = [[]]`,
        # aliasing both buckets to the SAME list object, so every insert into
        # bucket 0 also appeared in bucket 1.  Give each bucket its own blocks.
        self.bucks = {0: [[]], 1: [[]]}

    def add_bucket(self):
        '''Add a new bucket and rehash the bucket at the split pointer.'''
        self.bucks[len(self.bucks)] = [[]]
        self.total_blocks += 1
        # All buckets of this level have been split: move to the next level.
        if len(self.bucks) > 1 << (self.idx_hash + 1):
            self.idx_hash += 1
            self.split_idx = 0
        # BUG FIX: the original computed the bucket to rehash as
        # `len(...) - 1 << idx_hash` (a precedence bug: `-` binds before `<<`)
        # and then iterated it with the invalid comprehension
        # `[val for val in i for i in ...]`, which raises NameError.  The
        # bucket to split is simply the one at the split pointer; the new
        # bucket's index equals split_idx + 2**idx_hash.
        upd_idx = self.split_idx
        to_update = [val for block in self.bucks[upd_idx] for val in block]
        self.total_blocks -= len(self.bucks[upd_idx])
        self.bucks[upd_idx] = [[]]
        self.total_blocks += 1
        # Advance the pointer first so the redistribution below (and future
        # lookups) use the finer hash for the bucket just split.
        self.split_idx += 1
        for val in to_update:
            hash_val = val % (1 << self.idx_hash)
            if self.split_idx > hash_val:
                # Bucket already split at this level: use the finer hash.
                hash_val = val % (1 << (self.idx_hash + 1))
            if len(self.bucks[hash_val][-1]) >= 250:
                self.total_blocks += 1
                self.bucks[hash_val].append([])
            self.bucks[hash_val][-1].append(val)

    def insert(self, val):
        '''Insert ``val``; return 1 if it was already present, else None.'''
        hash_val = val % (1 << self.idx_hash)
        if self.split_idx > hash_val:
            hash_val = val % (1 << (self.idx_hash + 1))
        if any(val in block for block in self.bucks[hash_val]):
            return 1
        self.keys += 1
        if len(self.bucks[hash_val][-1]) >= 250:
            self.total_blocks += 1
            self.bucks[hash_val].append([])
        # (debug print of every inserted value removed)
        self.bucks[hash_val][-1].append(val)
        # Grow once the overall load factor passes 0.75.
        if self.keys / (self.total_blocks * 250.0) > 0.75:
            self.add_bucket()
if __name__ == "__main__":
if len(sys.argv) != 2:
sys.exit("Usage: python file.py input_file")
hash_obj = LinearHash()
with open(sys.argv[1], 'r') as f:
for line in f:
hash_obj.insert(int(line.strip()))
| true |
8f331f5f6505778f1758cd40abc97e9926c48ccb | Python | hadikomeili/online_market-django- | /product/tests.py | UTF-8 | 4,336 | 2.578125 | 3 | [] | no_license | from django.test import TestCase
from datetime import datetime, timedelta
from .models import *
# Create your tests here.
class ProductModelTest(TestCase):
    """Exercises Product creation, soft-delete, relations and value checks."""

    def setUp(self) -> None:
        self.cat1 = Category.objects.create(name='test_cat')
        self.discount1 = Discount.objects.create(
            type='cash',
            value=1000,
            start_time=datetime.now(),
            expire_time=datetime.now() + timedelta(days=2),
        )
        self.p1 = Product.objects.create(
            name='test_p1',
            company_brand='test_brand',
            category=self.cat1,
            discount=self.discount1,
            price=15000,
            inventory=120,
            specifications='international product',
        )

    def test_create_product(self):
        """A saved product appears in the default queryset."""
        self.p1.save()
        self.assertIn(self.p1, Product.objects.all())

    def test_deleted_product(self):
        """Soft-deleted products move to the archive queryset."""
        self.p1.deleted = True
        self.p1.save()
        self.assertIn(self.p1, Product.objects.archive())
        self.assertNotIn(self.p1, Product.objects.all())

    def test_foreign_key_category(self):
        """The product's category FK resolves to a persisted Category."""
        self.p1.save()
        self.assertIn(self.p1.category, Category.objects.all())

    def test_foreign_key_discount(self):
        """The product's discount FK resolves to a persisted Discount."""
        self.p1.save()
        self.assertIn(self.p1.discount, Discount.objects.all())

    def test_be_positive_price(self):
        self.p1.save()
        self.assertTrue(self.p1.price > 0)

    def test2_be_positive_price(self):
        self.p1.price = -100
        self.p1.save()
        self.assertFalse(self.p1.price > 0)

    def test_price_greater_than_discount(self):
        """Cash discounts stay below the price; percent ones stay below 100."""
        self.p1.save()
        if self.p1.discount.type == 'cash':
            self.assertTrue(self.p1.price > self.p1.discount.value)
        else:
            self.assertTrue(self.p1.discount.value < 100)

    def test_be_positive_inventory(self):
        self.p1.save()
        self.assertTrue(self.p1.inventory >= 0)
class CategoryModelTest(TestCase):
    """Covers Category creation, soft-delete and the self-referencing link."""

    def setUp(self) -> None:
        self.cat1 = Category.objects.create(name='test_cat')
        self.cat2 = Category.objects.create(name='test2_cat', ref_category=self.cat1)

    def test_create_category(self):
        self.cat1.save()
        self.assertIn(self.cat1, Category.objects.all())

    def test_deleted_category(self):
        """Soft-deleted categories move to the archive queryset."""
        self.cat1.deleted = True
        self.cat1.save()
        self.assertIn(self.cat1, Category.objects.archive())
        self.assertNotIn(self.cat1, Category.objects.all())

    def test_self_relation(self):
        """Parent and child categories both persist as Category instances."""
        self.cat1.save()
        self.cat2.save()
        for category in (self.cat1, self.cat2):
            self.assertIn(category, Category.objects.all())
        for category in (self.cat1, self.cat2):
            self.assertIsInstance(category, Category)
class DiscountModelTest(TestCase):
    """Covers Discount creation, soft-delete and value/time constraints."""

    def setUp(self) -> None:
        self.discount1 = Discount.objects.create(
            type='cash',
            value=1000,
            start_time=datetime.now(),
            expire_time=datetime.now() + timedelta(days=2),
        )
        self.discount2 = Discount.objects.create(
            type='percent',
            value=20,
            start_time=datetime.now(),
            expire_time=datetime.now() + timedelta(days=3),
        )

    def test_create_discount(self):
        self.discount1.save()
        self.assertIn(self.discount1, Discount.objects.all())

    def test_deleted_category(self):
        """Soft-deleted discounts move to the archive queryset."""
        self.discount1.deleted = True
        self.discount1.save()
        self.assertIn(self.discount1, Discount.objects.archive())
        self.assertNotIn(self.discount1, Discount.objects.all())

    def test_be_positive_discount(self):
        self.discount1.save()
        self.discount2.save()
        self.assertTrue(self.discount1.value >= 0)
        self.assertTrue(self.discount2.value >= 0)

    def test2_be_positive_discount(self):
        self.discount2.value = -10
        self.discount2.save()
        self.assertFalse(self.discount2.value >= 0)

    def test_type_percent_lesser_than_hundred(self):
        """Percent-type discounts must stay below 100."""
        self.discount2.save()
        if self.discount2.type == 'percent':
            self.assertTrue(self.discount2.value < 100)

    def test_expire_time_greater_than_start_time(self):
        self.discount2.save()
        self.assertTrue(self.discount2.expire_time > self.discount2.start_time)
c4e86cbea4902f4149efa986fd99b1c3fdb14814 | Python | thunderz99/keras-fruit-gan | /fruit-cond-gan.py | UTF-8 | 10,889 | 2.75 | 3 | [] | no_license | """
Conditional MLP GAN to generate fruit images.
"""
import sys
import os
from keras.datasets import mnist
from keras.models import Sequential, Model
from keras.models import load_model
from keras.layers import Input, Dense, Reshape, Flatten, Dropout, multiply
from keras.layers import BatchNormalization, Activation, Embedding, concatenate
from keras.layers import ZeroPadding2D
from keras.layers.advanced_activations import LeakyReLU
from keras.layers.convolutional import UpSampling2D, Conv2D
from keras.optimizers import Adam
from keras.utils.np_utils import to_categorical
from PIL import Image
import matplotlib.pyplot as plt
import numpy as np
class FruitCondGanModel:
    """Conditional MLP GAN that generates fruit images from class labels.

    Both the generator and the discriminator are fully-connected networks;
    each is conditioned on the class label via an Embedding whose output is
    concatenated with the noise vector / flattened image.
    """

    def __init__(self, image_size=48):
        """Build generator, discriminator and the stacked (combined) model.

        image_size: side length in pixels of the square training images.
        """
        # hyper parameter
        self.latent_dim = 100  # size of the generator's noise input
        self.img_rows = image_size
        self.img_cols = image_size
        self.channels = 3  # RGB
        self.img_shape = (self.img_rows, self.img_cols, self.channels)
        self.model_dir = "saved_models"
        dataset = "fruit"
        if(dataset == "mnist"):
            # Load the dataset
            # NOTE(review): img_shape was already computed above from
            # image_size, so in this branch it no longer matches the 28x28x1
            # values set here, and self.categories (used by sample_images)
            # is never assigned -- confirm before enabling the mnist path.
            (self.x_train, self.y_train), (_, _) = mnist.load_data()
            self.x_train = np.expand_dims(self.x_train, axis=3)
            self.num_classes = 10
            self.channels = 1
            self.img_rows = 28
            self.img_cols = 28
        else:
            # prepare train data (loads images and sets num_classes/categories)
            self.prepare_train_data()
        optimizer = Adam(0.0002, 0.5)
        # Build and compile the discriminator
        self.discriminator = self.build_discriminator()
        self.discriminator.compile(loss=['binary_crossentropy'],
                                   optimizer=optimizer,
                                   metrics=['accuracy'])
        # Build the generator
        self.generator = self.build_generator()
        # The generator takes noise and the target label as input
        # and generates the corresponding digit of that label
        noise = Input(shape=(100,))
        label = Input(shape=(1,))
        img = self.generator([noise, label])
        # For the combined model we will only train the generator
        self.discriminator.trainable = False
        # The discriminator takes generated image as input and determines validity
        # and the label of that image
        valid = self.discriminator([img, label])
        # The combined model (stacked generator and discriminator)
        # Trains generator to fool discriminator
        self.combined = Model([noise, label], valid)
        self.combined.compile(loss=['binary_crossentropy'],
                              optimizer=optimizer)

    def build_discriminator(self):
        """Return a Model([img, label] -> validity in [0, 1])."""
        model = Sequential()
        # Input is the flattened image concatenated with the label embedding,
        # hence twice the flattened image size.
        model.add(Dense(512, input_dim=(np.prod(self.img_shape) * 2)))
        model.add(LeakyReLU(alpha=0.2))
        model.add(Dense(512))
        model.add(LeakyReLU(alpha=0.2))
        model.add(Dropout(0.4))
        model.add(Dense(512))
        model.add(LeakyReLU(alpha=0.2))
        model.add(Dropout(0.4))
        model.add(Dense(1, activation='sigmoid'))
        model.summary()
        img = Input(shape=self.img_shape)
        label = Input(shape=(1,), dtype='int32')
        # Embed the integer label into a vector as large as the flat image.
        label_embedding = Flatten()(Embedding(self.num_classes,
                                              np.prod(self.img_shape))(label))
        flat_img = Flatten()(img)
        print("D flat_img.shape:", flat_img.shape)
        print("D label_embedding.shape:", label_embedding.shape)
        # model_input = multiply([flat_img, label_embedding])
        model_input = concatenate([flat_img, label_embedding])
        print("D model_input.shape:", model_input.shape)
        validity = model(model_input)
        return Model([img, label], validity)

    def build_generator(self):
        """Return a Model([noise, label] -> generated image in [-1, 1])."""
        model = Sequential()
        # Input is the noise vector concatenated with the label embedding,
        # hence twice latent_dim.
        model.add(Dense(256, input_dim=self.latent_dim * 2))
        model.add(LeakyReLU(alpha=0.2))
        model.add(BatchNormalization(momentum=0.8))
        model.add(Dense(512))
        model.add(LeakyReLU(alpha=0.2))
        model.add(BatchNormalization(momentum=0.8))
        model.add(Dense(1024))
        model.add(LeakyReLU(alpha=0.2))
        model.add(BatchNormalization(momentum=0.8))
        # tanh keeps outputs in [-1, 1], matching the input rescaling in train().
        model.add(Dense(np.prod(self.img_shape), activation='tanh'))
        model.add(Reshape(self.img_shape))
        model.summary()
        noise = Input(shape=(self.latent_dim,))
        label = Input(shape=(1,), dtype='int32')
        label_embedding = Flatten()(Embedding(self.num_classes,
                                              self.latent_dim)(label))
        print("noise.shape:", noise.shape)
        print("label_embedding.shape:", label_embedding.shape)
        # model_input = multiply([noise, label_embedding])
        model_input = concatenate([noise, label_embedding])
        print("model_input.shape:", model_input.shape)
        img = model(model_input)
        return Model([noise, label], img)

    def train(self, epochs, batch_size=128, sample_interval=50):
        """Run the adversarial training loop, saving samples/models periodically."""
        (X_train, y_train) = self.x_train, self.y_train
        # Configure input: rescale pixel values from [0, 255] to [-1, 1].
        X_train = (X_train.astype(np.float32) - 127.5) / 127.5
        y_train = y_train.reshape(-1, 1)
        print("X_train.shape", X_train.shape)
        print("y_train.shape", y_train.shape)
        # Adversarial ground truths
        valid = np.ones((batch_size, 1))
        fake = np.zeros((batch_size, 1))
        for epoch in range(epochs):
            # ---------------------
            #  Train Discriminator
            # ---------------------
            # Select a random half batch of images
            idx = np.random.randint(0, X_train.shape[0], batch_size)
            imgs, labels = X_train[idx], y_train[idx]
            # Sample noise as generator input
            noise = np.random.normal(0, 1, (batch_size, 100))
            # Generate a half batch of new images
            gen_imgs = self.generator.predict([noise, labels])
            # Train the discriminator
            d_loss_real = self.discriminator.train_on_batch(
                [imgs, labels], valid)
            d_loss_fake = self.discriminator.train_on_batch(
                [gen_imgs, labels], fake)
            d_loss = 0.5 * np.add(d_loss_real, d_loss_fake)
            # ---------------------
            #  Train Generator
            # ---------------------
            # Condition on labels
            sampled_labels = np.random.randint(
                0, self.num_classes, batch_size).reshape(-1, 1)
            # Train the generator (labelled "valid" so it learns to fool D)
            g_loss = self.combined.train_on_batch(
                [noise, sampled_labels], valid)
            # Plot the progress
            print("%d [D loss: %f, acc.: %.2f%%] [G loss: %f]" %
                  (epoch, d_loss[0], 100*d_loss[1], g_loss))
            # If at save interval => save generated image samples
            if epoch % sample_interval == 0:
                self.sample_images(epoch)
                self.discriminator.save(
                    self.model_dir + "/" + "discriminator.h5")
                self.generator.save(self.model_dir + "/" + "generator.h5")
                self.combined.save(self.model_dir + "/" + "combined.h5")

    def sample_images(self, epoch):
        """Generate one image per class and save the grid to images/<epoch>.png."""
        r, c = 1, self.num_classes
        noise = np.random.normal(0, 1, (r * c, 100))
        sampled_labels = np.arange(0, self.num_classes).reshape(-1, 1)
        gen_imgs = self.generator.predict([noise, sampled_labels])
        # Rescale images 0 - 1
        gen_imgs = 0.5 * gen_imgs + 0.5
        # Clamp any values that fall slightly outside [0, 1].
        upper_limit = np.vectorize(lambda x: 1 if x > 1 else x)
        under_limit = np.vectorize(lambda x: 0 if x < 0 else x)
        gen_imgs = upper_limit(gen_imgs)
        gen_imgs = under_limit(gen_imgs)
        # plt.imshow(gen_imgs[1, :, :, 0], cmap='gray')
        # plt.show()
        fig, axs = plt.subplots(r, c)
        cnt = 0
        for i in range(r):
            for j in range(c):
                # subplots() returns a 1-D axes array when r == 1.
                if(r == 1):
                    axs_i_j = axs[j]
                else:
                    axs_i_j = axs[i, j]
                axs_i_j.imshow(gen_imgs[cnt])
                axs_i_j.set_title(
                    "%s" % self.categories[sampled_labels[cnt][0]])
                axs_i_j.axis('off')
                cnt += 1
        fig.savefig("images/%d.png" % epoch)
        plt.close()

    def _load_image_to_array(self, input_dir):
        """Load images under input_dir/<category>/ into (x, one-hot y, categories)."""
        x = []
        y = []
        categories = []
        # Collect the category names from the subdirectories of
        # ./data/train or ./data/test.
        for dir_name in os.listdir(input_dir):
            if dir_name == ".DS_Store":
                continue
            categories.append(dir_name)
        for idx, category in enumerate(categories):
            category_dir = input_dir + "/" + category
            print("---category dir:", category_dir)
            for file in os.listdir(category_dir):
                if file != ".DS_Store" and file != ".keep":
                    filepath = category_dir + "/" + file
                    image = self.preprocess_image(filepath)
                    # Append the finished array to the image list.
                    x.append(image)
                    # Append the ground-truth label index (0, 1, 2, ...).
                    y.append(idx)
        # Convert to a numpy array so it can be fed to Keras.
        x = np.array(x)
        # Convert the label list to one-hot arrays,
        # e.g. 0 -> [1,0,0,0], 1 -> [0,1,0,0].
        if(len(y) > 0):
            y = to_categorical(y)
        return (x, y, categories)

    def prepare_train_data(self):
        """Load the training set from data/train and derive num_classes."""
        # Build the training data.
        self.x_train = []
        self.y_train = []
        self.x_test = []
        self.y_test = []
        self.categories = []
        (self.x_train, self.y_train,
         self.categories) = self._load_image_to_array("data/train")
        self.num_classes = self.y_train.shape[1]
        print("prepare: self.num_classes:", self.num_classes)
        # if subtract_pixel_mean:
        #    self.x_train_mean = np.mean(self.x_train, axis=0)
        #    self.x_train -= self.x_train_mean
        #    self.x_test -= self.x_train_mean
        print('x_train shape:', self.x_train.shape)
        print('y_train shape:', self.y_train.shape)

    def preprocess_image(self, filepath):
        """Read one image file and return an (img_rows, img_cols, channels) array.

        NOTE(review): assumes every source image has exactly `self.channels`
        channels; a grayscale or RGBA file would make the reshape fail --
        confirm the dataset guarantees RGB input.
        """
        # Convert the image to an img_rows x img_cols x channels numpy array.
        image = Image.open(filepath)
        image = np.array(image.resize((self.img_rows, self.img_cols)))
        image = image.reshape(self.img_rows, self.img_cols, self.channels)
        print("preprocess, image.shape:", image.shape)
        return image

    def load(self, filepath):
        """Load a previously saved Keras model from `filepath` into self.model."""
        self.model = load_model(filepath)
if __name__ == '__main__':
    # Train the conditional GAN on the fruit dataset.
    cgan = FruitCondGanModel(image_size=48)
    cgan.train(epochs=20000, batch_size=32, sample_interval=200)
| true |
0afff894f68e334dc1da2d6606fbda9f2279fcf1 | Python | dhruvvyas951/MEAN-MEDIAN-MODE | /MMM.py | UTF-8 | 1,450 | 3.40625 | 3 | [] | no_license | # imports
import csv
from collections import Counter
# local data: read height/weight rows from the CSV, skipping the header.
with open('Graph Visualization/Height-Weight.csv', newline='') as f:
    reader = csv.reader(f)
    file_data = list(reader)
file_data.pop(0)  # drop the header row

# The second column holds the height values.
new_data = []
for row in file_data:
    new_data.append(float(row[1]))
n = len(new_data)

# for mean
total = 0
for x in new_data:
    total += x
mean = total / n

# for median
# BUG FIX: the median is the middle of the *sorted* values; the original
# indexed the unsorted list straight from the CSV.
sorted_data = sorted(new_data)
if n % 2 == 0:
    median1 = float(sorted_data[n//2])
    median2 = float(sorted_data[n//2 - 1])
    median = (median1 + median2)/2
else:
    median = sorted_data[n//2]
print(n)

# for mode: bucket heights into 10-unit ranges and report the midpoint of
# the most populated range.
data = Counter(new_data)
mode_data_range = {
    '50-60': 0,
    '60-70': 0,
    '70-80': 0
}
for height, occurrence in data.items():
    if 50 < float(height) < 60:
        mode_data_range['50-60'] += occurrence
    elif 60 < float(height) < 70:
        mode_data_range['60-70'] += occurrence
    elif 70 < float(height) < 80:
        mode_data_range['70-80'] += occurrence

mode_bounds, mode_occurrence = 0, 0
# Renamed the loop variable: the original shadowed the builtin `range`.
for bucket, occurrence in mode_data_range.items():
    if occurrence > mode_occurrence:
        mode_bounds = [int(bucket.split('-')[0]), int(bucket.split('-')[1])]
        mode_occurrence = occurrence
mode = float((mode_bounds[0] + mode_bounds[1])/2)

# results
print('Mean -> ' + str(mean))
print("Median -> " + str(median))
# BUG FIX: the original format spec was "{mode: 2f}" (sign+width), which
# printed six decimal places; ".2f" (two decimals) was clearly intended.
print(f"Mode -> {mode:.2f}")
64460f4fea961b45830c98d6498dda373ffe9868 | Python | rpask00/graphs_theory | /Elementary Math - Network Flow.py | UTF-8 | 3,289 | 3.0625 | 3 | [] | no_license |
import sys
class Graph:
    """Flow network: labelled vertices with paired forward/residual edges."""

    class Vertex:
        """A labelled node holding its outgoing (and residual) edges."""

        def __init__(self, label):
            self.label = label
            self.edges = []
            # Traversal marker; compared against a per-search token so the
            # graph never needs an explicit "reset visited" pass.
            self.visitedToken = 0

    class Edge:
        """Directed edge with capacity/flow and a link to its residual twin."""

        residual = None

        def __init__(self, from_, to_, isResidual, maxCapacity):
            self.from_ = from_
            self.to_ = to_
            self.isResidual = isResidual
            self.capacity = maxCapacity
            self.flow = 0

        def augment(self, bootleneck):
            """Push `bootleneck` units of flow here and pull them back on the residual."""
            self.flow += bootleneck
            self.residual.flow -= bootleneck

        def remainingCapacity(self):
            """Additional flow this edge can still carry."""
            return self.capacity - self.flow

    def __init__(self):
        # BUG FIX: verticies/nodesCount/edges were class-level attributes, so
        # every Graph instance shared the same containers; make them
        # per-instance state.  (Attribute names are kept -- external code
        # reads g.verticies.)
        self.verticies = {}
        self.nodesCount = 0
        self.edges = []

    def addEdge(self, from_, to_, capacity):
        """Add a forward edge with `capacity` plus its zero-capacity residual twin."""
        key = f'{from_},{to_}'
        if key in self.edges:
            # Edge already present: keep the first capacity, ignore the rest.
            return
        if from_ not in self.verticies:
            self.addVertex(from_)
        if to_ not in self.verticies:
            self.addVertex(to_)
        from_ = self.verticies[from_]
        to_ = self.verticies[to_]
        main = self.Edge(from_, to_, False, capacity)
        residual = self.Edge(to_, from_, True, 0)
        main.residual = residual
        residual.residual = main
        from_.edges.append(main)
        to_.edges.append(residual)
        self.edges.append(key)

    def addVertex(self, label):
        """Create and register a new Vertex for `label`."""
        self.nodesCount += 1
        self.verticies[label] = self.Vertex(label)
def maxFlow(f, t):
    '''Ford-Fulkerson-style max flow from label `f` to label `t`.

    Returns (flow, connections); `connections` maps each "a,b" pair-node
    label to the result node its unit of flow was routed through.
    NOTE(review): operates on the module-level graph `g`, not a parameter,
    so it can only run against the globally built network.
    '''
    f = g.verticies[f]
    t = g.verticies[t]
    visitedToken = 1
    flow = 0
    connections = {}
    def dfs(node, bootleneck=sys.maxsize):
        # Depth-first search for one augmenting path; returns its bottleneck
        # capacity, or 0 when no path from `node` reaches the sink.
        node.visitedToken = visitedToken
        bootleneck_backup = bootleneck
        if node == t:
            return bootleneck
        for edge in node.edges:
            if edge.remainingCapacity() == 0 or edge.to_.visitedToken == visitedToken:
                continue
            bootleneck = dfs(edge.to_, min(
                bootleneck, edge.remainingCapacity()))
            if bootleneck:
                # Record which result node this "a,b" pair was matched to
                # (only pair-node labels contain a comma).
                if str(edge.from_.label).count(','):
                    connections[edge.from_.label] = edge.to_.label
                edge.augment(bootleneck)
                return bootleneck
            else:
                # Dead end down this edge: restore the bottleneck before
                # trying the next outgoing edge.
                bootleneck = bootleneck_backup
        return 0
    while True:
        # One augmenting path per iteration; bumping the token effectively
        # clears the visited marks for the next search.
        bootleneck = dfs(f)
        if not bootleneck:
            break
        flow += bootleneck
        visitedToken += 1
    return (flow, connections)
# Build the bipartite flow network from stdin and decide whether every
# (a, b) pair can be assigned a distinct arithmetic result (a+b, a-b, a*b).
g = Graph()
# NOTE(review): this initial 2 -> 1 edge looks like leftover seed/debug
# data -- confirm whether it is intentional before removing.
g.addEdge(2, 1, 1)
rsults = []
t = int(input())
for _ in range(t):
    a, b = tuple(map(int, input().split(' ')))
    # source -> pair node, capacity 1 (each pair is used exactly once).
    g.addEdge('source', f'{a},{b}', 1)
    # pair node -> each candidate result -> sink, all capacity 1.
    g.addEdge(f'{a},{b}', a+b, 1)
    g.addEdge(a+b, 'sink', 1)
    g.addEdge(f'{a},{b}', a-b, 1)
    g.addEdge(a-b, 'sink', 1)
    g.addEdge(f'{a},{b}', a*b, 1)
    g.addEdge(a*b, 'sink', 1)
flow, connections = maxFlow('source', 'sink')
if flow != t:
    # Some pair could not claim a distinct result value.
    print('impossible')
else:
    for pair in connections.keys():
        a, b = map(int, pair.split(','))
        res = connections[pair]
        # Reconstruct which operation produced the matched result.
        if a+b == res:
            rsults.append(f'{a} + {b} = {res}')
        elif a-b == res:
            rsults.append(f'{a} - {b} = {res}')
        elif a*b == res:
            rsults.append(f'{a} * {b} = {res}')
    for rs in rsults:
        print(rs)
| true |
56ad7d48ba26309a6c62d5e13299203b93042629 | Python | cjmaio/city-explorer-uber | /app.py | UTF-8 | 5,902 | 2.578125 | 3 | [] | no_license | from flask import Flask, render_template, request, jsonify, send_from_directory
from flask_sqlalchemy import SQLAlchemy
from flask_webpack import Webpack
from geoalchemy2 import Geometry
from sqlalchemy import func, distinct
from os import path, getenv
import csv
from flask.ext.heroku import Heroku
# Initialize the Flask application and its configuration.
app = Flask(__name__)
app.config.update({
    'SQLALCHEMY_DATABASE_URI': getenv('PG_DATABASE_URI', 'postgresql://localhost/city-explorer'),
    'SQLALCHEMY_TRACK_MODIFICATIONS': False,
    'DEBUG': True,
    'WEBPACK_MANIFEST_PATH': './static/manifest.json',
    'UPLOAD_FOLDER': '/tmp/'
})
# Initialize our app with Heroku (overrides config from Heroku env vars).
heroku = Heroku(app)
# Where are we located on disk? Used to serve built assets.
here = path.abspath(path.dirname(__file__))
# Initialize our database connection
db = SQLAlchemy(app)
# Initialize our application with Webpack (asset manifest integration).
webpack = Webpack()
webpack.init_app(app)
# Serialize a datetime into a JSON-friendly [date, time] string pair.
# Adapted from http://stackoverflow.com/questions/7102754/jsonify-a-sqlalchemy-result-set-in-flask
def dump_datetime(value):
    """Return ['YYYY-MM-DD', 'HH:MM:SS'] for a datetime, or None for None."""
    if value is None:
        return None
    date_part = value.strftime('%Y-%m-%d')
    time_part = value.strftime('%H:%M:%S')
    return [date_part, time_part]
# Extend a Trip query so that both pickup and dropoff fall inside the
# user-drawn shape (axis-aligned rectangle or arbitrary polygon).
def append_query_trips(trips, is_rectangle, vertices):
    """Filter `trips` to rows whose pickup AND dropoff lie inside the shape.

    `vertices` is a flat [x1, y1, x2, y2, ...] sequence of strings; for a
    rectangle it holds the two opposite corners.
    NOTE(review): the polygon branch splices raw request values into WKT
    text via ST_GeomFromText -- confirm upstream validation of `vertices`
    before trusting this with untrusted input.
    """
    if is_rectangle:
        box = func.ST_MakeBox2D(func.ST_Point(vertices[0], vertices[1]),
                                func.ST_POINT(vertices[2], vertices[3]))
        return (trips
                .filter(func.ST_Contains(box, Trip.dropoff))
                .filter(func.ST_Contains(box, Trip.pickup)))
    # Build "LINESTRING(x1 y1, x2 y2, ..., x1 y1)" -- a closed ring in WKT.
    pairs = ['%s %s' % (vertices[i], vertices[i + 1])
             for i in range(0, len(vertices), 2)]
    pairs.append('%s %s' % (vertices[0], vertices[1]))
    ring = 'LINESTRING(' + ', '.join(pairs) + ')'
    shape = func.ST_MakePolygon(func.ST_GeomFromText(ring))
    return (trips
            .filter(func.ST_Contains(shape, Trip.pickup))
            .filter(func.ST_Contains(shape, Trip.dropoff)))
# Create our Trip model, which contains date, pickup, dropoff, and base data.
class Trip(db.Model):
    """One ride: timestamp, pickup/dropoff POINT geometries and base code."""
    __tablename__ = "trips"
    id = db.Column(db.Integer, primary_key=True)
    date = db.Column(db.DateTime)
    pickup = db.Column(Geometry('POINT'))   # trip start location
    dropoff = db.Column(Geometry('POINT'))  # trip end location
    base = db.Column(db.String(10))         # dispatching base code
    def __init__(self, date, pickup_lat, pickup_lon, dropoff_lat, dropoff_lon, base):
        # Coordinates arrive as strings and are assembled into WKT POINTs.
        # NOTE(review): WKT POINT is "x y" (lon lat) but latitude is written
        # first here -- confirm axis order matches the rest of the queries.
        self.date = date
        self.pickup = 'POINT(' + pickup_lat + ' ' + pickup_lon + ')'
        self.dropoff = 'POINT(' + dropoff_lat + ' ' + dropoff_lon + ')'
        self.base = base
    def __repr__(self):
        return '<Date %r>' % self.date
@app.route('/')
def index():
    """Render the single-page application shell."""
    return render_template('index.html')
@app.route("/assets/<path:filename>")
def send_asset(filename):
return send_from_directory(path.join(here, "build"), filename)
@app.route('/importfile', methods=['POST'])
def import_file():
    """Accept an uploaded trips CSV and load its rows into the database.

    Rows are consumed in PAIRS: each row supplies the pickup point and the
    following row supplies the dropoff point of the same Trip.
    NOTE(review): an odd number of data rows makes next(tripreader) raise
    StopIteration -- confirm the input format guarantees paired rows.
    """
    file = request.files['file']
    if file:
        # Persist the upload to the configured temp folder first.
        file.save(path.join(app.config['UPLOAD_FOLDER'], file.filename))
        # Open the CSV file uploaded, and add in new Trips to the database
        with open(path.join(app.config['UPLOAD_FOLDER'], file.filename)) as csvfile:
            tripreader = csv.reader(csvfile, delimiter=',', quotechar='"')
            header = next(tripreader)  # skip the header row
            for row in tripreader:
                # Extra guard against a repeated header line in the data.
                if row[0] != 'Date/Time':
                    print('importing...')
                    # We grab the next trip to act as the "dropoff" point
                    next_trip = next(tripreader)
                    trip = Trip(row[0], row[1], row[2], next_trip[1], next_trip[2], row[3])
                    db.session.add(trip)
            db.session.commit()
        return 'Import Successful'
    else:
        return 'There was an error.'
@app.route('/trips', methods=['POST'])
def get_trips():
    """Return aggregated pickup/dropoff points inside the requested shape.

    Form fields: `vertices` (comma-separated coordinates; 4 values mean a
    rectangle, more mean a polygon), `date_start`/`date_end` (date range),
    and `show_pickups`/`show_dropoffs` ('true' to include each kind).
    Responds with JSON: {"results": [{"point", "count", "type"}, ...]}.
    """
    vertices = request.form.get('vertices')
    start_date = request.form.get('date_start')
    end_date = request.form.get('date_end')
    show_pickups = request.form.get('show_pickups')
    show_dropoffs = request.form.get('show_dropoffs')
    vertices = vertices.split(',')
    pickup_points = []
    dropoff_points = []
    # We are grabbing the pickup and dropoff points in two separate queries, and adding them together at the end
    if show_pickups == 'true':
        # Count trips grouped by identical pickup point, as WKT text.
        trips = db.session.query(Trip).with_entities(func.count(Trip.pickup), func.ST_AsText(Trip.pickup));
        trips = append_query_trips(trips, (len(vertices) == 4), vertices);
        pickup_points = trips.filter(Trip.date.between(start_date, end_date)).group_by(Trip.pickup).all();
    if show_dropoffs == 'true':
        # Count trips grouped by identical dropoff point, as WKT text.
        trips = db.session.query(Trip).with_entities(func.count(Trip.dropoff), func.ST_AsText(Trip.dropoff));
        trips = append_query_trips(trips, (len(vertices) == 4), vertices);
        dropoff_points = trips.filter(Trip.date.between(start_date, end_date)).group_by(Trip.dropoff).all();
    # We are iterating through these here to assign a type to each.
    result = []
    for i in pickup_points:
        result.append({
            'point': i[1],
            'count': i[0],
            'type': 'pickup'
        })
    for i in dropoff_points:
        result.append({
            'point': i[1],
            'count': i[0],
            'type': 'dropoff'
        })
    return jsonify(results=result)
if __name__ == '__main__':
    # Development entry point: run Flask's built-in server with debug on.
    app.debug = True
    app.run()
| true |
4628ad6417f4e110796dfdc18170b1c3df162fa7 | Python | RimanB/FELINE_project | /scRNA_count_zinbwave_10x/scripts/scRNA_Seurat_export_count_matrix.split.merge.py | UTF-8 | 7,204 | 2.9375 | 3 | [] | no_license | #!/opt/Python/2.7.3/bin/python
import sys
from collections import defaultdict
import numpy as np
import pandas as pd
import re
import os
import argparse
import glob
import psutil
def usage():
test="name"
message='''
python scRNA_Seurat_05individual_from_individual_merge_matrix.py --list FEL011027_responder_vs_non_responder.list
--list: list of patients that need to be merged
'''
print message
class StringConverter(dict):
def __contains__(self, item):
return True
def __getitem__(self, item):
return str
def get(self, default=None):
return str
def mergefiles(dfs=[], on=''):
"""Merge a list of files based on one column"""
if len(dfs) == 1:
return "List only have one element."
elif len(dfs) == 2:
df1 = dfs[0]
df2 = dfs[1]
df = df1.merge(df2, on=on)
return df
# Merge the first and second datafranes into new dataframe
df1 = dfs[0]
df2 = dfs[1]
df = dfs[0].merge(dfs[1], on=on)
print "merging"
print mem_usage(df)
#print memory_usage_psutil()
sys.stdout.flush()
# Create new list with merged dataframe
dfl = []
dfl.append(df)
# Join lists
dfl = dfl + dfs[2:]
dfm = mergefiles(dfl, on)
return dfm
def memory_usage_psutil():
# return the memory usage in percentage like top
process = psutil.Process(os.getpid())
mem = process.get_memory_info()[0] / float(2 ** 20)
return mem
# We're going to be calculating memory usage a lot,
# so we'll create a function to save us some time!
def mem_usage(pandas_obj):
if isinstance(pandas_obj,pd.DataFrame):
usage_b = pandas_obj.memory_usage(deep=True).sum()
else: # we assume if not a df it's a series
usage_b = pandas_obj.memory_usage(deep=True)
usage_mb = usage_b / 1024 ** 2 # convert bytes to megabytes
return "{:03.2f} MB".format(usage_mb)
#int8 Byte (-128 to 127)
#int16 Integer (-32768 to 32767)
#int32 Integer (-2147483648 to 2147483647)
#int64 Integer (-9223372036854775808 to 9223372036854775807)
#float16 Half precision float: sign bit, 5 bits exponent, 10 bits mantissa
#float32 Single precision float: sign bit, 8 bits exponent, 23 bits mantissa
#float64 Double precision float: sign bit, 11 bits exponent, 52 bits mantissa
def mem_usage_dtype(pandas_obj):
gl = pandas_obj
for dtype in ['float','int','object']:
selected_dtype = gl.select_dtypes(include=[dtype])
mean_usage_b = selected_dtype.memory_usage(deep=True).sum()
mean_usage_mb = mean_usage_b / 1024 ** 2
print("Total memory usage for {} columns: {:03.2f} MB".format(dtype,mean_usage_mb))
#converted dtype to reduce memory useage
if dtype == 'float':
converted_float = selected_dtype.astype('float32')
#replacing orginal data in df
#gl.loc[:,selected_dtype.columns] = converted_float
mean_usage_b = converted_float.memory_usage(deep=True).sum()
mean_usage_mb = mean_usage_b / 1024 ** 2
print("Total memory usage for {} columns as float32: {:03.2f} MB".format(dtype,mean_usage_mb))
if dtype == 'int':
converted_int = selected_dtype.astype('int16')
#gl.loc[:,selected_dtype.columns] = converted_int
mean_usage_b = converted_int.memory_usage(deep=True).sum()
mean_usage_mb = mean_usage_b / 1024 ** 2
print("Total memory usage for {} columns as int16: {:03.2f} MB".format(dtype,mean_usage_mb))
return gl
def read_file_pd(infile):
# determine and optimize dtype
# Sample 100 rows of data to determine dtypes.
file_test = pd.read_csv(infile, sep="\t", header=0, nrows=100)
float_cols = [c for c in file_test if file_test[c].dtype == "float64"]
int_cols = [c for c in file_test if file_test[c].dtype == "int64"]
if float_cols > 0:
dtype_cols = {c: np.float16 for c in float_cols}
elif int_cols > 0:
dtype_cols = {c: np.int16 for c in int_cols}
file_df = pd.read_csv(infile, sep="\t", header=0, dtype=dtype_cols)
# check memory usage
print "infile: %s" %(infile)
print "original size"
print file_df.dtypes[1:5,]
print file_df.iloc[1:5,1:50]
print mem_usage(file_df)
#file_df_mini = mem_usage_dtype(file_df)
#print "converted size"
#print file_df_mini.dtypes[1:5,]
#print file_df_mini.iloc[1:5,1:50]
#print mem_usage(file_df_mini)
sys.stdout.flush()
#return file_df_mini
return file_df
def get_file_df(patients, filetype):
df_list = []
for p in patients:
filename = '%s%s' %(p, filetype)
if os.path.exists(filename):
df_file = read_file_pd(filename)
df_list.append(df_file)
return df_list
#FEL027_M_TATATCCTCAAGGTGG
def read_patient_list(infile, prefix):
file_df = pd.read_table(infile, header=None, names=['Patient'])
patients = ['%s_%s' %(prefix, x) for x in list(file_df['Patient'])]
print patients
return patients
def main():
    # Merge per-patient expression tables (selected by --list/--prefix)
    # into one file per configured filetype suffix.
    parser = argparse.ArgumentParser()
    parser.add_argument('--list')
    parser.add_argument('--prefix')
    parser.add_argument('-o', '--output')
    parser.add_argument('-v', dest='verbose', action='store_true')
    args = parser.parse_args()
    # NOTE(review): bare except + a no-op boolean expression; len(None)
    # raising TypeError is what actually triggers usage()/exit here.
    try:
        len(args.list) > 0 and len(args.prefix) > 0
    except:
        usage()
        sys.exit(2)
    if not args.output:
        args.output = "test_merge"
    #file_list = glob.glob('%s/*.ssGSEA.scores.txt' %(args.input))
    #df_list = []
    #for f in sorted(file_list):
    #    df_file = read_file_pd(f)
    #    df_list.append(df_file)
    patients = read_patient_list(args.list, args.prefix)
    #filetypes = ['_10x.seurat.cell_type.anno.txt']
    #for ft in filetypes:
    #    outfile = '%s.%s' %(args.list, ft)
    #    if re.compile(r'.list$').search(args.list):
    #        outfile = re.sub(r'.list', r'%s' %(ft), args.list)
    #    df_list = get_file_df(patients, ft)
    #    df_merged = pd.concat(df_list)
    #    df_merged.to_csv(outfile, sep="\t", index=False)
    #filetypes = ['_10x_gene_symbols.scaled.counts.normalized_to_control.txt', '_10x_gene_symbols.scaled.Cancer_cells.mean.txt', '_10x_gene_symbols.scaled.Endothelial_cells.mean.txt', '_10x_gene_symbols.scaled.Fibroblasts.mean.txt', '_10x_gene_symbols.scaled.Macrophages.mean.txt']
    #filetypes = ['_gene_symbols.normalized.counts.txt', '_gene_symbols.scaled.counts.txt', '_gene_symbols.CPM.txt', '_gene_symbols.raw.counts.txt']
    #filetypes = ['_gene_symbols.normalized.counts.txt']
    filetypes = ['_gene_symbols.raw.counts.txt', '_gene_symbols.CPM.txt']
    #filetypes = ['_gene_symbols.raw.counts.txt']
    #filetypes = ['_gene_symbols.CPM.txt']
    for ft in filetypes:
        print ft
        outfile = '%s%s' %(args.prefix, ft)
        df_list = get_file_df(patients, ft)
        # merge all patient tables on the gene id column
        df_merged = mergefiles(df_list, on="Gene.ID")
        df_merged.to_csv(outfile, sep="\t", index=False)
    #merge
    #outfile = '%s.ssGSEA.scores.txt' %(args.output)
    #df_merged = mergefiles(df_list, on="Gene Set")
    #df_merged.to_csv(outfile, sep="\t", index=False)
if __name__ == '__main__':
    main()
| true |
4c804ae98204e6aca90dc36ec23474824319ee03 | Python | JaykeMeijer/weatherserver | /devices.py | UTF-8 | 1,230 | 2.578125 | 3 | [] | no_license | from database import database
import json
def get_devices(view):
    """Return a list of {'id', 'name'} summaries for the devices visible
    in *view*, with 'name' taken from the database 'prettyname' column."""
    rows = database.get_devices(view)
    return [{'id': row['id'], 'name': row['prettyname']} for row in rows]
def get_device(device_id):
    """Fetch a single device row and expose its public fields as a dict,
    or None when the id is unknown."""
    row = database.get_device(device_id)
    if row is None:
        return None
    fields = ('name', 'prettyname', 'location', 'timezone')
    return {key: row[key] for key in fields}
def get_id(name_or_id):
    """Resolve *name_or_id* (either a numeric id or a device name) to a
    device id, falling back to a name lookup in the database."""
    try:
        # Numeric input: accept it directly when that id exists.
        candidate = int(name_or_id)
        if database.device_id_exists(candidate):
            return candidate
    except ValueError:
        pass
    return database.get_device_id(name_or_id)
def handle_get_device(data, view=None):
    """HTTP-style handler: look up the device named by data['device'].

    Returns (status_code, body); body is an error string on 400 or a JSON
    encoded device description on 200.
    """
    if 'device' not in data:
        return 400, 'Missing device'
    device_id = get_id(data['device'])
    if device_id is None:
        return 400, 'Unknown device'
    device = get_device(device_id)
    if device is None:
        return 400, 'Unknown device'
    return 200, json.dumps(device)
def handle_get_device_list(data, view=None):
    # 'data' is unused; kept so all handlers share the same signature.
    return 200, json.dumps(get_devices(view))
| true |
4bf8b0aca8d6f67d99a4e3fc034c0604236a3bf2 | Python | ECS-251-W2020/final-project-torchfly | /torchfly_dev/text/decode/nucleus_sampling.py | UTF-8 | 1,927 | 2.765625 | 3 | [
"MIT"
] | permissive | import torch
import torch.nn as nn
import torch.nn.functional as F
def top_filtering(logits, top_k=0, top_p=0.0, filter_value=-float('Inf')):
    """ Filter a distribution of logits using top-k, top-p (nucleus) and/or threshold filtering
        Args:
            logits: logits distribution of shape (batch size, vocabulary size)
            top_k: <=0: no filtering, >0: keep only top k tokens with highest probability.
            top_p: <=0.0: no filtering, >0.0: keep only a subset S of candidates, where S is the smallest subset
                whose total probability mass is greater than or equal to the threshold top_p.
                In practice, we select the highest probability tokens whose cumulative probability mass exceeds
                the threshold top_p.
            filter_value: value written into the logits of discarded tokens.
    """
    # batch support!
    if top_k > 0:
        values, _ = torch.topk(logits, top_k)
        # k-th largest logit per row, broadcast across the vocabulary
        min_values = values[:, -1].unsqueeze(1).repeat(1, logits.shape[-1])
        # Fix: honour the caller-provided filter_value here as well; the
        # original branch hard-coded -inf and ignored the parameter.
        logits = torch.where(logits < min_values,
                             torch.full_like(logits, filter_value),
                             logits)
    if top_p > 0.0:
        # Compute cumulative probabilities of sorted tokens
        sorted_logits, sorted_indices = torch.sort(logits, descending=True)
        cumulative_probabilities = torch.cumsum(F.softmax(sorted_logits, dim=-1), dim=-1)
        # Remove tokens with cumulative probability above the threshold
        sorted_indices_to_remove = cumulative_probabilities > top_p
        # Shift the indices to the right to keep also the first token above the threshold
        sorted_indices_to_remove[..., 1:] = sorted_indices_to_remove[..., :-1].clone()
        sorted_indices_to_remove[..., 0] = 0
        sorted_logits = sorted_logits.masked_fill_(sorted_indices_to_remove, filter_value)
        # Undo the sort: scatter the filtered values back to their columns.
        logits = torch.zeros_like(logits).scatter(1, sorted_indices, sorted_logits)
    return logits
5e8ced2f83cc809f7e610483dbdb8810eb6e46a8 | Python | nbgao/Python-Project | /NLP/NLP_jieba.py | UTF-8 | 1,653 | 3.171875 | 3 | [] | no_license | # -*- coding: utf-8 -*-
import jieba
# Full mode: list every possible word the tokenizer can find
seg_list1 = jieba.cut("我来到杭州电子科技大学",cut_all=True)
print ("Full Mode: " + "/ ".join(seg_list1))
# Accurate mode: best single segmentation of the sentence
seg_list2 = jieba.cut("我来到杭州电子科技大学",cut_all=False)
print ("Default Mode: " + "/ ".join(seg_list2))
# Accurate mode is the default when cut_all is omitted
seg_list3 = jieba.cut("我来到了网易杭州研究院大厦")
print (", ".join(seg_list3))
# Search-engine mode: additionally splits long words for indexing
seg_list4 = jieba.cut_for_search("高鹏昺毕业于中国科学院计算所,后在日本京都大学深造")
print (", ".join(seg_list4))
import jieba.analyse
import jieba.posseg as pseg
text = u'中国共产党已经走过90年的光辉历程。90年来,一代又一代共产党人团结带领人民在艰难困苦中奋起、在艰辛探索中前进,完成了民族独立和人民解放的伟大历史任务,建立了社会主义制度,开辟了中国特色社会主义道路,创造了中华民族发展史上最辉煌的业绩。坚持党的建设为党领导的伟大事业服务,是我们党在领导革命、建设、改革的伟大事业中,不断加强和改进党的自身建设的一条宝贵经验。深刻认识和把握这条经验,对于加强新形势下党的建设具有重要意义。'
# Keyword extraction based on TF-IDF weighting
keywords = jieba.analyse.extract_tags(text, topK=20, withWeight=True, allowPOS=())
for item in keywords:
    print (item[0],item[1])
# Keyword extraction based on the TextRank graph algorithm
keywords = jieba.analyse.textrank(text, topK=20, withWeight=True, allowPOS=('ns','n','vn','v'))
for item in keywords:
    print (item[0],item[1])
# Tokenize and print each word with its part-of-speech tag
words = pseg.cut(text)
for word, flag in words:
    print ('%s, %s' % (word, flag))
bce8b9ea7cee3a0e242a54b30a20978290d8c552 | Python | yr0901/algo_yeeun | /baekjoon/A/분할정복/BJ_1780종이의개수.py | UTF-8 | 1,064 | 2.640625 | 3 | [] | no_license | #종이의 개수
import sys
sys.stdin = open('input.txt','r')
# BOJ 1780 "number of papers": recursively cut an N x N board (N a power
# of 3, values -1/0/1) into sub-squares until each square is uniform,
# counting how many uniform squares of each value are produced.
def CHECK(N, starty, startx, cut):
    # Examine the sub-squares of side N//cut starting at (starty, startx);
    # recurse with a 3x finer cut whenever a square is not uniform.
    global c
    num = N//cut
    if num == 0:
        num = 1
    for sy in range(0,N,num):
        for sx in range(0, N, num):
            stop=False
            if starty+sy >= N or startx+sx >= N:
                break
            # value of the square's top-left cell; the square is uniform
            # iff every cell matches it
            default = arr[starty+sy][startx+sx]
            #print(starty, startx, sy, sx)
            for y in range(num):
                for x in range(num):
                    if stop:
                        break
                    nexty = starty+sy+y
                    nextx = startx+sx+x
                    if 0<=nexty<N and 0<=nextx<N and arr[nexty][nextx] != default:
                        CHECK(N, starty+sy, startx+sx, cut*3)
                        stop = True
                if stop:
                    break
            if not stop:
                # uniform square: values -1/0/1 map to indices 0/1/2
                count[default+1] += 1
N = int(input())
arr = [list(map(int, input().split())) for _ in range(N)]
count= [0] * 3  # tallies for -1, 0, 1 respectively
c = 0  # NOTE(review): unused apart from the global declaration in CHECK
CHECK(N,0,0,1)
for t in count:
    print(t)
4092aada4848f0ef83aca7f2a2c6a92abfafe164 | Python | hammadk373/crawley | /crawley/parser/parsers/recursive_parser.py | UTF-8 | 343 | 2.875 | 3 | [] | no_license | from parser import Parser, ParserException
class RecursiveParser(Parser):
    """Parser variant that only accepts input the DSL flags as recursive."""

    def parse(self):
        """No additional parsing work is performed for recursive input."""
        return None

    def _can_parse(self):
        """Parseable here exactly when the DSL reports itself recursive."""
        return self.dsl.is_recursive()

    def _get_exception(self):
        """Build the error raised for non-recursive input, tagged with the
        offending DSL line number."""
        message = "Can't Parse, only Recursive admitted, Line %d" % self.dsl.number
        return ParserException(message)
95e0e39d81a8ce05e1725dcf3df9e7eea224e2e9 | Python | Tommy69-69/3D-Test | /3D-Star.py | UTF-8 | 2,790 | 3.625 | 4 | [] | no_license | """
@author Francisco Grijalva
@date 11/Enero/2021
Rotate a five pointed star 3D like rotation circles 3D exercise
"""
import numpy as np
import matplotlib.pyplot as plt
#import functions tools3D
import tools3D as Tools
plt.axis([0,150,100,0])
plt.axis()
plt.grid()
# Draw the X and Y reference axes
plt.plot([8,130],[8,8],color='k')
plt.text(120,6,'X')
plt.plot([8,8],[8,85],color='k')
plt.text(4,80,'Y')
# Define the point lists
# Star outline: local/model coordinates (x,y,z) and plotted/global (xg,yg,zg)
x=[]
y=[]
z=[]
xg=[]
yg=[]
zg=[]
# Second outline set - declared but not used below
xx=[]
yy=[]
zz=[]
xg2=[]
yg2=[]
zg2=[]
# Fill the point lists
p1=np.radians(0)
p2=np.radians(360)
dp=np.radians(72)# angular step between points (72 deg -> pentagon vertices)
rad=20
# Set the coordinates of the pentagon points (endpoint included, so the
# first vertex is generated twice; only indices 0-4 are used for drawing)
for pi in np.arange(p1,p2+dp,dp):
    xp = np.cos(pi)*rad
    yp = np.sin(pi)*rad
    zp = 0
    x.append(xp)
    y.append(yp)
    z.append(zp)
    xg.append(xp)
    yg.append(yp)
    zg.append(zp)
# Define the star plotting helper
def plotcircle(xg,yg,zg):
    """Draw the 5-pointed star by connecting every second vertex of the
    pentagon stored in xg/yg, then mark the star center.

    NOTE(review): reads the module globals xc, yc for the center, so the
    caller must set them before invoking this helper. (An older polygon
    drawing implementation previously sat here as a commented-out
    triple-quoted block and has been folded into this docstring.)
    """
    plt.plot([xg[0],xg[2]],[yg[0],yg[2]],color='r')
    plt.plot([xg[2],xg[4]],[yg[2],yg[4]],color='r')
    plt.plot([xg[4],xg[1]],[yg[4],yg[1]],color='r')
    plt.plot([xg[1],xg[3]],[yg[1],yg[3]],color='r')
    plt.plot([xg[3],xg[0]],[yg[3],yg[0]],color='r')
    plt.scatter(xc,yc,s=5,color='g')
# Apply the transform to the coordinates and plot the result
def plotCircleX(xc,yc,zc,Rx):# compute points rotated about X and plot the star
    # NOTE(review): rewrites the module-level x/y/z lists, so rotations
    # from successive plotCircle* calls accumulate rather than starting
    # from the original star each time.
    for i in range(len(x)):
        [xg[i],yg[i],zg[i]] = Tools.rotRx(xc,yc,zc,x[i],y[i],z[i],Rx)
        [x[i],y[i],z[i]] = [xg[i]-xc,yg[i]-yc,zg[i]-zc]
    plotcircle(xg,yg,zg)
def plotCircleY(xc,yc,zc,Ry):# compute points rotated about Y and plot the star
    # Same accumulation caveat as plotCircleX: x/y/z are updated in place.
    for i in range(len(x)):
        [xg[i],yg[i],zg[i]] = Tools.rotRy(xc,yc,zc,x[i],y[i],z[i],Ry)
        [x[i],y[i],z[i]] = [xg[i]-xc,yg[i]-yc,zg[i]-zc]
    plotcircle(xg,yg,zg)
def plotCircleZ(xc,yc,zc,Rz):# compute points rotated about Z and plot the star
    # Same accumulation caveat as plotCircleX: x/y/z are updated in place.
    for i in range(len(x)):
        [xg[i],yg[i],zg[i]] = Tools.rotRz(xc,yc,zc,x[i],y[i],z[i],Rz)
        [x[i],y[i],z[i]] = [xg[i]-xc,yg[i]-yc,zg[i]-zc]
    plotcircle(xg,yg,zg)
# Star (a): no rotation, centered at (30, 50).
# Note each subsequent star applies its rotation on top of the previous
# one, because the plotCircle* helpers mutate the shared point lists.
Rx=np.radians(0)
xc=30
yc=50
zc=20
plotCircleX(xc,yc,zc,Rx)
plt.text(22,80,'(a)')
plt.text(20,90,'R=0')
# Plot star b
Rx=np.radians(45)
xc=60
yc=50
zc=20
plotCircleX(xc,yc,zc,Rx)
plt.text(52,80,'(b)')
plt.text(50,90,'Rx=45')
# Plot star c
Ry=np.radians(70)
xc=90
yc=50
zc=20
plotCircleY(xc,yc,zc,Ry)
plt.text(82,80,'(c)')
plt.text(80,90,'Ry=70')
# Plot star d
Rz=np.radians(90)
xc=120
yc=50
zc=20
plotCircleZ(xc,yc,zc,Rz)
plt.text(112,80,'(d)')
plt.text(110,90,'Rz=90')
plt.show()
| true |
83ffe7c3e7ac320de33c885b107276a04de6ad85 | Python | suihkulokki/tf-m-ci-scripts | /tfm_ci_pylib/utils.py | UTF-8 | 18,794 | 2.578125 | 3 | [] | no_license | #!/usr/bin/env python3
""" utils.py:
various simple and commonly used methods and classes shared by the scripts
in the CI environment """
from __future__ import print_function
__copyright__ = """
/*
* Copyright (c) 2018-2020, Arm Limited. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*
*/
"""
__author__ = "tf-m@lists.trustedfirmware.org"
__project__ = "Trusted Firmware-M Open CI"
__version__ = "1.2.0"
import os
import re
import sys
import yaml
import requests
import argparse
import json
import itertools
from shutil import move
from collections import OrderedDict, namedtuple
from subprocess import Popen, PIPE, STDOUT, check_output
def detect_python3():
    """ Return true if script is run with Python3 interpreter """
    return sys.version_info.major >= 3
def find_missing_files(file_list):
    """ Return the files from file_list that do not exist on disk. """
    present = {entry for entry in file_list if os.path.isfile(entry)}
    return list(set(file_list) - present)
def resolve_rel_path(target_path, origin_path=None):
    """ Resolve relative path from origin to target. By default origin
    path is current working directory.

    Bug fix: the default used to be evaluated once at import time
    (os.getcwd() in the signature), so later os.chdir() calls were
    silently ignored; it is now resolved at call time.
    """
    if origin_path is None:
        origin_path = os.getcwd()
    common = os.path.commonprefix([origin_path, target_path])
    return os.path.relpath(target_path, common)
def print_test_dict(data_dict,
                    pad_space=80,
                    identation=5,
                    titl="Summary",
                    pad_char="*"):
    """ Configurable print formatter aimed for dictionaries of the type
    {"TEST NAME": "RESULT"} used in CI systems. It will also return
    the string which is printed. """

    # Calculate pad space between variables x, y to achieve alignment on y
    # taking into consideration a maximum alignment boundary p and
    # possible indentation i
    def flex_pad(x, y, p, i):
        return " " * (p - i * 2 - len(x) - len(y)) + "-> "

    # Calculate the padding for the dataset: "name  -> result" per entry
    tests = [k + flex_pad(k,
                          v,
                          pad_space,
                          identation) + v for k, v in data_dict.items()]

    # Add the indentation
    tests = map(lambda x: " " * identation + x, tests)

    # Convert to string
    tests = "\n".join(tests)

    # Calculate the top header padding, ceiling any rounding errors
    hdr_pad = (pad_space - len(titl) - 3) / 2

    # On python3 the division above is a float; truncate back to int
    if detect_python3():
        hdr_pad = int(hdr_pad)

    # Generate a print formatting dictionary (pad1 absorbs odd title widths)
    print_dict = {"pad0": pad_char * (hdr_pad),
                  "pad1": pad_char * (hdr_pad + 1 if len(titl) % 2
                                      else hdr_pad),
                  "sumry": tests,
                  "pad2": pad_char * pad_space,
                  "titl": titl}

    # Compose & print the report
    r = "\n%(pad0)s %(titl)s %(pad1)s\n\n%(sumry)s\n\n%(pad2)s\n" % print_dict
    print(r)
    return r
def print_test(t_name=None, t_list=None, status="failed", tname="Tests"):
    """ Print a list of tests in a structured ascii table format.

    t_name: optional header printed above the table
    t_list: iterable of test names, one table row each
    status/tname: words composing the "<tname> <status>:" caption
    """

    gfx_line1 = "=" * 80
    gfx_line2 = "\t" + "-" * 70
    if t_name:
        print("%(line)s\n%(name)s\n%(line)s" % {"line": gfx_line1,
                                                "name": t_name})
    print("%s %s:" % (tname, status))
    # One "| name      |" row per test, padded to a 66 character column
    print(gfx_line2 + "\n" +
          "\n".join(["\t| %(key)s%(pad)s|\n%(line)s" % {
              "key": n,
              "pad": (66 - len(n)) * " ",
              "line": gfx_line2} for n in t_list]))
def test(test_list,
         test_dict,
         test_name="TF-M Test",
         pass_text=["PASSED", "PRESENT"],
         error_on_failed=True,
         summary=True):
    """ Using input of a test_lst and a test results dictionary in the format
    of test_name: resut key-value pairs, test() method will verify that Every
    single method in the test_list has been tested and passed. Pass and Failed,
    status tests can be overriden and error_on_failed flag, exits the script
    with failure if a single test fails or is not detected. Returns a json
    containing status and fields for each test passed/failed/missing, if error
    on failed is not set.
    """

    t_report = {"name": test_name,
                "success": None,
                "passed": [],
                "failed": [],
                "missing": []}
    # Clean-up tests that are not requested by test_list
    test_dict = {k: v for k, v in test_dict.items() if k in test_list}

    # Calculate the difference of the two sets to find missing tests
    t_report["missing"] = list(set(test_list) - set(test_dict.keys()))

    # Sort the items into the appropriate lists (failed or passed)
    # based on their status.
    for k, v in test_dict.items():
        # print(k, v)
        key = "passed" if v in pass_text else "failed"
        t_report[key] += [k]

    # For the test to pass every single test in test_list needs to be present
    # and be in the passed list
    if len(test_list) == len(t_report["passed"]):
        t_report["success"] = True
    else:
        t_report["success"] = False

    # Print a summary (one table per result category)
    if summary:
        if t_report["passed"]:
            print_test(test_name, t_report["passed"], status="passed")
        if t_report["missing"]:
            print_test(test_name, t_report["missing"], status="missing")
        if t_report["failed"]:
            print_test(test_name, t_report["failed"], status="Failed")
        print("\nTest %s has %s!" % (t_report["name"],
                                     " been successful" if t_report["success"]
                                     else "failed"))
        print("-" * 80)

    # NOTE: with error_on_failed set, the process exits here and the
    # report below is never returned.
    if error_on_failed:
        syscode = 0 if t_report["success"] else 1
        sys.exit(syscode)
    return t_report
def save_json(f_name, data_object):
    """Serialize *data_object* to *f_name* as indented JSON."""
    with open(f_name, "w") as out_file:
        json.dump(data_object, out_file, indent=2)
def save_dict_json(f_name, data_dict, sort_list=None):
    """ Save a dictionary object to file with optional sorting

    Bug fix: the original only assigned the object to be written when
    sort_list was provided, raising NameError for the default call; the
    raw dictionary is now saved when no sort order is requested.
    """
    data_object = sort_dict(data_dict, sort_list) if sort_list else data_dict
    save_json(f_name, data_object)
def sort_dict(config_dict, sort_order_list=None):
    """Return an OrderedDict copy of *config_dict*, ordered by
    *sort_order_list* when given and by sorted key order otherwise."""
    keys = sort_order_list if sort_order_list else sorted(config_dict)
    return OrderedDict((key, config_dict[key]) for key in keys)
def load_json(f_name):
    """Load a python object from a JSON file, raising a generic Exception
    (after printing a diagnostic) when the content cannot be decoded."""
    with open(f_name, "r") as json_file:
        try:
            return json.loads(json_file.read())
        except ValueError:
            print("No JSON object could be decoded from file: %s" % f_name)
        except IOError:
            print("Error opening file: %s" % f_name)
    raise Exception("Failed to load file")
def load_yaml(f_name):
    """Load a python object from a YAML file, mirroring load_json()."""
    with open(f_name, "r") as yaml_file:
        try:
            # NOTE(review): yaml.load without an explicit Loader is unsafe
            # on untrusted input; consider yaml.safe_load for external files.
            return yaml.load(yaml_file.read())
        except yaml.YAMLError as exc:
            print("Error parsing file: %s" % f_name)
        except IOError:
            print("Error opening file: %s" % f_name)
    raise Exception("Failed to load file")
def subprocess_log(cmd, log_f, prefix=None, append=False, silent=False):
    """ Run a command as subprocess and log the output to stdout and a file.
    If prefix is specified it will be added as the first line in the file.

    Returns the subprocess's exit code.
    """

    with open(log_f, 'a' if append else "w") as F:
        if prefix:
            F.write(prefix + "\n")
        # stderr is folded into stdout so the log captures both streams
        pcss = Popen(cmd,
                     stdout=PIPE,
                     stderr=STDOUT,
                     shell=True,
                     env=os.environ)
        for line in pcss.stdout:
            if detect_python3():
                # pipes yield bytes on python3; decode before tee-ing
                line = line.decode("utf-8")
            if not silent:
                sys.stdout.write(line)
            F.write(line)
        # reap the child so returncode is populated
        pcss.communicate()
        return pcss.returncode
    # NOTE(review): unreachable — the with-block above always returns
    return
def run_proccess(cmd):
    """Run *cmd* through the shell, swallow its output and return the
    exit code."""
    proc = Popen(cmd, stdout=PIPE, stderr=PIPE, shell=True, env=os.environ)
    proc.communicate()
    return proc.returncode
def get_pid_status(pid):
    """Look up a process's state name ("running", "sleeping", ...) in the
    Linux procfs. Returns None when the process does not exist or the
    status file cannot be read/parsed."""
    try:
        with open("/proc/%s/status" % pid, "r") as status_file:
            contents = status_file.read()
        matches = re.findall(r'(?:State:\t[A-Z]{1} \()(\w+)',
                             contents, re.MULTILINE)
        return matches[0]
    except Exception as e:
        print("Exception", e)
def check_pid_status(pid, status_list):
    """Return True when *pid* exists and its state is one of *status_list*
    (Linux only); print a diagnostic otherwise."""
    current = get_pid_status(pid)
    if not current:
        print("PID %s does not exist." % pid)
        return False
    if current in status_list:
        return True
    # TODO Remove debug print
    print("PID status %s not in %s" % (current, ",".join(status_list)))
    return False
def list_chunks(l, n):
    """ Yield consecutive slices of l, each at most n items long. """
    start = 0
    while start < len(l):
        yield l[start:start + n]
        start += n
def export_config_map(config_m, dir=None):
    """Write every configuration of *config_m* to a lowercase <name>.json
    file inside *dir* (current working directory by default)."""
    target_dir = dir if dir else os.getcwd()
    for cfg_name, cfg in config_m.items():
        out_path = os.path.join(target_dir, cfg_name.lower() + ".json")
        print("Exporting config %s" % out_path)
        save_json(out_path, cfg)
def gen_cfg_combinations(name, categories, *args):
    """Build the cartesian product of the *args* lists as namedtuples.

    *categories* is a space separated field-name string; the positional
    lists must be supplied in the same order as those fields.
    """
    config_cls = namedtuple(name, categories)
    combos = []
    for combo in itertools.product(*args):
        combos.append(config_cls(*combo))
    return combos
def show_progress(current_count, total_count):
    """Print a 70-character wide progress bar for current/total."""
    percent = int((current_count / total_count) * 100)
    filled = int(percent * 0.7)
    bar = "#" * filled + "~" * (70 - filled)
    print("[ %s | %d%% ]" % (bar, percent))
def get_cmd_args(descr="", parser=None):
    """ Parse command line arguments

    When *parser* is not supplied a fresh ArgumentParser with description
    *descr* is created; the parsed argument namespace is returned.
    """
    arg_parser = parser if parser else argparse.ArgumentParser(description=descr)
    return arg_parser.parse_args()
def arm_non_eabi_size(filename):
    """ Run the arm-none-eabi-size command and parse the output using regex.

    Returns a two element list: a dict with the text/data/bss/dec/hex
    section sizes (as strings) and the raw command output. Requires the
    arm-none-eabi toolchain on PATH; check_output raises when the binary
    is missing, exits non-zero, or exceeds the 18 second timeout.
    """

    # Matches one "   text    data     bss     dec     hex filename" row
    size_info_rex = re.compile(r'^\s+(?P<text>[0-9]+)\s+(?P<data>[0-9]+)\s+'
                               r'(?P<bss>[0-9]+)\s+(?P<dec>[0-9]+)\s+'
                               r'(?P<hex>[0-9a-f]+)\s+(?P<file>\S+)',
                               re.MULTILINE)

    eabi_size = check_output(["arm-none-eabi-size",
                              filename],
                             timeout=18).decode('UTF-8').rstrip()

    size_data = re.search(size_info_rex, eabi_size)

    return [{"text": size_data.group("text"),
             "data": size_data.group("data"),
             "bss": size_data.group("bss"),
             "dec": size_data.group("dec"),
             "hex": size_data.group("hex")}, eabi_size]
def list_subdirs(directory):
    """Return the absolute paths of the immediate sub-directories of
    *directory* (symlinks are resolved before the directory check)."""
    base = os.path.abspath(directory)
    result = []
    for entry in os.listdir(base):
        full_path = os.path.join(base, entry)
        if os.path.isdir(os.path.realpath(full_path)):
            result.append(full_path)
    return result
def get_local_git_info(directory, json_out_f=None):
    """ Extract git related information from a target directory. It allows
    optional export to json file.

    Returns a dict with author/email/remote/date/commit/subject/message/
    change_id/sign_off/branch keys, or None when any git command fails.
    """

    directory = os.path.abspath(directory)
    cur_dir = os.path.abspath(os.getcwd())
    os.chdir(directory)

    # System commands to collect information:
    # cmd1: hash/author/email/date/subject, tab separated
    # cmd2: commit message body; cmd3: first remote url
    # cmd4: remote branch whose head is the current commit
    cmd1 = "git log HEAD -n 1 --pretty=format:'%H%x09%an%x09%ae%x09%ai%x09%s'"
    cmd2 = "git log HEAD -n 1 --pretty=format:'%b'"
    cmd3 = "git remote -v | head -n 1 | awk '{ print $2}';"
    cmd4 = ("git ls-remote --heads origin | "
            "grep $(git rev-parse HEAD) | cut -d / -f 3")

    # Splits a message body into free text, Change-Id and Signed-off-by
    git_info_rex = re.compile(r'(?P<body>^[\s\S]*?)((?:Change-Id:\s)'
                              r'(?P<change_id>.*)\n)((?:Signed-off-by:\s)'
                              r'(?P<sign_off>.*)\n?)', re.MULTILINE)

    proc_res = []
    for cmd in [cmd1, cmd2, cmd3, cmd4]:

        r, e = Popen(cmd, shell=True, stdout=PIPE, stderr=PIPE).communicate()

        if e:
            print("Error", e)
            # NOTE(review): early return leaves the process chdir'ed into
            # *directory*; the caller's cwd is not restored on this path.
            return
        else:
            try:
                txt_body = r.decode('ascii')
            except UnicodeDecodeError as E:
                txt_body = r.decode('utf-8')
            proc_res.append(txt_body.rstrip())

    # Unpack and tag the data
    hash, name, email, date, subject = proc_res[0].split('\t')

    _raw_body = proc_res[1]
    _bd_items = re.findall(r'(Signed-off-by|Change-Id)', _raw_body,
                           re.MULTILINE)

    signed_off = None
    body = None
    change_id = None

    # If both sign-off and gerrit-id exist
    if len(_bd_items) == 2:
        m = git_info_rex.search(_raw_body)
        print(git_info_rex.findall(_raw_body))
        if m is not None:
            match_dict = m.groupdict()
            if "body" in match_dict.keys():
                body = match_dict["body"]
            if "sign_off" in match_dict.keys():
                signed_off = match_dict["sign_off"]
            if "change_id" in match_dict.keys():
                change_id = match_dict["change_id"]
        else:
            print("Error: Could not regex parse message", repr(_raw_body))
            body = _raw_body

    # If only one of sign-off / gerrit-id exist
    elif len(_bd_items) == 1:
        _entry_key = _bd_items[0]
        body, _extra = _raw_body.split(_entry_key)
        if _entry_key == "Change-Id":
            change_id = _extra
        else:
            signed_off = _extra

    # If the message contains commit message body only
    else:
        body = _raw_body

    # Attempt to read the branch from Gerrit Trigger
    try:
        branch = os.environ["GERRIT_BRANCH"]
    # IF not compare the commit hash with the remote branches to determine the
    # branch of origin. Warning this assumes that only one branch has its head
    # on this commit.
    except KeyError as E:
        branch = proc_res[3]

    remote = proc_res[2]
    # Internal Gerrit specific code
    # Intended for converting the git remote to a more usable gitweb url
    known_remotes = ["https://gerrit.oss.arm.com",
                     "http://gerrit.mirror.oss.arm.com"]

    for kr in known_remotes:
        if kr in remote:
            print("Applying Remote specific patch to remote", kr)

            remote = remote.split(kr)[-1][1:]
            print("REMOTE", remote)
            remote = "%s/gitweb?p=%s.git;a=commit;h=%s" % (kr, remote, hash)
            break

    out = {"author": name.strip(),
           "email": email.strip(),
           "dir": directory.strip(),
           "remote": remote.strip(),
           "date": date.strip(),
           "commit": hash.strip(),
           "subject": subject.strip(),
           "message": body.strip(),
           "change_id": change_id.strip() if change_id is not None else "N.A",
           "sign_off": signed_off.strip() if signed_off is not None else "N.A",
           "branch": branch.strip()}

    # Restore the directory path
    os.chdir(cur_dir)

    if json_out_f:
        save_json(json_out_f, out)
    return out
def get_remote_git_info(url):
    """ Collect git information from a Linux Kernel web repository by
    scraping the commit page's HTML.

    NOTE(review): assumes the '?id=<hash>' cgit url layout and that the
    page markup matches the regexes below; raises AttributeError when the
    expected fragments are absent.
    """

    auth_rex = re.compile(r'(?:<th>author</th>.*)(?:span>)(.*)'
                          r'(?:;.*\'right\'>)([0-9\+\-:\s]+)')
    # commiter_rex = re.compile(r'(?:<th>committer</th>.*)(?:</div>)(.*)'
    #                           r'(?:;.*\'right\'>)([0-9\+\-:\s]+)')
    subject_rex = re.compile(r'(?:\'commit-subject\'>)(.*)(?:</div>)')
    body_rex = re.compile(r'(?:\'commit-msg\'>)([\s\S^<]*)(?:</div>'
                          r'<div class=\'diffstat-header\'>)', re.MULTILINE)

    content = requests.get(url).text
    author, date = re.search(auth_rex, content).groups()
    subject = re.search(subject_rex, content).groups()[0]
    body = re.search(body_rex, content).groups()[0]
    # url shaped like <remote>?id=<hash>
    remote, hash = url.split("=")

    outdict = {"author": author,
               "remote": remote[:-3],
               "date": date,
               "commit": hash,
               "subject": subject,
               "message": body}
    # Clean up html noise (&lt; / &gt; entities)
    return {k: re.sub(r'&[a-z]t;?', "", v) for k, v in outdict.items()}
def convert_git_ref_path(dir_path):
    """ If a git long hash is detected in a path move it to a short hash.

    The directory is renamed on disk and the (possibly updated) path is
    returned. Recognized forms: name_{hash}, name-{hash}, {hash}.
    """

    # Detect a git hash on a directory naming format of name_{hash},
    # {hash}, name-{hash}
    git_hash_rex = re.compile(r'(?:[_|-])*([a-f0-9]{40})')

    # if checkout directory name contains a git reference convert to short
    git_hash = git_hash_rex.findall(dir_path)
    if git_hash:
        short_path = dir_path.replace(git_hash[0], git_hash[0][:7])
        # Bug fix: the original passed logging-style %s arguments to
        # print(), which echoed them verbatim instead of formatting.
        print("Renaming %s -> %s" % (dir_path, short_path))
        move(dir_path, short_path)
        dir_path = short_path
    return dir_path
def list_filtered_tree(directory, rex_filter=None):
    """Recursively list every file under *directory*, optionally keeping
    only paths that match the regular expression *rex_filter*."""
    all_files = []
    for path, _subdirs, files in os.walk(directory):
        all_files.extend(os.path.join(path, fname) for fname in files)
    if not rex_filter:
        return all_files
    rex = re.compile(rex_filter)
    return [fpath for fpath in all_files if rex.search(fpath)]
def gerrit_patch_from_changeid(remote, change_id):
    """ Use Gerrit's REST api for a best effort to retrieve the url of the
    patch-set under review.

    Raises a bare Exception (after printing diagnostics) when the request
    or the response parsing fails.
    """

    try:
        r = requests.get('%s/changes/%s' % (remote, change_id),
                         headers={'Accept': 'application/json'})
        # Strip Gerrit's ")]}'" anti-XSSI prefix before the JSON payload
        resp_data = r.text[r.text.find("{"):].rstrip()
        change_no = json.loads(resp_data)["_number"]
        return "%s/#/c/%s" % (remote, change_no)
    except Exception as E:
        print("Failed to retrieve change (%s) from URL %s" % (change_id,
                                                              remote))
        print("Exception Thrown:", E)
        raise Exception()
| true |
ae5306eab1ec4a140342be8dc793b1c7bebd7b17 | Python | PIlin/blender_tri_split | /tri_split.py | UTF-8 | 6,295 | 2.65625 | 3 | [] | no_license | import bpy
import mathutils
import bmesh
def deleteObject(obj):
    """Unlink *obj* from every scene using it, then remove the object and
    its mesh data from the blend file."""
    # tuple(): snapshot the scene list, since unlinking modifies it
    for sc in tuple(obj.users_scene):
        sc.objects.unlink(obj)
    objData = obj.data
    bpy.data.objects.remove(obj)
    bpy.data.meshes.remove(objData)
def deleteUnusedMeshes():
    """Remove every mesh datablock that no longer has any users."""
    orphans = [mesh for mesh in bpy.data.meshes if mesh.users == 0]
    for orphan in orphans:
        bpy.data.meshes.remove(orphan)
class Plane:
    """Infinite plane defined by a normal vector *n* and a point *p* on it.

    Attributes:
        n -- the unit-length normal
        p -- the anchor point
        d -- the plane constant n . p, measured against the *unit* normal
             so that ``self.n.dot(v) - self.d`` is a true signed distance.
    """

    def __init__(self, n, p):
        self.n = n.normalized()
        self.p = p
        # Bug fix: d was computed with the caller's (possibly non-unit)
        # normal while n was stored normalized, making the signed
        # distances computed elsewhere wrong for non-unit input normals.
        self.d = self.n.dot(p)
def joinBmeshes(bm1, bm2):
    """Append the geometry of *bm2* into *bm1* by round-tripping through
    a temporary Mesh datablock, which is removed afterwards."""
    # debug: vert/face counts before the join
    print(len(bm1.verts), len(bm1.faces))
    tmpMesh = bpy.data.meshes.new('.tempMeshForJoin')
    bm2.to_mesh(tmpMesh)
    bm1.from_mesh(tmpMesh)
    # debug: vert/face counts after the join
    print(len(bm1.verts), len(bm1.faces))
    bpy.data.meshes.remove(tmpMesh)
def testSplitLine(plane):
    """Debug helper: split the fixed segment A-B against *plane* and return
    a bmesh containing either the whole edge (no crossing) or the two
    half-edges that meet at the intersection point."""
    bm = bmesh.new()
    print('plane n =', plane.n, ' plane.p =', plane.p, ' plane.d =', plane.d)
    A = mathutils.Vector((-1, 1, 0))
    B = mathutils.Vector((2, 0, 1))
    print('A =', A)
    print('B =', B)
    # signed distances of A and B from the plane
    dA = plane.n.dot(A) - plane.d
    dB = plane.n.dot(B) - plane.d
    print('dA =', dA)
    print('dB =', dB)
    def classify(x):
        # -1 / 0 / +1 for below / on / above the plane (exact zero test)
        if x == 0: return 0
        elif x > 0: return 1
        return -1
    clA = classify(dA)
    clB = classify(dB)
    print('clA =', clA)
    print('clB =', clB)
    if (clA == 0) or (clB == 0) or (clA == clB):
        # points on same side of the plane, or on the plane
        vA = bm.verts.new(A)
        vB = bm.verts.new(B)
        bm.edges.new((vA, vB))
    else:
        # linear interpolation to the zero crossing of the signed distance
        X = A + (-dA / (dB - dA)) * (B - A)
        print('X =', X)
        vA = bm.verts.new(A)
        vB = bm.verts.new(B)
        vX = bm.verts.new(X)
        bm.edges.new((vA, vX))
        bm.edges.new((vX, vB))
        pass
    return bm
def classifyDV(x, eps=0):
    """Classify the signed distance *x* against a +/- eps tolerance band:
    +1 above, -1 below, 0 inside the band."""
    if x > eps:
        return 1
    if x < -eps:
        return -1
    return 0
def buildFace(bm, verts):
    """Create one vertex per coordinate in *verts* and append the face
    they form to *bm*, returning the new face."""
    print('buildFace', verts)
    new_verts = [bm.verts.new(coord) for coord in verts]
    return bm.faces.new(new_verts)
def calcSplitPoint(A, B, dA, dB):
    """Interpolate the point on segment A-B where the signed distance
    (dA at A, dB at B) crosses zero."""
    t = -dA / (dB - dA)
    return A + t * (B - A)
def splitFaceTwoPoints(verts, dverts, clVerts, outBm):
    """Split a triangle whose plane-side classification isolates one
    vertex (A) from the other two (B, C): the two edges incident to A are
    cut, producing one triangle on A's side and two on the other."""
    print('splitFaceTwoPoints', verts, dverts, clVerts)
    # find the vertex whose classification differs from both others
    for i in range(3):
        i1 = (i + 1) % 3
        i2 = (i + 2) % 3
        if (clVerts[i] != clVerts[i1]) and (clVerts[i] != clVerts[i2]):
            A = verts[i]
            dA = dverts[i]
            B = verts[i1]
            dB = dverts[i1]
            C = verts[i2]
            dC = dverts[i2]
            break
    # intersection points on edges A-B and A-C
    X1 = calcSplitPoint(A, B, dA, dB)
    X2 = calcSplitPoint(A, C, dA, dC)
    vA = outBm.verts.new(A)
    vB = outBm.verts.new(B)
    vC = outBm.verts.new(C)
    vX1 = outBm.verts.new(X1)
    vX2 = outBm.verts.new(X2)
    # one triangle on A's side, two covering the B-C side
    outBm.faces.new((vA, vX1, vX2))
    outBm.faces.new((vX1, vB, vC))
    outBm.faces.new((vX1, vC, vX2))
    return
def splitFaceOnePoint(verts, dverts, clVerts, outBm):
    """Split a triangle that has exactly one vertex (A) on the plane and
    the remaining two (B, C) on opposite sides: cut edge B-C at the plane
    crossing X and emit the two triangles A-B-X and A-X-C.

    Bug fixes vs. the original: the search loop did not ``break``, so A
    was always taken from index 2 instead of the on-plane vertex, and the
    intersection vertex referenced an undefined name ``X1``.
    """
    print('splitFaceOnePoint', verts, dverts, clVerts)
    # locate the vertex lying on the plane and stop there
    for i in range(3):
        if clVerts[i] == 0:
            break
    i1 = (i + 1) % 3
    i2 = (i + 2) % 3
    A = verts[i]
    dA = dverts[i]
    B = verts[i1]
    dB = dverts[i1]
    C = verts[i2]
    dC = dverts[i2]
    # intersection of edge B-C with the plane
    X = calcSplitPoint(B, C, dB, dC)
    vA = outBm.verts.new(A)
    vB = outBm.verts.new(B)
    vC = outBm.verts.new(C)
    vX = outBm.verts.new(X)
    outBm.faces.new((vA, vB, vX))
    outBm.faces.new((vA, vX, vC))
    return
def splitFace(face, plane, outBm):
    """Classify the triangle *face* against *plane* and append its split
    (or unmodified copy) to *outBm*."""
    verts = [v.co for v in face.verts]
    dverts = [0, 0, 0]
    clVerts = [0, 0, 0]
    sides = 0  # NOTE(review): assigned but never used
    # signed distance and -1/0/+1 classification per vertex
    for i in range(3):
        V = verts[i]
        dV = plane.n.dot(V) - plane.d
        dverts[i] = dV
        clV = classifyDV(dV)
        clVerts[i] = clV
    countNeg = clVerts.count(-1)
    countPos = clVerts.count(1)
    countOnPlane = clVerts.count(0)
    print(verts, dverts, clVerts)
    assert(countNeg + countPos + countOnPlane == 3)
    # 1-vs-2 split across the plane -> cut two edges
    if ((countNeg == 1) and (countPos == 2)) or ((countNeg == 2) and (countPos == 1)):
        splitFaceTwoPoints(verts, dverts, clVerts, outBm)
    # one vertex on the plane, the other two straddling it -> cut one edge
    elif (countOnPlane == 1) and (countPos == 1):
        splitFaceOnePoint(verts, dverts, clVerts, outBm)
    # no crossing: copy the triangle through unchanged
    else:
        buildFace(outBm, verts)
    return
def splitObjectMesh(obj, plane):
    """Triangulate *obj*'s mesh in world space, split every triangle
    against *plane* and return the resulting bmesh (caller must free)."""
    bm = bmesh.new()
    bm.from_mesh(obj.data)
    # bake the object's world transform into the vertex coordinates
    bm.transform(obj.matrix_world)
    bmesh.ops.triangulate(bm, faces=bm.faces)
    outBm = bmesh.new()
    print('source faces count =', len(bm.faces))
    for face in bm.faces:
        splitFace(face, plane, outBm)
    bm.free()
    print('result faces count =', len(outBm.faces))
    return outBm
def main():
    """Split every mesh object in the scene by the plane described by the
    'PLANE' object (or a default plane) and collect the result into a new
    'RESULT' object."""
    planeObjectName = 'PLANE'
    resultObjectName = 'RESULT'
    resultMeshName = resultObjectName + '_MESH'
    # recreate the RESULT object from scratch on every run
    if resultObjectName in bpy.data.objects:
        deleteObject(bpy.data.objects[resultObjectName])
    resultMesh = bpy.data.meshes.new(resultMeshName)
    resultObj = bpy.data.objects.new(resultObjectName, resultMesh)
    bpy.context.scene.objects.link(resultObj)
    sc = bpy.context.scene
    #objects = bpy.context.selected_objects
    objects = sc.objects
    if not objects:
        print("No objects selected")
        return
    # derive the splitting plane from the PLANE object's first polygon,
    # transformed into world space; fall back to a fixed default plane
    if planeObjectName in bpy.data.objects:
        planeObj = bpy.data.objects[planeObjectName]
        print('mesh plane norm', planeObj.data.polygons[0].normal)
        n = planeObj.matrix_world.to_quaternion() * mathutils.Vector(planeObj.data.polygons[0].normal)
        p = mathutils.Vector(planeObj.matrix_world.translation)
        print('found plane', n, p)
        splitPlane = Plane(n, p)
    else:
        splitPlane = Plane(mathutils.Vector((1, 0, 0)), mathutils.Vector((0.33, 1, 1)))
    tmpResultBMesh = bmesh.new()
    for obj in objects:
        # skip the output object, the plane object and non-mesh objects
        if obj.name == resultObj.name:
            continue
        if obj.name == planeObjectName:
            continue
        if obj.type != 'MESH':
            continue
        print(obj.name)
        bm = splitObjectMesh(obj, splitPlane)
        joinBmeshes(tmpResultBMesh, bm)
        bm.free()
    # bm = testSplitLine(splitPlane)
    # joinBmeshes(tmpResultBMesh, bm)
    # bm.free()
    tmpResultBMesh.to_mesh(resultMesh)
    tmpResultBMesh.free()
    sc.update()
main()
ec884d35fbd5f87934cbe6c08e2b8e7b575c52c5 | Python | GolamRabbani20/PYTHON-A2Z | /ANISUL'S_VIDEOS/Guese_Game.py | UTF-8 | 394 | 4.3125 | 4 | [] | no_license | from random import randint
# Simple guessing game: 100 rounds of guessing a random number in 1..10.
for i in range(1,101):
    GuessNumber=int(input("\nEnter a number from 1 to 10:"))
    # NOTE(review): the drawn range is 1..10 while the prompt above is
    # consistent, but the loop plays a fixed 100 rounds with no exit.
    RandomNumber=randint(1,10)
    if RandomNumber==GuessNumber:
        print("\nCongratulation! You have won the game.")
        print("The RandomNumber is ", RandomNumber)
    else:
        print("\nSorry! You have lost the game.")
        print("The RandomNumber is ",RandomNumber)
| true |
ab3bc9900c10db5676eacf232d8fac85f102e907 | Python | ChrisYoungGH/LeetCode | /350.IntersectionOfTwoArraysII/intersection.py | UTF-8 | 542 | 3.28125 | 3 | [] | no_license | class Solution(object):
def intersection(self, nums1, nums2):
"""
:type nums1: List[int]
:type nums2: List[int]
:rtype: List[int]
"""
unique1 = {}
for n in nums1:
unique1[n] = unique1.get(n, 0) + 1
ans = []
for n in nums2:
if unique1.get(n, 0) > 0:
ans.append(n)
unique1[n] -= 1
return ans
if __name__ == '__main__':
    # Ad-hoc smoke test (Python 2 print statement)
    nums1 = [1,2]
    nums2 = [1,1]
    print Solution().intersection(nums1, nums2)
| true |
1b1399a739042d30263a8ed73aae129c33f8a9ca | Python | pavelkrizek/hcrystalball | /src/hcrystalball/wrappers/_base.py | UTF-8 | 5,345 | 3.171875 | 3 | [
"MIT"
] | permissive | import inspect
from types import FunctionType
from abc import ABCMeta, abstractmethod
from sklearn.base import BaseEstimator
def get_clean_param_dict(signature):
    """Map parameter names to defaults, dropping ``self``, ``*`` and ``**``.

    Parameters without an explicit default are mapped to None.

    Parameters
    ----------
    signature : inspect.Signature
        Signature of a function or method.

    Returns
    -------
    dict
        ``{parameter_name: default_value}`` for every plain parameter.
    """
    params = {}
    for param in signature.parameters.values():
        if param.name == "self":
            continue
        if param.kind in (param.VAR_KEYWORD, param.VAR_POSITIONAL):
            continue
        params[param.name] = param.default if param.default != inspect.Parameter.empty else None
    return params
def tsmodel_wrapper_constructor_factory(modeltype):
    """Build a decorator that fuses *modeltype*'s constructor into a wrapper's.

    The returned decorator must be applied to a wrapper class's ``__init__``.
    It joins the parameters of ``modeltype.__init__`` with those of the
    decorated constructor into one flat signature, which keeps the wrapper
    scikit-learn compliant: ``get_params()``/``set_params()`` work on the
    wrapper as if one interacted directly with the wrapped model.

    Parameters
    ----------
    modeltype : class
        Model class providing the constructor to be joined with the target
        constructor of the inner decorator.

    Returns
    -------
    callable
        Inner decorator applied to the target constructor.
    """

    def tsmodel_wrapper_constructor(init_func):
        """Replace *init_func* with a generated ``__init__`` accepting both the
        model's parameters and the wrapper's own, assigning each to ``self``.

        The new constructor is built from scratch via string compilation
        (https://docs.python.org/3/library/functions.html#compile).

        Parameters
        ----------
        init_func : callable
            The target constructor to be decorated.

        Returns
        -------
        callable
            New constructor accepting both the original arguments and the
            arguments of the *modeltype* class.
        """
        orig_signature = inspect.signature(init_func)
        orig_parameters = get_clean_param_dict(orig_signature)
        model_signature = inspect.signature(modeltype.__init__)
        model_parameters = get_clean_param_dict(model_signature)
        # model parameters come first, wrapper-specific ones after them
        full_parameter_names = list(model_parameters) + list(orig_parameters)
        full_parameter_defaults = list(model_parameters.values()) + list(orig_parameters.values())
        assignments = "; ".join([f"self.{p}={p}" for p in full_parameter_names])
        # source-compile a flat __init__ that assigns every parameter to self
        constructor_code = compile(
            f'def __init__(self, {", ".join(full_parameter_names)}): ' f"{assignments}",
            "<string>",
            "exec",
        )
        # co_consts[0] is the code object of the compiled __init__; the defaults
        # tuple lines up positionally with the tail of full_parameter_names.
        modified_init_func = FunctionType(
            constructor_code.co_consts[0],
            globals(),
            "__init__",
            tuple(full_parameter_defaults),
        )
        return modified_init_func

    return tsmodel_wrapper_constructor
class TSModelWrapper(BaseEstimator, metaclass=ABCMeta):
    """Base class for all model wrappers in hcrystalball"""

    @abstractmethod
    def __init__(self):
        pass

    def _init_tsmodel(self, model_cls, **extra_args):
        """Instantiate `model_cls` from this wrapper's matching parameters.

        Only wrapper parameters that also appear in ``model_cls.__init__``
        are forwarded; ``extra_args`` override them.

        Parameters
        ----------
        model_cls : class
            Model class

        Returns
        -------
        Any
            instance of `model_cls`
        """
        model_signature = inspect.signature(model_cls.__init__)
        model_params = get_clean_param_dict(model_signature)
        params = {k: v for k, v in self.get_params().items() if k in model_params}
        return self._set_model_extra_params(model_cls(**{**params, **extra_args}))

    def _set_model_extra_params(self, model):
        # Hook for subclasses that need to tweak the freshly built model.
        return model

    def _transform_data_to_tsmodel_input_format(self, X, y=None):
        """Transform X and y into the wrapped model's required format.

        Placeholder: subclasses override this with a real transformation.

        BUG FIX: this was decorated with ``@staticmethod`` while still taking
        ``self``, so calling ``self._transform_data_to_tsmodel_input_format(X, y)``
        shifted every argument by one (X was bound to ``self``).  It is now a
        plain instance method, matching how subclasses override it.

        Parameters
        ----------
        X : pandas.DataFrame
            Input features.

        y : array_like, (1d)
            Target vector.

        Returns
        -------
        X, y
            X - pandas.DataFrame with features
            y - array_like or None for target
        """
        return X, y

    def _clip_predictions(self, preds):
        """Clip predictions between `clip_predictions_lower` and `clip_predictions_upper`.

        Parameters
        ----------
        preds : pandas.DataFrame
            Predictions

        Returns
        -------
        pandas.DataFrame
            Clipped predictions.
        """
        preds[self.name] = preds[self.name].clip(
            lower=self.clip_predictions_lower, upper=self.clip_predictions_upper
        )
        return preds


__all__ = ["TSModelWrapper"]
| true |
971dbb8d8330e10207e2709768343b8538f86c64 | Python | BerryAI/Acai | /OpenMRS/data/__init__.py | UTF-8 | 1,063 | 2.625 | 3 | [
"MIT"
] | permissive | import json
from os import path
import sys
from OpenMRS.catalog import Track
# Directory of this package; the example JSON fixtures live alongside it.
CWD = path.dirname(path.realpath(__file__))


def get_example_ratings():
    """
    Returned `data` is a dict of the following structure:

        user_id_1: {
            track_id_1: rating_1,
            track_id_2: rating_2, ...
        } ...

    (Python 2 code: dict.items() returns a list, so the slice below works.)
    """
    data = json.load(open(path.join(CWD, 'acai_game_user_ratings.json')))
    data = dict(data.items()[:5])  # TODO: remove this! (keeps only 5 users, for testing)
    return data
def get_example_tracks(ratings=None):
    """Build a Track for every track id that appears in *ratings*.

    Falls back to get_example_ratings() when no ratings are supplied; track
    ids missing from tracks.json get placeholder metadata.
    (Python 2 code: uses dict.iteritems(); returns a list via .values().)
    """
    ratings = ratings or get_example_ratings()
    saved_tracks = json.load(open(path.join(CWD, 'tracks.json')))
    tracks = {}
    for _, rating_per_user in ratings.iteritems():
        for track_id in rating_per_user:
            # fall back to a stub record when the track is not in tracks.json
            track_data = saved_tracks.get(track_id) or {
                'id': track_id,
                'title':'Track %s' % track_id,
                'artist': 'Unknown',
                'streaming_url': None
            }
            tracks[track_id] = Track(track_data=track_data, source='example')
    return tracks.values()
| true |
fbd47ab95f9d19dae0e99df9c7290c2e28da9b8d | Python | Priyansh-Kedia/MLMastery | /34_combine_under_over_sampling.py | UTF-8 | 752 | 3.34375 | 3 | [] | no_license |
# The example below demonstrates how to use the SMOTEENN that combines
# both SMOTE oversampling of the minority class and Edited Nearest
# Neighbors undersampling of the majority class.
# example of both undersampling and oversampling
from collections import Counter
from sklearn.datasets import make_classification
from imblearn.combine import SMOTEENN
# generate dataset
X, y = make_classification(n_samples=10000, n_features=2, n_redundant=0, n_clusters_per_class=1, weights=[0.99, 0.01], flip_y=0)
# summarize class distribution
print(Counter(y))
# define sampling strategy
sample = SMOTEENN(sampling_strategy=0.5)
# fit and apply the transform
X_over, y_over = sample.fit_resample(X, y)
# summarize class distribution
print(Counter(y_over)) | true |
213fbd80dd698b20bc9638a051228805d6a29b6f | Python | taehwan920/Algorithm | /baekjoon/2667_complex_numbering.py | UTF-8 | 1,084 | 3.3125 | 3 | [] | no_license | import sys
from collections import deque

# BOJ 2667: read an n x n binary grid and report, in ascending order, the
# size of every connected component ("complex") of 1-cells.


def bfs(x, y):
    """Flood-fill from (x, y); return the number of cells in its component."""
    queue = deque([(x, y)])
    visited[x][y] = True
    size = 1  # count the seed cell itself; without it 1-cell complexes report 0
    while queue:
        cx, cy = queue.popleft()
        for dx, dy in ((0, 1), (0, -1), (1, 0), (-1, 0)):
            nx, ny = cx + dx, cy + dy
            if 0 <= nx < n and 0 <= ny < n and house[nx][ny] and not visited[nx][ny]:
                visited[nx][ny] = True
                size += 1
                queue.append((nx, ny))
    return size


n = int(sys.stdin.readline())
house = [list(map(int, input())) for _ in range(n)]
visited = [[False] * n for _ in range(n)]

result = sorted(bfs(i, j)
                for i in range(n) for j in range(n)
                if house[i][j] and not visited[i][j])
print(len(result))
for size in result:
    print(size)
| true |
8131a94aae3e036c12e992b597ccbda69f154727 | Python | eduardbadillo/video_tools | /video_tools/count.py | UTF-8 | 831 | 2.71875 | 3 | [
"MIT"
] | permissive | from collections import Counter
from pathlib import Path
import click
from .utils import print_style
@click.group()
def count_group():
    # Click group container; the `count` command below attaches to it via
    # @count_group.command().  (No docstring on purpose: click would show it
    # as help text.)
    pass
@count_group.command()
@click.option("-v", "--verbose", default=False, is_flag=True)
@click.argument("initial_path", type=click.Path(exists=True))
@click.argument("extensions", required=False, nargs=-1)
def count(initial_path, extensions, verbose):
    # Tally every file below initial_path by lower-cased suffix.
    root = Path(initial_path)
    tally = Counter(entry.suffix.lower() for entry in root.glob("**/*.*"))
    # Without explicit extensions, report every suffix that was seen.
    selected = extensions if extensions else list(tally.keys())
    for ext in selected:
        print_style(f"Total {ext} files: {tally[ext.lower()]}")
        if verbose:
            # List the matching files themselves.
            for entry in root.glob(f"**/*{ext}"):
                print_style(str(entry), fg="blue")
| true |
3f08f7951a41a0020e1b3308353931b8fb81e252 | Python | LOOKCC/2018-seed-cup | /glu/utils/dataset.py | UTF-8 | 2,305 | 2.515625 | 3 | [] | no_license | import torch
import torch.utils.data as data
class TrainDataset(data.Dataset):
    """Dataset yielding (title, desc, cate1, cate2, cate3, t_len, d_len) per row."""

    def __init__(self, title, desc, cate1, cate2, cate3, t_len, d_len):
        self.title = title
        self.desc = desc
        self.cate1 = cate1
        self.cate2 = cate2
        self.cate3 = cate3
        self.t_len = t_len
        self.d_len = d_len

    def __getitem__(self, idx):
        fields = (self.title, self.desc,
                  self.cate1, self.cate2, self.cate3,
                  self.t_len, self.d_len)
        return tuple(field[idx] for field in fields)

    def __len__(self):
        # number of samples == first dimension of the title tensor
        return self.title.size(0)
class EvalDataset(data.Dataset):
    """Dataset with word- and char-level inputs plus labels, for evaluation."""

    def __init__(self, w_title, w_desc, c_title, c_desc,
                 cate1, cate2, cate3,
                 w_t_len, w_d_len, c_t_len, c_d_len):
        self.w_title = w_title
        self.w_desc = w_desc
        self.c_title = c_title
        self.c_desc = c_desc
        self.cate1 = cate1
        self.cate2 = cate2
        self.cate3 = cate3
        self.w_t_len = w_t_len
        self.w_d_len = w_d_len
        self.c_t_len = c_t_len
        self.c_d_len = c_d_len

    def __getitem__(self, idx):
        fields = (self.w_title, self.w_desc, self.c_title, self.c_desc,
                  self.cate1, self.cate2, self.cate3,
                  self.w_t_len, self.w_d_len, self.c_t_len, self.c_d_len)
        return tuple(field[idx] for field in fields)

    def __len__(self):
        # number of samples == first dimension of the word-title tensor
        return self.w_title.size(0)
class TestDataset(data.Dataset):
    """Dataset with word- and char-level inputs only (no labels), for inference."""

    def __init__(self, w_title, w_desc, c_title, c_desc,
                 w_t_len, w_d_len, c_t_len, c_d_len):
        self.w_title = w_title
        self.w_desc = w_desc
        self.c_title = c_title
        self.c_desc = c_desc
        self.w_t_len = w_t_len
        self.w_d_len = w_d_len
        self.c_t_len = c_t_len
        self.c_d_len = c_d_len

    def __getitem__(self, idx):
        fields = (self.w_title, self.w_desc, self.c_title, self.c_desc,
                  self.w_t_len, self.w_d_len, self.c_t_len, self.c_d_len)
        return tuple(field[idx] for field in fields)

    def __len__(self):
        # number of samples == first dimension of the word-title tensor
        return self.w_title.size(0)
def padding(seq, max_len):
    """Right-pad each sequence in *seq* with zeros into a (len(seq), max_len) LongTensor."""
    out = torch.zeros((len(seq), max_len), dtype=torch.long)
    for row, values in zip(out, seq):
        row[:len(values)] = torch.tensor(values)
    return out
| true |
53085b9b3107326e4afcf86b71f6a4a5129d3262 | Python | evereux/pycatia | /pycatia/mec_mod_interfaces/hybrid_shape.py | UTF-8 | 4,426 | 2.671875 | 3 | [
"MIT"
] | permissive | #! usr/bin/python3.9
"""
Module initially auto generated using V5Automation files from CATIA V5 R28 on 2020-06-11 12:40:47.360445
.. warning::
The notes denoted "CAA V5 Visual Basic Help" are to be used as reference only.
They are there as a guide as to how the visual basic / catscript functions work
and thus help debugging in pycatia.
"""
from pycatia.system_interfaces.any_object import AnyObject
class HybridShape(AnyObject):
    """Thin pycatia wrapper around the CATIA V5 ``HybridShape`` COM object.

    Mirrors the V5 Automation hierarchy (System.AnyObject -> HybridShape);
    the CAA V5 Visual Basic help is the authoritative reference for the
    wrapped calls.
    """

    def __init__(self, com_object):
        super().__init__(com_object)
        # keep a direct handle on the underlying COM object
        self.hybrid_shape = com_object

    @property
    def thickness(self) -> 'HybridShape':
        """HybridShape: the thickness of the hybrid shape (read-only).

        CAA: ``Property Thickness() As HybridShape``.  The COM object returned
        is a CATIAHybridShapeThickness, re-wrapped here as a HybridShape.
        """
        return HybridShape(self.hybrid_shape.Thickness)

    def append_hybrid_shape(self, i_hybrid_shape: 'HybridShape') -> None:
        """Append another hybrid shape to this one.

        CAA: ``Sub AppendHybridShape(HybridShape iHybridShape)``.

        :param HybridShape i_hybrid_shape: the hybrid shape to append
        :return: None
        """
        # NOTE: should the COM call ever require a VB array argument, route it
        # through a system-service VBA snippet (see pycatia's evaluate()).
        return self.hybrid_shape.AppendHybridShape(i_hybrid_shape.com_object)

    def compute(self) -> None:
        """Compute the result of the hybrid shape (CAA: ``Sub Compute()``)."""
        return self.hybrid_shape.Compute()

    def __repr__(self):
        return f'HybridShape(name="{self.name}")'
| true |
ff618fa80d39c9b2a4b9fea465ea57dc849adcef | Python | peipeiwang6/Evaluating-misassembly | /14_01_reshuffle_HC_LC_BG.py | UTF-8 | 2,069 | 2.828125 | 3 | [] | no_license | '''
input 1: path to save your output files
input 2: HC regions with high confidence
input 3: LC regions with high confidence
input 4: length of chromosomes
'''
import sys,os
import random
import decimal

path = sys.argv[1]        # output directory (used as a filename prefix)
HC_file = sys.argv[2]     # HC regions with high confidence
LC_file = sys.argv[3]     # LC regions with high confidence
Length_chr = sys.argv[4]  # chromosome length table (tab separated)

# D maps "HC__<region>" / "LC__<region>" -> region length.
# BUG FIX: D was first assigned inside the HC loop but only created (D = {})
# *between* the two loops, so the script died with a NameError -- and even if
# it had not, the reset would have discarded every HC entry.
D = {}
for inl in open(HC_file, 'r').readlines():
    region = inl.split('\t')[1]
    Length = int(decimal.Decimal(inl.split('\t')[2]))
    D['HC__' + region] = Length
for inl in open(LC_file, 'r').readlines():
    region = inl.split('\t')[1]
    Length = int(decimal.Decimal(inl.split('\t')[2]))
    D['LC__' + region] = Length

# sort the HC and LC regions, from the longest to the shortest
key_sorted = sorted(D, key=D.get, reverse=True)
# C maps chromosome name -> chromosome length.
chromo = open(Length_chr, 'r').readlines()
C = {}
for inl in chromo:
    l = int(inl.strip().split('\t')[1])
    chr = inl.split('\t')[0]
    C[chr] = l

# L maps "chr-pos" -> pos for every candidate start position (every 100 bp);
# entries are consumed as regions get placed.  The table was already read
# above, so reuse `chromo` instead of opening the file a second time (the
# original also created a throwaway L = {} that was immediately overwritten).
L = {}
for inl in chromo:
    b = int(inl.strip().split('\t')[1])
    chr = inl.split('\t')[0]
    for ll in range(1, b, 100):
        L['%s-%s' % (chr, ll)] = ll
def LOCUS(locus, x):
    """Remove every candidate position in *x* (on *locus*'s chromosome) from L."""
    chrom = locus.split('-')[0]
    for pos in x:
        L.pop('%s-%s' % (chrom, pos))
def LOCUS_in_L(locus, x):
    """Return 'T' if every candidate position in *x* is still free in L, else 'F'.

    BUG FIX: the original left ``res`` unassigned when *x* was empty and then
    raised NameError on ``return res``; an empty *x* now yields 'T'
    (vacuously true), and the scan stops at the first occupied position.
    """
    chrom = locus.split('-')[0]
    for pos in x:
        if '%s-%s' % (chrom, pos) not in L:
            return 'F'
    return 'T'
# For each of 1000 replicates, place every region (longest first) at a random
# still-free position and record the placement in its own result file.
for i in range(1,1001):
    out = open(path + 'Randomly_choose_HC_LC_result_%s.txt'%(i),'w')
    for r in key_sorted:
        n = 0
        # Retry until a start position is drawn where the whole region fits.
        # NOTE(review): positions removed by LOCUS() are never restored, so
        # later replicates draw from an ever-shrinking pool -- confirm this is
        # the intended sampling scheme; if no position can fit the region,
        # this loop never terminates.
        while n == 0:
            y = 0
            locus = random.choice(list(L.keys()))
            # 1-based inclusive end coordinate of the candidate placement
            length = int(locus.split('-')[1]) + D[r] -1
            if length <= C[locus.split('-')[0]] and LOCUS_in_L(locus,range(L[locus],length,100))=='T':
                out1 = r.split('__')[0].strip()
                out2 = locus.split('-')[0].strip()
                out3 = locus.split('-')[1].strip()
                out4 = length
                out5 = D[r]
                out.write('%s\t%s:%s-%s\t%s\n'%(out1,out2,out3,out4,out5))
                print(out1+"\t"+out2+':'+out3+'-'+str(out4)+'\t'+str(out5))
                # mark every 100 bp slot covered by the placed region as used
                LOCUS(locus,range(L[locus],length,100))
                n = 1
            else:
                n = 0
    out.close()
| true |
663a7d6314bfe104aaec6bc3eb134629a793e81a | Python | JetBrains/intellij-community | /python/testData/inspections/PyTypeCheckerInspection/PropertyAndFactoryFunction.py | UTF-8 | 194 | 2.640625 | 3 | [
"Apache-2.0"
] | permissive | class C(object):
    # PyTypeCheckerInspection test data: the <warning ...> markup delimits the
    # span where the IDE is expected to report, so this file is intentionally
    # not runnable Python.
    @property
    def foo(self):
        return 'bar'
def f():
    return C()
def test():
    f().foo + <warning descr="Expected type 'AnyStr', got 'int' instead">1</warning>
| true |
4a7dd148b045529c1b35ac76542079b4d7d9239f | Python | sarahbkim/cracking_python | /searching/breadth_first_search.py | UTF-8 | 2,169 | 3.453125 | 3 | [] | no_license | # from pythonds.graph import Graph, Vertex
# from pythonds import Queue
from collections import deque
'''
run time is O(V) where V is the vertex
'''
def bfs(g, start):
    """Breadth-first search from *start* over a pythonds-style Graph.

    Labels every reachable vertex with its distance from *start*, its
    predecessor, and a colour (white = unseen, gray = queued, black = done).
    Runtime is O(V + E).
    """
    start.set_distance(0)
    start.set_pred(None)
    vertQueue = Queue()
    vertQueue.enqueue(start)
    while vertQueue.size() > 0:
        currentVert = vertQueue.dequeue()
        for nbr in currentVert.get_connections():
            if (nbr.get_color() == 'white'):
                nbr.set_color('gray')
                # FIX: was currentVert.getDistance() (camelCase), which does
                # not exist on the snake_case Vertex API used everywhere else
                # in this function (get_connections, get_color, set_distance).
                nbr.set_distance(currentVert.get_distance() + 1)
                nbr.set_pred(currentVert)
                vertQueue.enqueue(nbr)
        currentVert.set_color('black')
def build_graph(word_file):
    """Build a word-ladder graph from the newline-separated words in *word_file*.

    Words that differ in exactly one letter are connected by an edge.

    FIX: the original built the Graph and then dropped it (no return) and
    never closed the word file; it now returns the Graph.
    """
    d = {}
    g = Graph()
    wfile = open(word_file, 'r')
    # create buckets of words that differ by 1 letter: each word joins one
    # bucket per position, keyed by the word with that position wildcarded
    for line in wfile:
        word = line[:-1]
        for i in range(len(word)):
            bucket = word[:i] + '_' + word[i+1:]
            if bucket in d:
                d[bucket].append(word)
            else:
                d[bucket] = [word]
    wfile.close()
    # vertices and edges for every pair of distinct words in the same bucket
    for bucket in d.keys():
        for word1 in d[bucket]:
            for word2 in d[bucket]:
                if word1 != word2:
                    g.add_edge(word1, word2)
    return g
# NOTE(review): runs at import time and requires ./dictionary.txt to exist;
# the graph it builds is discarded here.
build_graph('dictionary.txt')

# another example: small adjacency-list graph used by my_bfs below
graph = {'A': ['B','E','C'],
         'B': ['A','C'],
         'C': ['D'],
         'D': ['C'],
         'E': ['F','D'],
         'F': ['C']}
def my_bfs(graph, start, end):
    """Return the shortest path from *start* to *end* in an adjacency-list
    graph, or None when *end* is unreachable.

    FIX: the original tracked the "path" per edge and constantly reset it, so
    it kept no proper visited set (vertices could be re-enqueued forever on
    cyclic graphs), never handled start == end, and gave no shortest-path
    guarantee.  This version is a standard BFS with a parent map, from which
    the path is reconstructed once *end* is dequeued.
    """
    parents = {start: None}  # doubles as the visited set
    q = deque([start])
    while q:
        curr = q.popleft()
        if curr == end:
            # walk the parent chain back to start, then reverse it
            path = []
            while curr is not None:
                path.append(curr)
                curr = parents[curr]
            return path[::-1]
        for item in graph.get(curr, []):
            if item not in parents:
                parents[item] = curr
                q.append(item)
    return None
| true |
4fb41b96b8345123b3ab3c2d37cd5184ad32491f | Python | ramonvaleriano/python- | /Livros/Introdução à Programação - 500 Algoritmos resolvidos/Capitulo 4/Exercicios 4a/Algoritmo236_Para63.py | UTF-8 | 226 | 3.21875 | 3 | [
"MIT"
] | permissive | # Program: Algoritmo236_Para63.py
# Author: Ramon R. Valeriano
# Descritption:
# Developed: 02/04/2020 - 23:01
# Updated:

# Adds 1/n to itself n times; mathematically the result is 1, printed with
# floating-point rounding error.
number = int(input("Enter with the number: "))
total = 0
for _ in range(number):
    total += 1 / number
print(total)
| true |
86f9a42c30f749f733485fad0b4f2cf048d2ada7 | Python | ENCODE-DCC/wgot | /wgot/utils.py | UTF-8 | 10,229 | 2.625 | 3 | [
"Apache-2.0"
] | permissive | # Copyright 2013 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from datetime import datetime
import mimetypes
import hashlib
import math
import os
import sys
from collections import namedtuple, deque
from functools import partial
from email.utils import mktime_tz, parsedate_tz
from .constants import MAX_PARTS
from .constants import MAX_SINGLE_UPLOAD_SIZE
from .compat import PY3
from .compat import queue
class MD5Error(Exception):
    """Raised when a computed MD5 digest does not match the expected etag."""
class StablePriorityQueue(queue.Queue):
    """Priority queue that keeps FIFO order among items of equal priority.

    Queued objects may carry an integer ``PRIORITY`` attribute in
    ``[0, max_priority]``; lower numbers are served first.  Items without a
    ``PRIORITY`` (or with one above ``max_priority``) are treated as having
    the lowest priority.  ``get()`` is O(max_priority), so keep
    ``max_priority`` small.

    Written for awscli.customizations.s3.tasks but usable on its own.
    """

    def __init__(self, maxsize=0, max_priority=20):
        # old-style call kept for Python 2 compatibility (Queue.Queue is a
        # classic class there, so super() would fail)
        queue.Queue.__init__(self, maxsize=maxsize)
        # one FIFO bucket per priority level; index == priority
        self.priorities = [deque([]) for i in range(max_priority + 1)]
        self.default_priority = max_priority

    def _qsize(self):
        return sum(len(bucket) for bucket in self.priorities)

    def _put(self, item):
        level = getattr(item, 'PRIORITY', self.default_priority)
        self.priorities[min(level, self.default_priority)].append(item)

    def _get(self):
        # serve the first non-empty bucket, lowest priority number first
        for bucket in self.priorities:
            if bucket:
                return bucket.popleft()
def get_file_stat(path):
    """
    Return ``(size_in_bytes, last_modified)`` for *path*, where
    ``last_modified`` is a timezone-aware datetime in the local timezone.

    Raises ValueError when the file cannot be stat'ed.
    """
    try:
        stats = os.stat(path)
        # FIX: the original called tzlocal(), which is never imported in this
        # module (it comes from dateutil.tz) and raised NameError.
        # astimezone() on a naive local datetime attaches the local timezone
        # using the standard library only (Python 3.3+).
        update_time = datetime.fromtimestamp(stats.st_mtime).astimezone()
    except (ValueError, IOError) as e:
        raise ValueError('Could not retrieve file stat of "%s": %s' % (
            path, e))
    return stats.st_size, update_time
def check_etag(etag, fileobj):
    """
    Verify *fileobj*'s contents against *etag* to catch corruption in
    transfer, raising MD5Error on a mismatch.  Multipart etags (containing
    '-') are not plain MD5 digests, so they are not compared.
    """
    digest = hashlib.md5()
    # hash the file in 1 MiB chunks until EOF
    for chunk in iter(partial(fileobj.read, 1024 * 1024), b''):
        digest.update(chunk)
    if '-' not in etag and etag != digest.hexdigest():
        raise MD5Error
def check_error(response_data):
    """
    Raise an Exception carrying the service error message when
    *response_data* contains an 'Error' entry; do nothing otherwise.
    """
    if response_data and 'Error' in response_data:
        raise Exception("Error: %s\n" % response_data['Error']['Message'])
def create_warning(path, error_message):
    """
    Build the PrintTask used to surface a non-fatal warning about *path*.
    """
    text = "warning: Skipping file %s. %s" % (path, error_message)
    return PrintTask(message=text, error=False, warning=True)
def find_chunksize(size, current_chunksize):
    """
    Grow *current_chunksize* (by doubling) until *size* fits into at most
    ``MAX_PARTS`` parts, capping the result at ``MAX_SINGLE_UPLOAD_SIZE``.
    """
    chunksize = current_chunksize
    while int(math.ceil(size / float(chunksize))) > MAX_PARTS:
        chunksize *= 2
    return min(chunksize, MAX_SINGLE_UPLOAD_SIZE)
class MultiCounter(object):
    """Mutable counter tracking how many multipart operations (and their
    part operations) are currently in progress."""

    def __init__(self):
        self.count = 0
def uni_print(statement, out_file=None):
    """
    Write *statement* (possibly unicode) to *out_file* -- usually stdout or
    stderr -- encoding explicitly where needed so the proper codec is used.
    """
    if out_file is None:
        out_file = sys.stdout
    # Check for an encoding on the file.
    encoding = getattr(out_file, 'encoding', None)
    if encoding is not None and not PY3:
        # Python 2 file object with a declared encoding: encode explicitly.
        out_file.write(statement.encode(out_file.encoding))
    else:
        try:
            out_file.write(statement)
        except UnicodeEncodeError:
            # Some file like objects like cStringIO will
            # try to decode as ascii. Interestingly enough
            # this works with a normal StringIO.
            out_file.write(statement.encode('utf-8'))
    # flush so the message is visible immediately (progress output)
    out_file.flush()
def bytes_print(statement):
    """
    Write raw bytes to standard out (via sys.stdout.buffer on Python 3).
    """
    if PY3:
        if getattr(sys.stdout, 'buffer', None):
            sys.stdout.buffer.write(statement)
        else:
            # If it is not possible to write to the standard out buffer.
            # The next best option is to decode and write to standard out.
            sys.stdout.write(statement.decode('utf-8'))
    else:
        # Python 2: stdout accepts byte strings directly.
        sys.stdout.write(statement)
def guess_content_type(filename):
    """Return the guessed MIME type for *filename*, or None when unknown."""
    content_type, _encoding = mimetypes.guess_type(filename)
    return content_type
def relative_path(filename, start=os.path.curdir):
    """Cross platform relative path of a filename.

    Falls back to the absolute path when no relative path can be calculated
    (e.g. different drives on Windows) instead of raising ValueError.
    """
    dirname, basename = os.path.split(filename)
    try:
        return os.path.join(os.path.relpath(dirname, start), basename)
    except ValueError:
        return os.path.abspath(filename)
def date_parser(date_string):
    """Parse an RFC 2822 date string into a naive local-time datetime."""
    utc_timestamp = mktime_tz(parsedate_tz(date_string))
    return datetime.fromtimestamp(utc_timestamp)
class PrintTask(namedtuple('PrintTask', 'message error total_parts warning')):
    """Record describing a result line to report to the user.

    message: arbitrary text associated with the entry (e.g. a task result).
    error: True when the task failed.
    total_parts: total number of parts for multipart transfers (or None).
    warning: True when the entry is a warning.
    """

    def __new__(cls, message, error=False, total_parts=None, warning=None):
        return super(PrintTask, cls).__new__(
            cls, message, error, total_parts, warning)
# Single write destined for the IO thread: write *data* at *offset* of
# *filename*; is_stream marks streaming (stdout-style) output.
IORequest = namedtuple('IORequest',
                       ['filename', 'offset', 'data', 'is_stream'])
# Used to signal that IO for the filename is finished, and that
# any associated resources may be cleaned up.
IOCloseRequest = namedtuple('IOCloseRequest', ['filename'])
class IncompleteReadError(Exception):
    """HTTP response did not return expected number of bytes."""
    fmt = ('{actual_bytes} read, but total bytes '
           'expected is {expected_bytes}.')

    def __init__(self, **kwargs):
        # keep the raw values around for callers that want them
        self.kwargs = kwargs
        Exception.__init__(self, self.fmt.format(**kwargs))
class StreamingBody(object):
    """Wrapper class for an http response body.

    Tracks how many bytes have been read and, once the stream is exhausted
    (or read in full), checks the total against the Content-Length header,
    raising IncompleteReadError on a mismatch -- a convenience the underlying
    urllib3 model does not provide.
    """

    def __init__(self, response):
        self._raw_stream = response.raw
        self._content_length = response.headers.get('content-length')
        self._amount_read = 0

    def read(self, amt=None):
        chunk = self._raw_stream.read(amt)
        self._amount_read += len(chunk)
        # An empty chunk (EOF) or a full read means the body is complete,
        # so the byte count can be verified against Content-Length.
        if amt is None or not chunk:
            self._verify_content_length()
        return chunk

    def _verify_content_length(self):
        expected = self._content_length
        if expected is None:
            return
        if self._amount_read != int(expected):
            raise IncompleteReadError(
                actual_bytes=self._amount_read,
                expected_bytes=int(expected))
def _validate_content_length(expected_content_length, body_length):
    # Our http library does not validate Content-Length for us
    # (see https://github.com/kennethreitz/requests/issues/1855),
    # so the comparison has to happen here.
    if expected_content_length is None:
        return
    if int(expected_content_length) != body_length:
        raise IncompleteReadError(
            actual_bytes=body_length,
            expected_bytes=int(expected_content_length))
| true |
f210d8d620cacc5dc8a176efad0a976877e90d08 | Python | bedusing/leetcode | /dynamic/subsequence.py | UTF-8 | 1,232 | 3.890625 | 4 | [] | no_license | # coding:utf-8
def the_longest_incr_sub_seq(seq):
    """Return the length of the longest strictly increasing subsequence (LIS).

    For example, for 1 7 2 8 3 4 the LIS is 1 2 3 4, so the answer is 4;
    an empty sequence yields 0.

    Let F[k] be the LIS length ending exactly at index k.  Then
    F[k] = 1 + max(F[i] for i < k if seq[i] < seq[k], default 0),
    and the answer is max(F).  O(n^2) time, O(n) space.

    FIXES over the original memoised recursion: it used Python 2's ``xrange``,
    and when seq[k] could NOT extend the subsequence ending at i it still took
    F[i] as a candidate for F[k], inflating later values (e.g. it returned 4
    for [1, 2, 3, 0, 1.5], whose true answer is 3).
    """
    if not seq:
        return 0
    f = [1] * len(seq)
    for k in range(1, len(seq)):
        for i in range(k):
            if seq[i] < seq[k]:
                f[k] = max(f[k], f[i] + 1)
    return max(f)
print the_longest_incr_sub_seq([1, 7, 2, 8, 3, 4])
| true |
f6d0e08c3c4413af24f48c2f8c0347708665f59b | Python | ivyfangqian/python-learning | /huice_day3/exercise.py | UTF-8 | 3,645 | 4.3125 | 4 | [] | no_license | # -*-coding:utf-8-*-
# Exercise 1: read a year and print whether it is a leap year.
# A leap year is divisible by 4 but not by 100 (2008 yes, 1900 no),
# or divisible by 400 (e.g. 2000).
# NOTE(review): Python 2 script (print statements); input() eval's the typed
# text, which is the only reason the int type check below can succeed.
year = input('请输入年份:')
if type(year).__name__ != 'int':
    print '请输入正确年份'
else:
    if (year % 4 == 0 and year % 100 != 0) or (year % 400 == 0):
        print '%d 是闰年' % year
    else:
        print '%d 不是闰年' % year
# Exercise 2: print the lower-triangular multiplication table,
# from 1x1=1 up to 9x9=81, one row per multiplier:
#   1x1=1
#   1x2=2 2x2=4
#   ...
#   1x9=9 2x9=18 ... 9x9=81
for i in range(1, 10):  # i: row number / second factor
    for j in range(1, i + 1):  # j: first factor, runs 1..i
        # Python 2: the trailing comma suppresses the newline
        print str(j) + '*' + str(i) + '=' + str(j * i) + '\t',
    print '\n'
# 3、打印1-1000中的所有素数
# 素数,除了1和它本身以外不再有其他因数的数称为素数。
primeList = []
for num in range(1, 1001):
for i in range(2, num):
if (num % i) == 0:
break
else:
primeList.append(num)
print primeList
# Exercise 4: find the narcissistic numbers in 100-999.
# A three-digit number abc is narcissistic when a^3 + b^3 + c^3 == abc,
# e.g. 153 = 1^3 + 5^3 + 3^3.
for number in range(100, 1000):
    a = number // 100
    b = (number - a * 100) // 10
    c = number - a * 100 - b * 10
    # does the sum of the cubes of the three digits equal the number itself?
    if a ** 3 + b ** 3 + c ** 3 == number:
        print number, 'is narcissus number.'
# Exercise 5: local-call billing -- 0.2 yuan for the first three minutes,
# then 0.2 yuan per *started* extra minute; compute the fee for a call
# duration given in seconds.
chat_time = 120
mobile_cost = 0.2
if chat_time > 0 and chat_time<=180:
    mobile_cost = 0.2
if chat_time > 180:
    if (chat_time - 180) % 60 == 0:
        mobile_cost = mobile_cost + 0.2 * ((chat_time - 180) // 60)
    else:
        # a partial extra minute is billed as a full minute
        mobile_cost = mobile_cost + 0.2 * ((chat_time - 180) // 60) + 0.2
else:
    # NOTE(review): the fee is only printed on this (<= 180 s) branch; calls
    # longer than 180 s compute mobile_cost but never print it -- this looks
    # like a bug to confirm with the exercise author.
    print mobile_cost
# 6、某市的出租车计费标准为:
# 3公里内10元,3公里以后每增加0.5公里加收1元,每等待2分钟加收1元
# 超过15公里加收原价50%的空驶费
# 每个写成一个函数
# 要求编写程序,对于任意给定的里程数(单位:km)和等待时间(单位:s)计算出应付车费
def taxi_cost(length, waitTime):
    """Return the taxi fare for *length* km with *waitTime* seconds of waiting.

    Pricing: 10 yuan for the first 3 km, then 1 yuan per *started* 0.5 km;
    1 yuan per started 2 minutes of waiting (waits under 2 minutes are free);
    trips of 15 km or more pay a 50% empty-return surcharge on the whole fare.
    Prints a message and returns None for non-numeric input.
    """
    # base fare covers the first 3 km
    cost = 10
    # validate the distance (prints kept; now py2/py3-compatible print calls)
    if type(length).__name__ != 'int' and type(length).__name__ != 'float':
        print('里程数输入不合法')
        return
    # validate the waiting time
    if type(waitTime).__name__ != 'int' and type(waitTime).__name__ != 'float':
        print('等待时间输入不合法')
        return
    if length > 3:
        # FIX: charge one whole yuan per *started* 0.5 km unit; the original
        # used true division, adding a fractional amount for partial units
        # (e.g. 3.3 km cost 11.6 instead of 11).
        units = (length - 3) / 0.5
        cost += int(units) if units == int(units) else int(units) + 1
    if waitTime >= 120:
        # one yuan per started 2-minute waiting period
        full = int(waitTime // 120)
        cost += full if waitTime % 120 == 0 else full + 1
    if length >= 15:
        # 50% empty-return surcharge on the whole fare
        cost = cost * 1.5
    return cost
print taxi_cost(12, 120)
| true |
60296a2f34baada3a0833d9d4e365679ca6b1082 | Python | Holemar/notes | /_utils/python/libs_my/abandon/version.py | UTF-8 | 3,841 | 2.734375 | 3 | [] | no_license | #!python
# -*- coding:utf-8 -*-
"""
Created on 2014/8/29
Updated on 2019/1/18
@author: Holemar
本模块专门供监控、调试用
"""
import os
import sys
import time
import types
import logging
from __init__ import *
from libs_my import str_util, html_util
__all__=('init', 'get_version')
logger = logging.getLogger('libs_my.version')
# 请求默认值
CONFIG = {
'version' : None, # {string} 版本号
'db_fun': None, # {Function|list<Function>} 检查数据库连接是否正常的函数(需要空参,可直接调用,如 mysql_util.ping)
}
def init(**kwargs):
"""
设置get和post函数的默认参数值
:param {string} version: 版本号
:param {Function|list<Function>} db_fun: 检查数据库连接是否正常的函数(需要空参,可直接调用,如 mysql_util.ping)
"""
global CONFIG
CONFIG.update(kwargs)
#def get_version(version, db_fun=None, **kwargs):
def get_version(*args, **kwargs):
    '''
    Report the code version as a dict (intended for monitoring / debugging).

    :param {string} version: version string of this service
    :param {Function|list<Function>} db_fun: zero-argument callable(s) that
        check database connectivity (e.g. mysql_util.ping); pass a list to
        check several databases (e.g. redis + mysql)
    :return {dict}: {
        "result": {int} status code,  # 0: ok, -1: database problem, 500: internal error
        "reason": {string} human-readable status description,
        "version": {string} version string of this program,
        "update_time": {string} last modification time of this file,  # "yyyy-MM-dd HH:mm:ss"
        "now": {string} current system time,  # "yyyy-MM-dd HH:mm:ss"
        "use_time": {string} seconds this call took,
    }
    :example
        version_info = version.get_version(version="agw 1.2.0", db_fun=[cache_redis.ping, mysql_util.ping])
    '''
    global CONFIG
    try:
        start_time = time.time()
        # positional args win over keyword args, which win over module defaults
        version = args[0] if len(args) >= 1 else kwargs.pop('version', CONFIG.get('version'))
        db_fun = args[1] if len(args) >= 2 else kwargs.pop('db_fun', CONFIG.get('db_fun'))

        # probe the database connection(s)
        db_success = True
        if db_fun:
            if isinstance(db_fun, (list,tuple,set)):
                for fun in db_fun:
                    db_success = db_success & fun()
            elif isinstance(db_fun, types.FunctionType):
                db_success = db_fun()

        res = {
            'result' : 0 if db_success else -1, # 0: ok, -1: database problem
            'reason':u'访问成功' if db_success else u'数据库异常',
            'version' : version,
            'update_time': time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(os.path.getmtime(__file__))), # last modification time of this file
            'now' : time.strftime('%Y-%m-%d %H:%M:%S'), # current time, to verify the system clock
        }
        use_time = time.time() - start_time
        res["use_time"] = "%.4f" % use_time # elapsed seconds for this call
        # extra keyword arguments request a human-readable (HTML) rendering
        if kwargs:
            res = html_util.to_html(str_util.to_human(res))
        return res
    except Exception, e:
        # Python 2 except syntax: this module predates Python 3
        logger.error(u"[red]查询版本号出现异常[/red],%s: %s", e.__class__.__name__, e, exc_info=True, extra={'color':True})
        return {"result":500, "reason":u'查询出现异常,%s: %s' % (e.__class__.__name__, e) }
    # The abandoned alternative below collects the error info via sys.exc_info()
    # and behaves the same as the handler above:
    #info = sys.exc_info()
    #logger.error(u"查询版本号出现异常,%s: %s" % (info[0].__name__, info[1]), exc_info=True, extra={'color':True})
    #return {"result":500, "reason":u'查询出现异常,%s: %s' % (info[0].__name__, info[1]) }
| true |
33e2bb474611c87b57b1b69d850436ec892c9d9b | Python | KrishnaPrasath/GUVI- | /CodeKata/OddOrEven.py | UTF-8 | 197 | 3.734375 | 4 | [] | no_license | def oddOrEven(num):
if(num>0):
if num%2==0:
print("Even")
else:
print("Odd")
else:
print("Invalid")
num = int(input())
oddOrEven(num)
| true |
8f2b9cffa449cbc61958f927b9101f5278c01dee | Python | PrathibaKannan/codekata | /minutes_diff.py | UTF-8 | 262 | 2.8125 | 3 | [] | no_license | hr1=int(input())
min1 = int(input())
sec1 = int(input())
hr2 = int(input())
min2 = int(input())
sec2 = int(input())
# BUGFIX: the old code took the absolute difference of each component
# independently (and computed but never used the seconds), which is wrong
# whenever a smaller component must "borrow" from a larger one, e.g.
# 1:50:00 vs 2:10:00 gave 100 minutes instead of 20.  Convert both clock
# times to total seconds first, then take the difference.
total1 = hr1 * 3600 + min1 * 60 + sec1
total2 = hr2 * 3600 + min2 * 60 + sec2
# Whole minutes between the two times (leftover seconds truncate).
print(abs(total1 - total2) // 60)
| true |
6407395fa74971333ef5d5c846a0748df80e28d2 | Python | bendcrysler/twitter-project | /database_functionality.py | UTF-8 | 14,818 | 2.578125 | 3 | [] | no_license | import sqlite3
import tweepy
import random
from sqlite3 import Error
# functions to establish connection, create tables, and insert data
def create_db_connection(db_file):
    """Open (or create) the SQLite database at *db_file*.

    :param db_file: path to the database file (":memory:" for in-memory)
    :return: a sqlite3.Connection on success, or None when connecting
        raises sqlite3.Error
    """
    try:
        return sqlite3.connect(db_file)
    except Error:
        return None
def create_tables(cursor):
    """Create the four collection tables (tweets, users, hashtags, mentions)
    if they do not already exist.

    :param cursor: sqlite3 cursor on the project database
    :return: None -- schema errors are printed, not raised
    """
    try:
        # Core tweet storage; the retweet/quote/reply columns self-reference
        # tweets(id) so referenced tweets can be linked once collected.
        cursor.execute("""CREATE TABLE
                        IF NOT EXISTS tweets (
                        id text PRIMARY KEY NOT NULL,
                        tweet_text text NOT NULL,
                        user_id text NOT NULL REFERENCES users(id),
                        time_posted text NOT NULL,
                        hearts integer NOT NULL,
                        retweets integer NOT NULL,
                        retweeted_tweet text REFERENCES tweets(id),
                        quoted_tweet text REFERENCES tweets(id),
                        in_reply_to_tweet text REFERENCES tweets(id),
                        in_reply_to_user text REFERENCES users(id),
                        source_app text NOT NULL,
                        source_app_url text NOT NULL,
                        coordinates text,
                        place text,
                        language text NOT NULL,
                        contributors text,
                        truncated integer NOT NULL,
                        dev_heart integer NOT NULL,
                        dev_retweet integer NOT NULL
                        )""")
        # Author accounts; boolean flags are stored as 0/1 integers.
        cursor.execute("""CREATE TABLE
                        IF NOT EXISTS users (
                        id text PRIMARY KEY NOT NULL,
                        name text NOT NULL,
                        handle text NOT NULL,
                        location text,
                        description text NOT NULL,
                        website text,
                        protected integer NOT NULL,
                        followers integer NOT NULL,
                        following integer NOT NULL,
                        listed integer NOT NULL,
                        created_at text NOT NULL,
                        favorites integer NOT NULL,
                        utc_offset integer,
                        timezone text,
                        geo_enabled integer NOT NULL,
                        verified integer NOT NULL,
                        tweets integer NOT NULL,
                        language text NOT NULL,
                        contributors_enabled integer NOT NULL,
                        translator integer NOT NULL,
                        translation_enabled integer NOT NULL,
                        extended_profile integer NOT NULL,
                        default_profile integer NOT NULL,
                        default_avatar integer NOT NULL,
                        dev_follow integer NOT NULL,
                        translator_type text
                        )""")
        # Many-to-many link: each hashtag occurrence on a tweet, deduplicated
        # by the composite primary key.
        cursor.execute("""CREATE TABLE
                        IF NOT EXISTS hashtags (
                        tweet_id text NOT NULL REFERENCES tweets(id),
                        hashtag text NOT NULL,
                        PRIMARY KEY(tweet_id, hashtag)
                        )""")
        # Many-to-many link: each @mention on a tweet, deduplicated likewise.
        cursor.execute("""CREATE TABLE
                        IF NOT EXISTS mentions (
                        tweet_id text NOT NULL REFERENCES tweets(id),
                        mention_handle text NOT NULL REFERENCES users(handle),
                        PRIMARY KEY (tweet_id, mention_handle)
                        )""")
    except Error as e:
        print(e)
def insert_tweet(tweet, cursor, api):
    """Insert *tweet* into the tweets table, recursing into any tweet it
    retweets, quotes, or replies to, then store its author, hashtags and
    mentions.

    :param tweet: tweepy Status object to store
    :param cursor: sqlite3 cursor on the project database
    :param api: authenticated tweepy API handle, used to fetch referenced tweets
    :return: None -- duplicate tweet ids are silently skipped, other DB
        errors are printed
    """
    # Scalar columns present on every Status object.
    ID = tweet.id_str
    u_id = tweet.user.id_str
    text = tweet.text
    time_posted = str(tweet.created_at)
    hearts = tweet.favorite_count
    retweets = tweet.retweet_count
    source_app = tweet.source
    source_app_url = tweet.source_url
    language = tweet.lang
    hashtags = tweet.entities['hashtags']
    mentions = tweet.entities['user_mentions']

    # Native retweet: the full retweeted Status is embedded, so store it directly.
    if hasattr(tweet, 'retweeted_status'):
        retweeted_tweet = tweet.retweeted_status.id_str
        insert_tweet(tweet.retweeted_status, cursor, api)
    else:
        retweeted_tweet = None

    # Quote tweet: only the id is embedded; fetch the quoted Status via the API.
    if hasattr(tweet, 'quoted_status'):
        quoted_tweet = tweet.quoted_status_id_str
        insert_tweet(api.get_status(quoted_tweet), cursor, api)
    else:
        quoted_tweet = None

    # Reply links. BUGFIX: the old code tested hasattr(tweet,
    # 'reply_to_status_id') -- an attribute tweepy never sets -- so replies
    # were never recorded.  Status exposes in_reply_to_status_id_str /
    # in_reply_to_user_id_str, which are None when the tweet is not a reply.
    in_reply_to_tweet = getattr(tweet, 'in_reply_to_status_id_str', None)
    in_reply_to_user = getattr(tweet, 'in_reply_to_user_id_str', None)
    if in_reply_to_tweet is not None:
        try:
            insert_tweet(api.get_status(in_reply_to_tweet), cursor, api)
        except tweepy.TweepError as e:
            # The parent tweet may be deleted or protected; keep the id
            # reference but skip storing the unreachable tweet itself.
            print("Could not fetch replied-to tweet " + str(in_reply_to_tweet)
                  + " (tweepy error " + str(e.api_code) + ")")

    # Optional metadata.  BUGFIX: the old hasattr() checks are always true on
    # Status objects, so str(None) == 'None' was stored for tweets without
    # these fields; store SQL NULL instead.
    coordinates = getattr(tweet, 'coordinates', None)
    coordinates = str(coordinates) if coordinates is not None else None
    place = getattr(tweet, 'place', None)
    place = str(place) if place is not None else None
    contributors = getattr(tweet, 'contributors', None)
    contributors = str(contributors) if contributors is not None else None

    # Boolean flags are stored as 0/1 integers.
    truncated = 1 if tweet.truncated else 0
    dev_heart = 1 if tweet.favorited else 0
    dev_retweet = 1 if tweet.retweeted else 0

    query = "INSERT INTO tweets VALUES(:1, :2, :3, :4, :5, :6, :7, :8, :9, :10, :11, :12, :13, :14, :15, :16, :17, :18, :19)"
    variables = {'1': ID, '2': text, '3': u_id, '4': time_posted, '5': hearts,
                 '6': retweets, '7': retweeted_tweet, '8': quoted_tweet,
                 '9': in_reply_to_tweet, '10': in_reply_to_user,
                 '11': source_app, '12': source_app_url, '13': coordinates,
                 '14': place, '15': language, '16': contributors,
                 '17': truncated, '18': dev_heart, '19': dev_retweet}
    try:
        cursor.execute(query, variables)
    except Error as e:
        # A tweet stored on an earlier pass is not an error.
        if str(e).startswith("UNIQUE constraint failed"):
            pass
        else:
            print(e)

    insert_user(tweet.user, cursor)
    insert_hashtags(ID, hashtags, cursor)
    insert_mentions(ID, mentions, cursor, api)
    return None
def insert_user(user, cursor):
    """Insert a single user row into the users table.

    :param user: tweepy User object (or any object exposing the same
        attributes) to store
    :param cursor: sqlite3 cursor on the project database
    :return: None -- duplicate user ids are silently skipped, other DB
        errors are printed
    """
    # Scalar columns present on every User object.
    u_id = user.id_str
    u_name = user.name
    u_handle = user.screen_name
    u_description = user.description
    u_protected = user.protected
    u_followers = user.followers_count
    u_following = user.friends_count
    u_listed = user.listed_count
    u_created_at = user.created_at
    u_favorites = user.favourites_count
    u_tweets = user.statuses_count
    u_language = user.lang

    # Optional attributes: fall back to NULL when tweepy did not set them.
    u_location = getattr(user, 'location', None)
    u_website = getattr(user, 'url', None)
    u_utc_offset = getattr(user, 'utc_offset', None)
    u_timezone = getattr(user, 'timezone', None)
    u_translator_type = getattr(user, 'translator_type', None)

    # Boolean flags are stored as 0/1 integers.
    u_geo_enabled = 1 if user.geo_enabled else 0
    u_verified = 1 if user.verified else 0
    u_contributors_enabled = 1 if user.contributors_enabled else 0
    # BUGFIX: the old code stored 2 (not 0) for non-translators, corrupting
    # the translator flag; every other flag uses 0/1.
    u_is_translator = 1 if user.is_translator else 0
    u_translation_enabled = 1 if user.is_translation_enabled else 0
    u_extended_profile = 1 if user.has_extended_profile else 0
    u_default_profile = 1 if user.default_profile else 0
    u_default_avatar = 1 if user.default_profile_image else 0
    u_dev_follow = 1 if user.following else 0

    query = "INSERT INTO users VALUES(:1, :2, :3, :4, :5, :6, :7, :8, :9, :10, :11, :12, :13, :14, :15, :16, :17, :18, :19, :20, :21, :22, :23, :24, :25, :26)"
    variables = {'1': u_id, '2': u_name, '3': u_handle, '4': u_location,
                 '5': u_description, '6': u_website, '7': u_protected,
                 '8': u_followers, '9': u_following, '10': u_listed,
                 '11': u_created_at, '12': u_favorites, '13': u_utc_offset,
                 '14': u_timezone, '15': u_geo_enabled, '16': u_verified,
                 '17': u_tweets, '18': u_language, '19': u_contributors_enabled,
                 '20': u_is_translator, '21': u_translation_enabled,
                 '22': u_extended_profile, '23': u_default_profile,
                 '24': u_default_avatar, '25': u_dev_follow,
                 '26': u_translator_type}
    try:
        cursor.execute(query, variables)
    except Error as e:
        # A user stored on an earlier pass is not an error.
        if str(e).startswith("UNIQUE constraint failed"):
            pass
        else:
            print(e)
    return None
def insert_hashtags(tweet_ID, hashtags, cursor):
    """Record every hashtag attached to *tweet_ID* in the hashtags table.

    :param tweet_ID: id string of the tweet the hashtags belong to
    :param hashtags: tweet.entities['hashtags'] -- a list of {'text': ...} dicts
    :param cursor: sqlite3 cursor on the project database
    :return: None -- duplicate (tweet_id, hashtag) pairs are silently
        skipped, other DB errors are printed
    """
    query = "INSERT INTO hashtags VALUES(:1, :2)"
    for entry in hashtags:
        try:
            cursor.execute(query, {'1': tweet_ID, '2': entry['text']})
        except Error as err:
            if str(err).startswith("UNIQUE constraint failed"):
                pass
            else:
                print(err)
    return None
def insert_mentions(tweet_ID, mentions, cursor, api):
    """Record every @mention on *tweet_ID*, storing each mentioned user too.

    :param tweet_ID: id string of the tweet the mentions belong to
    :param mentions: tweet.entities['user_mentions'] -- list of dicts with a
        'screen_name' key
    :param cursor: sqlite3 cursor on the project database
    :param api: authenticated tweepy API handle, used to look up each user
    :return: None -- lookup failures are reported and skipped; duplicate
        (tweet_id, handle) rows are silently ignored
    """
    query = "INSERT INTO mentions VALUES(:1, :2)"
    for entry in mentions:
        handle = entry['screen_name']
        fetched = None
        try:
            fetched = api.get_user(handle)
        except tweepy.TweepError as err:
            code = err.api_code
            if code == 63:
                print("Hit tweepy error 63 while attempting to add @" + str(handle) + ". User has been suspended.")
            elif code is None:
                print("tweepy rate limit exceeded, wait 15 minutes then try again.")
            else:
                print("Hit tweepy error code " + str(code))
        # Lookup failed above: nothing to store for this mention.
        if fetched is None:
            continue
        # Lookup succeeded: store the user, then the mention link.
        insert_user(fetched, cursor)
        try:
            cursor.execute(query, {'1': tweet_ID, '2': handle})
        except Error as err:
            if str(err).startswith("UNIQUE constraint failed"):
                pass
            else:
                print(err)
    return None
# function for checking totals
def show_totals(cursor):
    """Print the row count of each of the four collection tables.

    :param cursor: sqlite3 cursor on the project database
    """
    for table in ("tweets", "users", "hashtags", "mentions"):
        cursor.execute("SELECT COUNT(*) FROM " + table)
        total = cursor.fetchone()[0]
        print("Total " + table + " collected: " + str(total))
# analytical functions
def most_followed_user(cursor):
    """Print the handle and follower count of the most-followed stored user.

    :param cursor: sqlite3 cursor on the project database
    """
    cursor.execute("SELECT MAX(followers) FROM users")
    top_count = int(cursor.fetchone()[0])
    cursor.execute("SELECT handle FROM users WHERE followers = :1", {'1': top_count})
    top_handle = str(cursor.fetchone()[0])
    print("Most followed user collected: @" + top_handle + " - " + str(top_count) + " followers")
def most_favorited_tweet(cursor):
    """Print the stored tweet with the highest favorite count plus its author.

    :param cursor: sqlite3 cursor on the project database
    """
    cursor.execute("SELECT MAX(hearts) from tweets")
    top_hearts = int(cursor.fetchone()[0])
    cursor.execute("SELECT tweet_text, user_id FROM tweets WHERE hearts = :1", {'1': top_hearts})
    row = cursor.fetchone()
    text = str(row[0])
    author_id = str(row[1])
    cursor.execute("SELECT handle FROM users WHERE id = :1", {'1': author_id})
    handle = str(cursor.fetchone()[0])
    print("Most favorited tweet collected: " + text + " - " + str(top_hearts) + " favorites, tweeted by @" + handle)
def most_retweeted_tweet(cursor):
    """Print the stored tweet with the highest retweet count plus its author.

    :param cursor: sqlite3 cursor on the project database
    """
    cursor.execute("SELECT MAX(retweets) from tweets")
    top_retweets = int(cursor.fetchone()[0])
    cursor.execute("SELECT tweet_text, user_id FROM tweets WHERE retweets = :1", {'1': top_retweets})
    row = cursor.fetchone()
    text = str(row[0])
    author_id = str(row[1])
    cursor.execute("SELECT handle FROM users WHERE id = :1", {'1': author_id})
    handle = str(cursor.fetchone()[0])
    print("Most retweeted tweet collected: " + text + " - " + str(top_retweets) + " retweets, tweeted by @" + handle)
def best_time_to_tweet_nonzero(cursor):
    """Find the hour of day at which collected tweets average the most
    favorites, counting only tweets with more than 5 favorites.

    BUGFIX: the old version divided by len(bucket) for every hour and crashed
    with ZeroDivisionError whenever any hour had no qualifying tweets (the
    common case); empty hours now average 0.  The hour is also parsed from
    the time_posted column itself ('YYYY-MM-DD HH:MM:SS') instead of slicing
    str(row), which depended on the tuple repr.

    :param cursor: sqlite3 cursor over a database with a tweets table
        holding time_posted (text) and hearts (integer) columns
    :return: best hour of day as an int in [0, 23] (also printed)
    """
    hours = [[] for _ in range(24)]
    cursor.execute("SELECT time_posted, hearts from tweets")
    for time_posted, hearts in cursor:
        hour = int(time_posted[11:13])  # 'YYYY-MM-DD HH:MM:SS' -> HH
        # Only count tweets that got meaningful engagement (> 5 favorites).
        if hearts > 5:
            hours[hour].append(hearts)
    averages = [sum(bucket) / len(bucket) if bucket else 0 for bucket in hours]
    best_hour = averages.index(max(averages))
    print("Best hour (00:00 - 23:00) of the day to tweet: " + str(best_hour) + ":00")
    return best_hour
def benefit_of_hashtags(cursor):
    """Compare the average favorite count of hashtag-bearing tweets with the
    overall average and print which group performs better.

    Note: the printed message says "tweets without hashtags" but the baseline
    is actually the overall average across all tweets; the wording is kept
    for output compatibility.

    :param cursor: sqlite3 cursor over a database with tweets (id, hearts)
        and hashtags (tweet_id) tables
    :return: hashtag average minus overall average, rounded to 1 decimal
        place; positive means hashtag tweets average more favorites
    """
    # (Removed an unused local list the old version never populated.)
    cursor.execute("SELECT avg(hearts) FROM tweets ")
    overall_avg = float(cursor.fetchone()[0])
    cursor.execute("SELECT avg(hearts) FROM tweets INNER JOIN hashtags on hashtags.tweet_id = tweets.id")
    hashtag_avg = float(cursor.fetchone()[0])
    difference = round(hashtag_avg - overall_avg, 1)
    if difference > 0:
        print("Tweets with hashtags average " + str(difference) + " MORE favorites than tweets without hashtags")
    else:
        print("Tweets with hashtags average " + str(abs(difference)) + " LESS favorites than tweets without hashtags")
    return difference

#TODO: check brothers' tweet popularity
| true |
190ccc675adaa10bfe0943bc74c7ad4c3e3b9dd6 | Python | Godot-dev/IUT_GameJam | /classes/typesProjectiles/radis.py | UTF-8 | 748 | 3.21875 | 3 | [] | no_license | import random
import pygame
from classes.projectile import Projectile
class Radis(Projectile): # The radish moves in a straight line, but much faster than the other fruits and vegetables
    """Fast straight-line projectile; its speed scales with difficulty."""

    def __init__(self, difficulty, image):
        # NOTE(review): super(Projectile, self) starts the MRO search *after*
        # Projectile, so Projectile.__init__ is skipped here; this was most
        # likely meant to be super(Radis, self).__init__().  Confirm before
        # changing, since Projectile.__init__'s signature is not visible in
        # this file.
        super(Projectile, self).__init__()
        self.difficulty = difficulty
        # Straight-line speed grows linearly with difficulty.
        self.velocity = 4 + 2 * difficulty
        # Pick one of four spawn points; each maps to a 90-degree rotation.
        self.pointDeDepart = random.randint(0, 3)
        self.angle = 90 * self.pointDeDepart
        self.image = image
        self.image = pygame.transform.scale(self.image, (200, 200))
        self.image = pygame.transform.rotate(self.image, self.angle)
        self.rect = self.image.get_rect()
        self.mask = pygame.mask.from_surface(self.image)
        self.rect_init()